• 如果您觉得本站非常有看点,那么赶紧使用Ctrl+D 收藏吧

Java MaterializedField类的典型用法和代码示例

java 2次浏览

本文整理汇总了Java中org.apache.drill.exec.record.MaterializedField的典型用法代码示例。如果您正苦于以下问题:Java MaterializedField类的具体用法?Java MaterializedField怎么用?Java MaterializedField使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。

MaterializedField类属于org.apache.drill.exec.record包,在下文中一共展示了MaterializedField类的34个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。

示例1: newSchema

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Rebuilds the Parquet {@code MessageType} from the current Drill batch schema and
 * re-initializes the page store, column write store, and record consumer for it.
 *
 * @throws IOException if the underlying page write store cannot be created
 */
private void newSchema() throws IOException {
  List<Type> types = Lists.newArrayList();
  for (MaterializedField field : batchSchema) {
    // Skip the internal partition-comparator column; it must not appear in the file schema.
    if (field.getPath().equals(SchemaPath.getSimplePath(WriterPrel.PARTITION_COMPARATOR_FIELD))) {
      continue;
    }
    types.add(getType(field));
  }
  schema = new MessageType("root", types);

  // Heuristic: give each column a fifth of its per-column share of the block size,
  // but never less than MINIMUM_BUFFER_SIZE.
  int initialBlockBufferSize = max(MINIMUM_BUFFER_SIZE, blockSize / this.schema.getColumns().size() / 5);
  pageStore = ColumnChunkPageWriteStoreExposer.newColumnChunkPageWriteStore(this.oContext,
      codecFactory.getCompressor(codec, pageSize),
      schema,
      initialBlockBufferSize);
  // Page buffer: pageSize plus 10% slack, capped by the block buffer size.
  int initialPageBufferSize = max(MINIMUM_BUFFER_SIZE, min(pageSize + pageSize / 10, initialBlockBufferSize));
  store = new ColumnWriteStoreV1(pageStore, pageSize, initialPageBufferSize, dictionaryPageSize, enableDictionary, writerVersion);
  MessageColumnIO columnIO = new ColumnIOFactory(false).getColumnIO(this.schema);
  consumer = columnIO.getRecordWriter(store);
  setUp(schema, consumer);
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:22,
代码来源:ParquetRecordWriter.java

示例2: getObject

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Materializes the repeated-map value at {@code index} as a list of
 * field-name-to-value maps, one map per repeated element. The internal
 * offsets field is skipped, and null child values are omitted.
 */
@Override
public Object getObject(int index) {
  final List<Object> result = new JsonStringArrayList<>();
  final int start = offsets.getAccessor().get(index);
  final int end = offsets.getAccessor().get(index + 1);
  for (int pos = start; pos < end; pos++) {
    final Map<String, Object> entry = Maps.newLinkedHashMap();
    for (final MaterializedField child : getField().getChildren()) {
      if (child.equals(BaseRepeatedValueVector.OFFSETS_FIELD)) {
        continue; // offsets are bookkeeping, not user data
      }
      final String name = child.getLastName();
      final Object value = getChild(name).getAccessor().getObject(pos);
      if (value != null) {
        entry.put(name, value);
      }
    }
    result.add(entry);
  }
  return result;
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:21,
代码来源:RepeatedMapVector.java

示例3: setup

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Opens the HBase scanner and pre-registers output vectors: a VarBinary
 * vector for the row key (when projected) and one MapVector per referenced
 * column family.
 *
 * @param context operator context for this scan
 * @param output  mutator used to register output vectors
 * @throws ExecutionSetupException on schema-change or HBase I/O failure
 */
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  this.operatorContext = context;
  this.outputMutator = output;
  familyVectorMap = new HashMap<String, MapVector>();

  try {
    // Add Vectors to output in the order specified when creating reader
    for (SchemaPath column : getColumns()) {
      if (column.equals(ROW_KEY_PATH)) {
        MaterializedField field = MaterializedField.create(column, ROW_KEY_TYPE);
        rowKeyVector = outputMutator.addField(field, VarBinaryVector.class);
      } else {
        // Non-row-key columns are grouped under a vector for their column family.
        getOrCreateFamilyVector(column.getRootSegment().getPath(), false);
      }
    }
    logger.debug("Opening scanner for HBase table '{}', Zookeeper quorum '{}', port '{}', znode '{}'.",
        hbaseTableName, hbaseConf.get(HConstants.ZOOKEEPER_QUORUM),
        hbaseConf.get(HBASE_ZOOKEEPER_PORT), hbaseConf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
    hTable = new HTable(hbaseConf, hbaseTableName);
    resultScanner = hTable.getScanner(hbaseScan);
  } catch (SchemaChangeException | IOException e) {
    throw new ExecutionSetupException(e);
  }
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:26,
代码来源:HBaseRecordReader.java

示例4: showSingleBatch

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Prints one deserialized record batch: optionally the batch metadata and a
 * per-column schema header, followed by the batch contents.
 */
private void showSingleBatch(VectorAccessibleSerializable vcSerializable, boolean showHeader) {
  final VectorContainer vectorContainer = (VectorContainer) vcSerializable.get();

  if (showHeader) {
    // Batch-level metadata first, then one line per column.
    System.out.println(getBatchMetaInfo(vcSerializable).toString());

    System.out.println("Schema Information");
    for (final VectorWrapper w : vectorContainer) {
      final MaterializedField column = w.getValueVector().getField();
      final String line = String.format("name : %s, minor_type : %s, data_mode : %s",
          column.toExpr(),
          column.getType().getMinorType().toString(),
          column.isNullable() ? "nullable" : "non-nullable");
      System.out.println(line);
    }
  }

  // Dump the actual row data.
  VectorUtil.showVectorAccessibleContent(vectorContainer);
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:22,
代码来源:DumpCat.java

示例5: getExpressionList

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Returns the projection list: either the expressions configured on the
 * operator, or a synthesized identity projection over the incoming schema in
 * which complex/repeated columns are converted to JSON text and cast to
 * VARCHAR so they can be written out.
 */
private List<NamedExpression> getExpressionList() {
  if (popConfig.getExprs() != null) {
    return popConfig.getExprs();
  }

  final List<NamedExpression> exprs = Lists.newArrayList();
  for (final MaterializedField field : incoming.getSchema()) {
    if (Types.isComplex(field.getType()) || Types.isRepeated(field.getType())) {
      // Complex data cannot be projected as-is; serialize it to a JSON string first.
      final LogicalExpression convertToJson = FunctionCallFactory.createConvert(ConvertExpression.CONVERT_TO, "JSON", field.getPath(), ExpressionPosition.UNKNOWN);
      final String castFuncName = CastFunctions.getCastFunc(MinorType.VARCHAR);
      final List<LogicalExpression> castArgs = Lists.newArrayList();
      castArgs.add(convertToJson);  //input_expr
      /*
       * We are implicitly casting to VARCHAR so we don't have a max length,
       * using an arbitrary value. We trim down the size of the stored bytes
       * to the actual size so this size doesn't really matter.
       */
      castArgs.add(new ValueExpressions.LongExpression(TypeHelper.VARCHAR_DEFAULT_CAST_LEN, null)); //
      final FunctionCall castCall = new FunctionCall(castFuncName, castArgs, ExpressionPosition.UNKNOWN);
      exprs.add(new NamedExpression(castCall, new FieldReference(field.getPath())));
    } else {
      // Simple columns pass through unchanged.
      exprs.add(new NamedExpression(field.getPath(), new FieldReference(field.getPath())));
    }
  }
  return exprs;
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:27,
代码来源:ProjectRecordBatch.java

示例6: setupNewSchema

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Pushes the new incoming schema to the RecordWriter and (re)builds this
 * batch's two-column output schema: the fragment id and the count of records
 * written.
 *
 * NOTE(review): container.buildSchema(...) is invoked twice — inside the try
 * block and again after the writer is created. The first call appears
 * redundant; confirm against upstream before removing.
 *
 * @throws IOException if the record writer cannot accept the new schema
 */
protected void setupNewSchema() throws IOException {
  try {
    // update the schema in RecordWriter
    stats.startSetup();
    recordWriter.updateSchema(incoming);
    // Create two vectors for:
    //   1. Fragment unique id.
    //   2. Summary: currently contains number of records written.
    final MaterializedField fragmentIdField =
        MaterializedField.create(SchemaPath.getSimplePath("Fragment"), Types.required(MinorType.VARCHAR));
    final MaterializedField summaryField =
        MaterializedField.create(SchemaPath.getSimplePath("Number of records written"),
            Types.required(MinorType.BIGINT));

    container.addOrGet(fragmentIdField);
    container.addOrGet(summaryField);
    container.buildSchema(BatchSchema.SelectionVectorMode.NONE);
  } finally {
    // Always stop the setup timer, even if updateSchema throws.
    stats.stopSetup();
  }

  eventBasedRecordWriter = new EventBasedRecordWriter(incoming, recordWriter);
  container.buildSchema(SelectionVectorMode.NONE);
  schema = container.getSchema();
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:26,
代码来源:WriterRecordBatch.java

示例7: addField

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Returns the vector registered for {@code field}, creating and registering
 * a new one when none exists or when the existing vector's concrete class
 * differs from {@code clazz}. Creating a vector replaces (and clears) any
 * stale one and marks the schema as changed.
 *
 * @throws SchemaChangeException if the created vector is not assignable to {@code clazz}
 */
@Override
public <T extends ValueVector> T addField(MaterializedField field, Class<T> clazz) throws SchemaChangeException {
  ValueVector vector = fieldVectorMap.get(field.key());
  final boolean needNewVector = (vector == null) || (vector.getClass() != clazz);
  if (needNewVector) {
    vector = TypeHelper.getNewVector(field, oContext.getAllocator(), callBack);
    if (!clazz.isAssignableFrom(vector.getClass())) {
      throw new SchemaChangeException(String.format(
          "The class that was provided %s does not correspond to the expected vector type of %s.",
          clazz.getSimpleName(), vector.getClass().getSimpleName()));
    }

    // Replace any stale vector of the wrong type and release its buffers.
    final ValueVector previous = fieldVectorMap.put(field.key(), vector);
    if (previous != null) {
      previous.clear();
      container.remove(previous);
    }

    container.add(vector);
    schemaChange = true; // downstream operators must handle the new schema
  }

  return clazz.cast(vector);
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:27,
代码来源:ScanBatch.java

示例8: materialize

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Materializes the window-function expression against the batch schema,
 * allocates the output vector for it, and prepares the write expression.
 *
 * @return false when materialization failed (errors already reported), true otherwise
 */
@Override
boolean materialize(final NamedExpression ne, final VectorContainer batch, final FunctionLookupContext registry)
    throws SchemaChangeException {
  final LogicalExpression materialized =
      ExpressionTreeMaterializer.materializeAndCheckErrors(ne.getExpr(), batch, registry);
  if (materialized == null) {
    return false;
  }

  // Register and allocate the value vector that will receive the aggregation output.
  final MaterializedField outputField = MaterializedField.create(ne.getRef(), materialized.getMajorType());
  batch.addOrGet(outputField).allocateNew();
  final TypedFieldId outputId = batch.getValueVectorId(ne.getRef());
  writeAggregationToOutput = new ValueVectorWriteExpression(outputId, materialized, true);

  return true;
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:17,
代码来源:WindowFunction.java

示例9: OrderedPartitionRecordBatch

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Sets up an ordered-partition sender: copies sampling parameters from the
 * operator config, obtains the distributed cache structures used to share
 * sample data between fragments, and creates the INT vector that will hold
 * each record's partition key.
 *
 * NOTE(review): {@code cache} is initialized to null and immediately
 * dereferenced below, so as written this constructor always throws a
 * NullPointerException. The cache was presumably obtained from the context
 * in the original code — confirm against upstream Drill.
 */
public OrderedPartitionRecordBatch(OrderedPartitionSender pop, RecordBatch incoming, FragmentContext context) throws OutOfMemoryException {
  super(pop, context);
  this.incoming = incoming;
  this.partitions = pop.getDestinations().size();
  this.sendingMajorFragmentWidth = pop.getSendingWidth();
  this.recordsToSample = pop.getRecordsToSample();
  this.samplingFactor = pop.getSamplingFactor();
  this.completionFactor = pop.getCompletionFactor();

  DistributedCache cache = null;
  this.mmap = cache.getMultiMap(MULTI_CACHE_CONFIG);
  this.tableMap = cache.getMap(SINGLE_CACHE_CONFIG);
  Preconditions.checkNotNull(tableMap);

  // Key the per-query sample state by query id + major fragment id.
  this.mapKey = String.format("%s_%d", context.getHandle().getQueryId(), context.getHandle().getMajorFragmentId());
  this.minorFragmentSampleCount = cache.getCounter(mapKey);

  SchemaPath outputPath = popConfig.getRef();
  MaterializedField outputField = MaterializedField.create(outputPath, Types.required(TypeProtos.MinorType.INT));
  this.partitionKeyVector = (IntVector) TypeHelper.getNewVector(outputField, oContext.getAllocator());

}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:23,
代码来源:OrderedPartitionRecordBatch.java

示例10: constructHyperBatch

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Builds a FOUR_BYTE-selection-vector hyper batch in which each schema field
 * is backed by the corresponding vector from every batch group.
 */
private VectorContainer constructHyperBatch(List<BatchGroup> batchGroupList) {
  final VectorContainer hyperBatch = new VectorContainer();
  final int groupCount = batchGroupList.size();
  for (MaterializedField field : schema) {
    final ValueVector[] columns = new ValueVector[groupCount];
    int slot = 0;
    for (BatchGroup group : batchGroupList) {
      columns[slot] = group.getValueAccessorById(
          field.getValueClass(),
          group.getValueVectorId(field.getPath()).getFieldIds())
          .getValueVector();
      slot++;
    }
    hyperBatch.add(columns);
  }
  hyperBatch.buildSchema(BatchSchema.SelectionVectorMode.FOUR_BYTE);
  return hyperBatch;
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:17,
代码来源:ExternalSortBatch.java

示例11: setup

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Creates one value vector per configured mock column and sizes the batch so
 * that roughly 250000 bytes of estimated row data fit per batch.
 *
 * NOTE(review): {@code vvClass} is a raw Class; TypeHelper.getValueVectorClass
 * presumably returns a Class of some ValueVector subtype — confirm before
 * tightening the type.
 *
 * @throws ExecutionSetupException when vector registration fails
 */
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  try {
    final int estimateRowSize = getEstimatedRecordSize(config.getTypes());
    valueVectors = new ValueVector[config.getTypes().length];
    batchRecordCount = 250000 / estimateRowSize;

    for (int i = 0; i < config.getTypes().length; i++) {
      final MajorType type = config.getTypes()[i].getMajorType();
      final MaterializedField field = getVector(config.getTypes()[i].getName(), type, batchRecordCount);
      final Class vvClass = TypeHelper.getValueVectorClass(field.getType().getMinorType(), field.getDataMode());
      valueVectors[i] = output.addField(field, vvClass);
    }
  } catch (SchemaChangeException e) {
    throw new ExecutionSetupException("Failure while setting up fields", e);
  }
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:18,
代码来源:MockRecordReader.java

示例12: fieldSelected

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Reports whether {@code field} is part of the projection. A star query
 * selects everything; otherwise the field is matched against each requested
 * column, and the first matching column is marked found in
 * {@code columnsFound}.
 */
private boolean fieldSelected(MaterializedField field) {
  // TODO - not sure if this is how we want to represent this
  // for now it makes the existing tests pass, simply selecting
  // all available data if no columns are provided
  if (isStarQuery()) {
    return true;
  }

  int columnIndex = 0;
  for (SchemaPath expr : getColumns()) {
    if (field.matches(expr)) {
      columnsFound[columnIndex] = true;
      return true;
    }
    columnIndex++;
  }
  return false;
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:19,
代码来源:ParquetRecordReader.java

示例13: getType

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Maps a Drill field to a Parquet schema type: MAP fields become groups
 * (REPEATED when the Drill mode is repeated, else OPTIONAL), LIST is
 * unsupported, and everything else maps to a primitive type.
 */
private parquet.schema.Type getType(MaterializedField field) {
  final MinorType minorType = field.getType().getMinorType();
  final DataMode dataMode = field.getType().getMode();
  if (minorType == MinorType.MAP) {
    final List<parquet.schema.Type> childTypes = Lists.newArrayList();
    for (MaterializedField childField : field.getChildren()) {
      childTypes.add(getType(childField)); // recurse into nested maps
    }
    final Repetition repetition = (dataMode == DataMode.REPEATED) ? Repetition.REPEATED : Repetition.OPTIONAL;
    return new GroupType(repetition, field.getLastName(), childTypes);
  }
  if (minorType == MinorType.LIST) {
    throw new UnsupportedOperationException("Unsupported type " + minorType);
  }
  return getPrimitiveType(field);
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:17,
代码来源:ParquetRecordWriter.java

示例14: evalExprWithInterpreter

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Test helper: materializes {@code expression} against the batch schema,
 * evaluates it with the interpreter, and returns the populated result vector.
 * The caller is responsible for releasing the returned vector.
 */
private ValueVector evalExprWithInterpreter(String expression, RecordBatch batch, Drillbit bit) throws Exception {
  final LogicalExpression expr = parseExpr(expression);
  final ErrorCollector error = new ErrorCollectorImpl();
  final LogicalExpression materializedExpr = ExpressionTreeMaterializer.materialize(expr, batch, error, bit.getContext().getFunctionImplementationRegistry());
  if (error.getErrorCount() != 0) {
    // Log the details, then fail the test via the assertion below.
    logger.error("Failure while materializing expression [{}].  Errors: {}", expression, error);
    assertEquals(0, error.getErrorCount());
  }

  final MaterializedField outputField = MaterializedField.create("outCol", materializedExpr.getMajorType());
  final ValueVector vector = TypeHelper.getNewVector(outputField, bit.getContext().getAllocator());

  vector.allocateNewSafe();
  InterpreterEvaluator.evaluate(batch, vector, materializedExpr);

  return vector;
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:18,
代码来源:ExpressionInterpreterTest.java

示例15: testFixedVectorReallocation

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Verifies UInt4Vector reallocation behavior: doubling succeeds up to the
 * maximum allocation size, beyond which reAlloc must fail with the
 * OversizedAllocationException declared in the @Test annotation.
 */
@Test(expected = OversizedAllocationException.class)
public void testFixedVectorReallocation() {
  final MaterializedField field = MaterializedField.create(EMPTY_SCHEMA_PATH, UInt4Holder.TYPE);
  final UInt4Vector vector = new UInt4Vector(field, allocator);
  // edge case 1: buffer size = max value capacity
  final int expectedValueCapacity = BaseValueVector.MAX_ALLOCATION_SIZE / 4;
  try {
    vector.allocateNew(expectedValueCapacity);
    assertEquals(expectedValueCapacity, vector.getValueCapacity());
    vector.reAlloc();
    assertEquals(expectedValueCapacity * 2, vector.getValueCapacity());
  } finally {
    vector.close();
  }

  // common case: value count < max value capacity
  try {
    vector.allocateNew(BaseValueVector.MAX_ALLOCATION_SIZE / 8);
    vector.reAlloc(); // value allocation reaches MAX_ALLOCATION_SIZE
    vector.reAlloc(); // this should throw an OversizedAllocationException (the expected test outcome)
  } finally {
    vector.close();
  }
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:25,
代码来源:TestValueVector.java

示例16: testFixedType

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Writes a handful of values into a UInt4Vector (including both ends of the
 * allocated range) and reads each one back.
 */
@Test
public void testFixedType() {
  MaterializedField field = MaterializedField.create(EMPTY_SCHEMA_PATH, UInt4Holder.TYPE);

  // Create a new value vector for 1024 integers
  try (UInt4Vector vector = new UInt4Vector(field, allocator)) {
    final UInt4Vector.Mutator mutator = vector.getMutator();
    vector.allocateNew(1024);

    // Put and set a few values, then verify each round-trips.
    final int[] indices = {0, 1, 100, 1022, 1023};
    final int[] values = {100, 101, 102, 103, 104};
    for (int i = 0; i < indices.length; i++) {
      mutator.setSafe(indices[i], values[i]);
    }
    for (int i = 0; i < indices.length; i++) {
      assertEquals(values[i], vector.getAccessor().get(indices[i]));
    }
  }
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:23,
代码来源:TestValueVector.java

示例17: getOrCreateFamilyVector

点赞 3

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Returns the MapVector for an HBase column family, lazily creating,
 * registering, and (optionally) allocating it on first use.
 *
 * @throws DrillRuntimeException wrapping any SchemaChangeException from registration
 */
private MapVector getOrCreateFamilyVector(String familyName, boolean allocateOnCreate) {
  try {
    final MapVector existing = familyVectorMap.get(familyName);
    if (existing != null) {
      return existing;
    }
    final SchemaPath column = SchemaPath.getSimplePath(familyName);
    final MaterializedField field = MaterializedField.create(column, COLUMN_FAMILY_TYPE);
    final MapVector created = outputMutator.addField(field, MapVector.class);
    if (allocateOnCreate) {
      created.allocateNew();
    }
    // Track the new family both as a projected column and in the family map.
    getColumns().add(column);
    familyVectorMap.put(familyName, created);
    return created;
  } catch (SchemaChangeException e) {
    throw new DrillRuntimeException(e);
  }
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:19,
代码来源:HBaseRecordReader.java

示例18: RepeatedListVector

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Restores a repeated-list vector from its materialized field: when the
 * field already carries children, the last child is the data field and its
 * inner vector is re-created via addOrGetVector.
 *
 * NOTE(review): the assert permits at most two children — presumably the
 * internal offsets field plus the data field; confirm the invariant against
 * upstream.
 */
protected RepeatedListVector(MaterializedField field, BufferAllocator allocator, CallBack callBack, DelegateRepeatedVector delegate) {
  super(field, allocator, callBack);
  this.delegate = Preconditions.checkNotNull(delegate);

  final List<MaterializedField> children = Lists.newArrayList(field.getChildren());
  final int childSize = children.size();
  assert childSize < 3;
  final boolean hasChild = childSize > 0;
  if (hasChild) {
    // the last field is data field
    final MaterializedField child = children.get(childSize-1);
    addOrGetVector(VectorDescriptor.create(child));
  }
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:15,
代码来源:RepeatedListVector.java

示例19: AbstractMapVector

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Builds a map vector and re-creates its child vectors from the (cloned)
 * materialized field's children, skipping the internal offsets field.
 */
protected AbstractMapVector(MaterializedField field, BufferAllocator allocator, CallBack callBack) {
  super(field.clone(), allocator, callBack);
  final MaterializedField clonedField = field.clone();
  // create the hierarchy of the child vectors based on the materialized field
  for (MaterializedField child : clonedField.getChildren()) {
    if (child.equals(BaseRepeatedValueVector.OFFSETS_FIELD)) {
      continue; // offsets are managed internally, not a user-visible child
    }
    final ValueVector childVector = TypeHelper.getNewVector(child, allocator, callBack);
    putVector(child.getLastName(), childVector);
  }
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:13,
代码来源:AbstractMapVector.java

示例20: addOrGet

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Adds (or retrieves) a child vector with the given name and type, wrapping
 * any schema-change failure in an IllegalStateException.
 */
@Override
public <T extends ValueVector> T addOrGet(String name, MajorType type, Class<T> clazz) {
  final MaterializedField field = MaterializedField.create(name, type);
  try {
    final ValueVector vector = mutator.addField(field, clazz);
    putChild(name, vector);
    return typeify(vector, clazz);
  } catch (SchemaChangeException e) {
    throw new IllegalStateException(e);
  }
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:11,
代码来源:VectorContainerWriter.java

示例21: readFromStream

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Reads from an InputStream and parses a RecordBatchDef. From this, we construct a SelectionVector2 if it exists,
 * then reconstruct each serialized vector and collect them into a new vector container.
 * @param input the InputStream to read from
 * @throws IOException
 */
@Override
public void readFromStream(InputStream input) throws IOException {
  final VectorContainer container = new VectorContainer();
  final UserBitShared.RecordBatchDef batchDef = UserBitShared.RecordBatchDef.parseDelimitedFrom(input);
  recordCount = batchDef.getRecordCount();
  if (batchDef.hasCarriesTwoByteSelectionVector() && batchDef.getCarriesTwoByteSelectionVector()) {

    // Lazily create the SV2, then read its bytes directly from the stream.
    if (sv2 == null) {
      sv2 = new SelectionVector2(allocator);
    }
    sv2.allocateNew(recordCount * SelectionVector2.RECORD_SIZE);
    sv2.getBuffer().setBytes(0, input, recordCount * SelectionVector2.RECORD_SIZE);
    svMode = BatchSchema.SelectionVectorMode.TWO_BYTE;
  }
  final List<ValueVector> vectorList = Lists.newArrayList();
  final List<SerializedField> fieldList = batchDef.getFieldList();
  for (SerializedField metaData : fieldList) {
    final int dataLength = metaData.getBufferLength();
    final MaterializedField field = MaterializedField.create(metaData);
    final DrillBuf buf = allocator.buffer(dataLength);
    final ValueVector vector;
    try {
      buf.writeBytes(input, dataLength);
      vector = TypeHelper.getNewVector(field, allocator);
      vector.load(metaData, buf);
    } finally {
      // Our reference is released unconditionally; load() presumably retains
      // its own reference to the buffer — confirm against vector semantics.
      buf.release();
    }
    vectorList.add(vector);
  }
  container.addCollection(vectorList);
  container.buildSchema(svMode);
  container.setRecordCount(recordCount);
  va = container;
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:42,
代码来源:VectorAccessibleSerializable.java

示例22: inferOutputFieldsFromLeftSide

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Copies the left input's schema into {@code outputFields}, one
 * MaterializedField (path + type) per incoming column.
 */
private void inferOutputFieldsFromLeftSide() {
  outputFields = Lists.newArrayList();
  for (final MaterializedField field : leftSide.getRecordBatch().getSchema()) {
    outputFields.add(MaterializedField.create(field.getPath(), field.getType()));
  }
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:9,
代码来源:UnionAllRecordBatch.java

示例23: getOutputFields

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Returns the inferred output fields.
 *
 * @throws NullPointerException if schema inference has not run yet
 */
public List<MaterializedField> getOutputFields() {
  if (outputFields != null) {
    return outputFields;
  }
  throw new NullPointerException("Output fields have not been inferred");
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:8,
代码来源:UnionAllRecordBatch.java

示例24: BatchHolder

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Allocates one value vector per materialized aggregation workspace field,
 * sized to hold HashTable.BATCH_SIZE records so that slots line up one-to-one
 * with the hash table's own batch holders. On any allocation failure the
 * partially-populated container is cleared.
 */
private BatchHolder() {

      aggrValuesContainer = new VectorContainer();
      boolean success = false;
      try {
        ValueVector vector;

        for (int i = 0; i < materializedValueFields.length; i++) {
          MaterializedField outputField = materializedValueFields[i];
          // Create a type-specific ValueVector for this value
          vector = TypeHelper.getNewVector(outputField, allocator);

          // Try to allocate space to store BATCH_SIZE records. Key stored at index i in HashTable has its workspace
          // variables (such as count, sum etc) stored at index i in HashAgg. HashTable and HashAgg both have
          // BatchHolders. Whenever a BatchHolder in HashAgg reaches its capacity, a new BatchHolder is added to
          // HashTable. If HashAgg can't store BATCH_SIZE records in a BatchHolder, it leaves empty slots in current
          // BatchHolder in HashTable, causing the HashTable to be space inefficient. So it is better to allocate space
          // to fit as close to as BATCH_SIZE records.
          if (vector instanceof FixedWidthVector) {
            ((FixedWidthVector) vector).allocateNew(HashTable.BATCH_SIZE);
          } else if (vector instanceof VariableWidthVector) {
            ((VariableWidthVector) vector).allocateNew(HashTable.VARIABLE_WIDTH_VECTOR_SIZE * HashTable.BATCH_SIZE,
                HashTable.BATCH_SIZE);
          } else if (vector instanceof ObjectVector) {
            ((ObjectVector) vector).allocateNew(HashTable.BATCH_SIZE);
          } else {
            vector.allocateNew();
          }

          // Track the smallest capacity actually obtained across all vectors.
          capacity = Math.min(capacity, vector.getValueCapacity());

          aggrValuesContainer.add(vector);
        }
        success = true;
      } finally {
        if (!success) {
          aggrValuesContainer.clear();
        }
      }
    }
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:41,
代码来源:HashAggTemplate.java

示例25: buildSchema

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Builds the outgoing schema by polling fragment providers until one yields a
 * batch with a non-empty field list, then creates and allocates one output
 * vector per serialized field. A provider returning null marks the batch DONE
 * (or STOP when the context has asked to halt). The first real batch is kept
 * in {@code tempBatchHolder} for use by the subsequent next() call.
 *
 * @throws SchemaChangeException declared by the interface
 */
@Override
public void buildSchema() throws SchemaChangeException {
  // find frag provider that has data to use to build schema, and put in tempBatchHolder for later use
  tempBatchHolder = new RawFragmentBatch[fragProviders.length];
  int i = 0;
  try {
    while (true) {
      if (i >= fragProviders.length) {
        // Every provider was empty; nothing to read.
        state = BatchState.DONE;
        return;
      }
      final RawFragmentBatch batch = getNext(i);
      if (batch == null) {
        if (!context.shouldContinue()) {
          state = BatchState.STOP;
        } else {
          state = BatchState.DONE;
        }

        break;
      }
      if (batch.getHeader().getDef().getFieldCount() == 0) {
        // This provider sent an empty schema; try the next one.
        i++;
        continue;
      }
      tempBatchHolder[i] = batch;
      for (final SerializedField field : batch.getHeader().getDef().getFieldList()) {
        final ValueVector v = outgoingContainer.addOrGet(MaterializedField.create(field));
        v.allocateNew();
      }
      break;
    }
  } catch (final IOException e) {
    throw new DrillRuntimeException(e);
  }
  outgoingContainer = VectorContainer.canonicalize(outgoingContainer);
  outgoingContainer.buildSchema(SelectionVectorMode.NONE);
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:39,
代码来源:MergingRecordBatch.java

示例26: setFlattenVector

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Resolves the column being flattened to its RepeatedValueVector and hands
 * it to the flattener; any failure is reported as flattening a non-repeated
 * field.
 */
private void setFlattenVector() {
  try {
    final TypedFieldId id = incoming.getValueVectorId(popConfig.getColumn());
    final MaterializedField column = incoming.getSchema().getColumn(id.getFieldIds()[0]);
    final ValueVector raw =
        incoming.getValueAccessorById(column.getValueClass(), id.getFieldIds()).getValueVector();
    flattener.setFlattenField(RepeatedValueVector.class.cast(raw));
  } catch (Exception ex) {
    throw UserException.unsupportedError(ex).message("Trying to flatten a non-repeated field.").build(logger);
  }
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:12,
代码来源:FlattenRecordBatch.java

示例27: getExpressionList

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Builds identity projections for every incoming column except the one being
 * flattened (which is produced separately).
 */
private List<NamedExpression> getExpressionList() {
  final List<NamedExpression> projections = Lists.newArrayList();
  for (MaterializedField field : incoming.getSchema()) {
    final SchemaPath path = field.getPath();
    if (path.equals(popConfig.getColumn())) {
      continue; // the flatten column is handled by the flattener itself
    }
    projections.add(new NamedExpression(path, new FieldReference(path)));
  }
  return projections;
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:12,
代码来源:FlattenRecordBatch.java

示例28: getCopier

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Creates a copier that does a project for every Nth record from a VectorContainer incoming into VectorContainer
 * outgoing. Each Ordering in orderings generates a column, and evaluation of the expression associated with each
 * Ordering determines the value of each column. These records will later be sorted based on the values in each
 * column, in the same order as the orderings.
 *
 * @param sv4 selection vector identifying which incoming records to sample
 * @param incoming source container
 * @param outgoing destination container; one generated column ("f0", "f1", ...) per ordering
 * @param orderings sort keys whose expressions populate the generated columns
 * @param localAllocationVectors receives the vectors created here so the caller can manage allocation
 * @return the compiled, initialized SampleCopier
 * @throws SchemaChangeException on materialization errors or code-generation failure
 */
private SampleCopier getCopier(SelectionVector4 sv4, VectorContainer incoming, VectorContainer outgoing,
    List<Ordering> orderings, List<ValueVector> localAllocationVectors) throws SchemaChangeException {
  final ErrorCollector collector = new ErrorCollectorImpl();
  final ClassGenerator<SampleCopier> cg = CodeGenerator.getRoot(SampleCopier.TEMPLATE_DEFINITION,
      context.getFunctionRegistry());

  int i = 0;
  for (Ordering od : orderings) {
    final LogicalExpression expr = ExpressionTreeMaterializer.materialize(od.getExpr(), incoming, collector, context.getFunctionRegistry());
    // Generated columns are named f0, f1, ... in ordering order.
    SchemaPath schemaPath = SchemaPath.getSimplePath("f" + i++);
    // Force REQUIRED mode on the output column regardless of the input's mode.
    TypeProtos.MajorType.Builder builder = TypeProtos.MajorType.newBuilder().mergeFrom(expr.getMajorType())
        .clearMode().setMode(TypeProtos.DataMode.REQUIRED);
    TypeProtos.MajorType newType = builder.build();
    MaterializedField outputField = MaterializedField.create(schemaPath, newType);
    if (collector.hasErrors()) {
      throw new SchemaChangeException(String.format(
          "Failure while trying to materialize incoming schema.  Errors:\n %s.", collector.toErrorString()));
    }

    ValueVector vector = TypeHelper.getNewVector(outputField, oContext.getAllocator());
    localAllocationVectors.add(vector);
    TypedFieldId fid = outgoing.add(vector);
    ValueVectorWriteExpression write = new ValueVectorWriteExpression(fid, expr, true);
    HoldingContainer hc = cg.addExpr(write);
    // Generated code returns false as soon as any write fails.
    cg.getEvalBlock()._if(hc.getValue().eq(JExpr.lit(0)))._then()._return(JExpr.FALSE);
  }
  cg.rotateBlock();
  cg.getEvalBlock()._return(JExpr.TRUE);
  outgoing.buildSchema(BatchSchema.SelectionVectorMode.NONE);
  try {
    SampleCopier sampleCopier = context.getImplementationClass(cg);
    sampleCopier.setupCopier(context, sv4, incoming, outgoing);
    return sampleCopier;
  } catch (ClassTransformationException | IOException e) {
    throw new SchemaChangeException(e);
  }
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:51,
代码来源:OrderedPartitionRecordBatch.java

示例29: init

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Registers this writer's output vector with the mutator, deriving the
 * concrete vector class from the field's minor type and data mode.
 */
@Override
public void init(OutputMutator output) throws SchemaChangeException {
  final MaterializedField schemaField = MaterializedField.create(field.getName(), type);
  @SuppressWarnings("unchecked")
  final Class<V> vectorClass = (Class<V>) TypeHelper.getValueVectorClass(type.getMinorType(), type.getMode());
  vector = output.addField(schemaField, vectorClass);
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:8,
代码来源:AbstractWriter.java

示例30: setup

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Registers the single repeated-VARCHAR output vector this text reader
 * writes parsed fields into.
 */
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  final MaterializedField rowField =
      MaterializedField.create(ref, Types.repeated(TypeProtos.MinorType.VARCHAR));
  try {
    vector = output.addField(rowField, RepeatedVarCharVector.class);
  } catch (Exception e) {
    handleAndRaise("Failure in setting up reader", e);
  }
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:10,
代码来源:DrillTextRecordReader.java

示例31: containsComplexVectors

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Returns true when any column in the schema has a complex (MAP or LIST)
 * minor type.
 */
private boolean containsComplexVectors(BatchSchema schema) {
  for (MaterializedField field : schema) {
    final MinorType minorType = field.getType().getMinorType();
    if (minorType == MinorType.MAP || minorType == MinorType.LIST) {
      return true;
    }
  }
  return false;
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:13,
代码来源:ParquetRecordWriter.java

示例32: getPrimitiveType

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Translates a non-complex Drill field into the equivalent Parquet primitive
 * type, carrying over repetition, original type, decimal metadata, and
 * fixed-length information.
 */
private PrimitiveType getPrimitiveType(MaterializedField field) {
  final MinorType minorType = field.getType().getMinorType();
  final PrimitiveTypeName primitiveTypeName = ParquetTypeHelper.getPrimitiveTypeNameForMinorType(minorType);
  final OriginalType originalType = ParquetTypeHelper.getOriginalTypeForMinorType(minorType);
  final DecimalMetadata decimalMetadata = ParquetTypeHelper.getDecimalMetadataForField(field);
  final Repetition repetition = ParquetTypeHelper.getRepetitionForDataMode(field.getDataMode());
  final int length = ParquetTypeHelper.getLengthForMinorType(minorType);
  return new PrimitiveType(repetition, primitiveTypeName, length, field.getLastName(), originalType, decimalMetadata, null);
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:11,
代码来源:ParquetRecordWriter.java

示例33: getDecimalMetadataForField

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * Returns precision/scale metadata for decimal fields, or null for any
 * non-decimal minor type.
 */
public static DecimalMetadata getDecimalMetadataForField(MaterializedField field) {
  final MinorType minorType = field.getType().getMinorType();
  final boolean isDecimal =
      minorType == MinorType.DECIMAL9
      || minorType == MinorType.DECIMAL18
      || minorType == MinorType.DECIMAL28SPARSE
      || minorType == MinorType.DECIMAL28DENSE
      || minorType == MinorType.DECIMAL38SPARSE
      || minorType == MinorType.DECIMAL38DENSE;
  return isDecimal ? new DecimalMetadata(field.getPrecision(), field.getScale()) : null;
}
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:14,
代码来源:ParquetTypeHelper.java

示例34: getNewVector

点赞 2

import org.apache.drill.exec.record.MaterializedField; //导入依赖的package包/类
/**
 * (FreeMarker template source — the &lt;#list&gt; directives expand to one case
 * per generated minor type.) Instantiates the concrete ValueVector
 * implementation for a field, dispatching on minor type and data mode.
 *
 * NOTE(review): the MAP and LIST cases have no break/default arm, so an
 * OPTIONAL MAP or a non-REPEATED LIST falls through to the following case
 * labels — confirm this fall-through is intentional upstream.
 *
 * @throws UnsupportedOperationException for any type/mode combination with no vector class
 */
public static ValueVector getNewVector(MaterializedField field, BufferAllocator allocator, CallBack callBack){
    MajorType type = field.getType();

    switch (type.getMinorType()) {
    
    
    case MAP:
      switch (type.getMode()) {
      case REQUIRED:
        return new MapVector(field, allocator, callBack);
      case REPEATED:
        return new RepeatedMapVector(field, allocator, callBack);
      }
    case LIST:
      switch (type.getMode()) {
      case REPEATED:
        return new RepeatedListVector(field, allocator, callBack);
      }    
<#list vv.  types as type>
  <#list type.minor as minor>
    case ${minor.class?upper_case}:
      switch (type.getMode()) {
        case REQUIRED:
          return new ${minor.class}Vector(field, allocator);
        case OPTIONAL:
          return new Nullable${minor.class}Vector(field, allocator);
        case REPEATED:
          return new Repeated${minor.class}Vector(field, allocator);
      }
  </#list>
</#list>
    case GENERIC_OBJECT:
      return new ObjectVector(field, allocator)        ;
    default:
      break;
    }
    // All ValueVector types have been handled.
    throw new UnsupportedOperationException(buildErrorMessage("get new vector", type));
  }
 

开发者ID:skhalifa,
项目名称:QDrill,
代码行数:40,
代码来源:TypeHelper.java


版权声明:本文转自网络文章,转载此文章仅为分享知识,如有侵权,请联系管理员进行删除。
喜欢 (0)