Package org.apache.drill.exec.record

Examples of org.apache.drill.exec.record.MaterializedField
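
MaterializedField is Drill's description of a single column in a record batch: a name (held as a SchemaPath) plus a MajorType, which pairs a minor type such as INT or VARCHAR with a data mode (REQUIRED, OPTIONAL, or REPEATED). The snippets below show the class being used to create value vectors, build output schemas, and report schema details.

As a primer, here is a minimal sketch of the workflow most of the examples follow. The column name "myColumn" is made up for illustration, and the allocator is assumed to be an in-scope BufferAllocator:

    import org.apache.drill.common.expression.SchemaPath;
    import org.apache.drill.common.types.TypeProtos;
    import org.apache.drill.common.types.Types;
    import org.apache.drill.exec.expr.TypeHelper;
    import org.apache.drill.exec.memory.BufferAllocator;
    import org.apache.drill.exec.record.MaterializedField;
    import org.apache.drill.exec.vector.ValueVector;

    // Describe the column: a required (non-nullable) BIGINT named "myColumn".
    MaterializedField field = MaterializedField.create(
        SchemaPath.getSimplePath("myColumn"),
        Types.required(TypeProtos.MinorType.BIGINT));

    // 'allocator' is assumed to exist (e.g. obtained from an operator context).
    // TypeHelper picks the matching vector implementation; then allocate it.
    ValueVector vector = TypeHelper.getNewVector(field, allocator);
    vector.allocateNew();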


      aggrValuesContainer = new VectorContainer();

      ValueVector vector;

      for (int i = 0; i < materializedValueFields.length; i++) {
        MaterializedField outputField = materializedValueFields[i];
        // Create a type-specific ValueVector for this value
        vector = TypeHelper.getNewVector(outputField, allocator);
        vector.allocateNew();
        // Clamp the batch capacity to the smallest vector allocated so far
        capacity = Math.min(capacity, vector.getValueCapacity());
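
In this example each aggregate output value already has a MaterializedField, so a matching vector is created and allocated per field, and the batch capacity is clamped to the smallest vector capacity observed.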


    throw new UnsupportedOperationException("Unable to find sql accessor for minor type [" +
        vector.getField().getType().getMinorType() + "] and mode [" +
        vector.getField().getType().getMode() + "]");
  }

  /** Creates a vector for the child field {@code name} beneath {@code parentPath}. */
  public static ValueVector getNewVector(SchemaPath parentPath, String name, BufferAllocator allocator, MajorType type) {
    SchemaPath child = parentPath.getChild(name);
    MaterializedField field = MaterializedField.create(child, type);
    return getNewVector(field, allocator);
  }
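
A hedged usage sketch for this overload; the parent path "a" and child name "b" are made up, and an in-scope BufferAllocator named allocator is assumed:

    SchemaPath parent = SchemaPath.getSimplePath("a");
    ValueVector v = TypeHelper.getNewVector(
        parent, "b", allocator, Types.optional(TypeProtos.MinorType.VARCHAR));
    // Builds a MaterializedField for the child path `a`.`b`, then delegates
    // to getNewVector(field, allocator).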

    List<String> formats = Lists.newArrayList();
    for (VectorWrapper<?> vw : va) {
      int columnWidth = getColumnWidth(columnWidths, columnIndex);
      width += columnWidth + 2;
      formats.add("| %-" + columnWidth + "s");
      MaterializedField field = vw.getValueVector().getField();
      // Header cell: column name plus its minor type and mode, e.g. name<INT(OPTIONAL)>
      columns.add(field.getPath().getAsUnescapedPath() + "<" + field.getType().getMinorType()
          + "(" + field.getType().getMode() + ")" + ">");
      columnIndex++;
    }

    int rows = va.getRecordCount();
    for (int row = 0; row < rows; row++) {

        if (joinType == JoinRelType.RIGHT && inputType.getMode() == DataMode.REQUIRED) {
          outputType = Types.overrideMode(inputType, DataMode.OPTIONAL);
        } else {
          outputType = inputType;
        }
        MaterializedField newField = MaterializedField.create(w.getField().getPath(), outputType);
        container.addOrGet(newField);
      }

      for (VectorWrapper<?> w : right) {
        MajorType inputType = w.getField().getType();
        MajorType outputType;
        if (joinType == JoinRelType.LEFT && inputType.getMode() == DataMode.REQUIRED) {
          outputType = Types.overrideMode(inputType, DataMode.OPTIONAL);
        } else {
          outputType = inputType;
        }
        MaterializedField newField = MaterializedField.create(w.getField().getPath(), outputType);
        container.addOrGet(newField);
      }
    }

    for (VectorWrapper<?> w : container) {
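
The pattern here widens nullability for outer joins: in a RIGHT join the left side's columns can be filled with nulls (and vice versa for a LEFT join), so any REQUIRED input type is rewritten as OPTIONAL before the output MaterializedField is created. The same conversion in isolation, as a minimal sketch:

    // Required INT widened to optional (nullable) INT.
    TypeProtos.MajorType nullable = Types.overrideMode(
        Types.required(TypeProtos.MinorType.INT), TypeProtos.DataMode.OPTIONAL);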

      final LogicalExpression expr = ExpressionTreeMaterializer.materialize(ne.getExpr(), incoming, collector, context.getFunctionRegistry());
      if (expr == null) {
        continue;
      }

      final MaterializedField outputField = MaterializedField.create(ne.getRef(), expr.getMajorType());
      // Register the field in the output container (reuses an existing vector of the same type)
      container.addOrGet(outputField);
      TypedFieldId id = container.getValueVectorId(outputField.getPath());
      assert id != null : "Got null TypedFieldId";
      valueExprs.add(new ValueVectorWriteExpression(id, expr, true));
    }

    int j = 0;
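
Here the MaterializedField is derived from a materialized expression's MajorType; addOrGet registers it in the output container, and the resulting TypedFieldId tells the generated write expression which output vector to target.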

    ColumnDescriptor column;
    ColumnChunkMetaData columnChunkMetaData;
    int columnsToScan = 0;
    mockRecordsRead = 0;

    MaterializedField field;
//    ParquetMetadataConverter metaConverter = new ParquetMetadataConverter();
    FileMetaData fileMetaData;

    logger.debug("Reading row group({}) with {} records in file {}.", rowGroupIndex, footer.getBlocks().get(rowGroupIndex).getRowCount(),
        hadoopPath.toUri().getPath());
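
This reader setup only declares the MaterializedField variable; presumably it is populated per Parquet column as the row group's metadata is walked, but the remainder of the method is not shown.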

    this.mapKey = String.format("%s_%d", context.getHandle().getQueryId(), context.getHandle().getMajorFragmentId());
    this.minorFragmentSampleCount = cache.getCounter(mapKey);

    SchemaPath outputPath = popConfig.getRef();
    MaterializedField outputField = MaterializedField.create(outputPath, Types.required(TypeProtos.MinorType.INT));
    this.partitionKeyVector = (IntVector) TypeHelper.getNewVector(outputField, oContext.getAllocator());

  }
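
This constructor fragment builds a required INT field at the operator's configured output path and immediately materializes it as an IntVector that will hold partition keys.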

      final LogicalExpression expr = ExpressionTreeMaterializer.materialize(od.getExpr(), incoming, collector, context.getFunctionRegistry());
      SchemaPath schemaPath = SchemaPath.getSimplePath("f" + i++);
      // Copy the materialized expression's type, but force REQUIRED mode
      TypeProtos.MajorType.Builder builder = TypeProtos.MajorType.newBuilder().mergeFrom(expr.getMajorType())
          .clearMode().setMode(TypeProtos.DataMode.REQUIRED);
      TypeProtos.MajorType newType = builder.build();
      MaterializedField outputField = MaterializedField.create(schemaPath, newType);
      if (collector.hasErrors()) {
        throw new SchemaChangeException(String.format(
            "Failure while trying to materialize incoming schema.  Errors:\n %s.", collector.toErrorString()));
      }
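
The builder idiom above copies the materialized expression's type while pinning the mode to REQUIRED. The same idiom in isolation (the types are chosen arbitrarily):

    TypeProtos.MajorType optInt = Types.optional(TypeProtos.MinorType.INT);
    TypeProtos.MajorType reqInt = TypeProtos.MajorType.newBuilder()
        .mergeFrom(optInt)
        .clearMode()
        .setMode(TypeProtos.DataMode.REQUIRED)
        .build();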

  private String composeTooBigMsg(int recordId, RecordBatch incoming) {
    String msg = String.format("Record %d is too big to fit into the allocated memory of ValueVector.", recordId);
    msg += " Schema: ";
    for (int i = 0; i < incoming.getSchema().getFieldCount(); i++) {
      MaterializedField f = incoming.getSchema().getColumn(i);
      msg += f.getPath().getRootSegment().getPath() + " ";
    }
    return msg;
  }

    return x;
  }

  /** Builds a MaterializedField for {@code name}; {@code length} is unused in this excerpt. */
  private MaterializedField getVector(String name, MajorType type, int length) {
    assert context != null : "Context shouldn't be null.";
    MaterializedField f = MaterializedField.create(SchemaPath.getSimplePath(name), type);
    return f;
  }
