Package org.apache.drill.exec.record

Examples of org.apache.drill.exec.record.BatchSchema
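The excerpts below are taken from Apache Drill operator code and show the common ways a BatchSchema is used: iterating its MaterializedFields, checking its selection-vector mode, validating incoming batches, and comparing schemas for equality. As a quick orientation, here is a minimal sketch of a hypothetical helper (not part of Drill) that inspects a schema using only the accessors that also appear in the excerpts below:

import org.apache.drill.common.types.TypeProtos.MajorType;
import org.apache.drill.exec.record.BatchSchema;
import org.apache.drill.exec.record.MaterializedField;

// Hypothetical helper, not part of Drill: dumps the columns of a BatchSchema
// using only accessors that the excerpts on this page also use.
public class SchemaInspector {
  public static void describe(BatchSchema schema) {
    System.out.println("selection vector mode: " + schema.getSelectionVectorMode());
    System.out.println("field count: " + schema.getFieldCount());
    // BatchSchema is iterable over its MaterializedFields.
    for (MaterializedField field : schema) {
      MajorType type = field.getType();
      System.out.println("  " + field.getPath() + " : "
          + type.getMinorType() + " (" + type.getMode() + ")");
    }
  }
}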


  // Rebuild a working container for an SV4 batch: look up each column named in
  // the BatchSchema by its field ids and add its value vectors to a fresh
  // VectorContainer.
  public void resetQueue(VectorContainer container, SelectionVector4 v4) throws SchemaChangeException {
    assert container.getSchema().getSelectionVectorMode() == BatchSchema.SelectionVectorMode.FOUR_BYTE;
    BatchSchema schema = container.getSchema();
    VectorContainer newContainer = new VectorContainer();
    for (MaterializedField field : schema) {
      int[] ids = container.getValueVectorId(field.getPath()).getFieldIds();
      newContainer.add(container.getValueAccessorById(field.getValueClass(), ids).getValueVectors());
    }

    if (first) {
      // clear the first-batch flag once the initial batch has been seen
      first = !first;
    }

    if (state == IterOutcome.OK || state == IterOutcome.OK_NEW_SCHEMA) {
      // Guard against an empty BatchSchema and oversized batches before
      // accepting the incoming record batch.
      BatchSchema schema = incoming.getSchema();
      if (schema.getFieldCount() == 0) {
        throw new IllegalStateException("Incoming batch has an empty schema. This is not allowed.");
      }
      if (incoming.getRecordCount() > MAX_BATCH_SIZE) {
        throw new IllegalStateException(String.format("Incoming batch of %s has size %d, which is beyond the limit of %d",
            incoming.getClass().getName(), incoming.getRecordCount(), MAX_BATCH_SIZE));
      }

  // Verify that every RecordBatchLoader reports the same BatchSchema.
  private boolean isSameSchemaAmongBatches(RecordBatchLoader[] batchLoaders) {
    Preconditions.checkArgument(batchLoaders.length > 0, "0 batch is not allowed!");

    BatchSchema schema = batchLoaders[0].getSchema();

    for (int i = 1; i < batchLoaders.length; i++) {
      if (!schema.equals(batchLoaders[i].getSchema())) {
        logger.error("Schemas are different. Schema 1 : " + schema + ", Schema 2: " + batchLoaders[i].getSchema());
        return false;
      }
    }
    return true;
  }

  // If the incoming batch has the same BatchSchema as the data already loaded,
  // reuse the existing container: clear its vectors, then pair up the output
  // and input value vectors column by column via their field ids.
  private boolean load(RecordBatchData batch) {
    VectorContainer newContainer = batch.getContainer();
    if (schema != null && newContainer.getSchema().equals(schema)) {
      container.zeroVectors();
      BatchSchema schema = container.getSchema();  // local variable shadows the 'schema' field
      for (int i = 0; i < container.getNumberOfColumns(); i++) {
        MaterializedField field = schema.getColumn(i);
        MajorType type = field.getType();
        ValueVector vOut = container.getValueAccessorById(
            TypeHelper.getValueVectorClass(type.getMinorType(), type.getMode()),
            container.getValueVectorId(field.getPath()).getFieldIds()).getValueVector();
        ValueVector vIn = newContainer.getValueAccessorById(
            TypeHelper.getValueVectorClass(type.getMinorType(), type.getMode()),
            newContainer.getValueVectorId(field.getPath()).getFieldIds()).getValueVector();


    // Build a four-byte selection vector (sv4) across all buffered batches,
    // keyed by the single BatchSchema they share.
    if (batches.size() > Character.MAX_VALUE) {
      throw new SchemaChangeException("Sort cannot work on more than %d batches at a time.", (int) Character.MAX_VALUE);
    }
    if (batches.keys().size() < 1) {
      assert false : "Invalid to have an empty set of batches with no schemas.";
    }
    sv4 = new SelectionVector4(svAllocator.getAllocation(), recordCount, Character.MAX_VALUE);
    BatchSchema schema = batches.keySet().iterator().next();
    List<RecordBatchData> data = batches.get(schema);

    // now we're going to generate the sv4 pointers
    switch (schema.getSelectionVectorMode()) {
    case NONE: {
      int index = 0;
      int recordBatchId = 0;
      for (RecordBatchData d : data) {
        for (int i = 0; i < d.getRecordCount(); i++, index++) {
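The selection vector built in the excerpt above addresses records across many batches through a single four-byte index. A common layout, and the one implied by the Character.MAX_VALUE caps in the code, packs a 16-bit batch index and a 16-bit in-batch offset into each entry. The plain-Java sketch below only illustrates that packing arithmetic; it is an assumption about the layout, not Drill's SelectionVector4 implementation:

// Illustration of the assumed 16-bit/16-bit packing of an sv4 entry.
// This is not Drill's SelectionVector4 class.
public final class Sv4Packing {
  static int pack(int batchId, int recordOffset) {
    // Both halves must fit in 16 bits, which is consistent with the
    // Character.MAX_VALUE (65535) limit on the number of batches above.
    assert batchId <= Character.MAX_VALUE && recordOffset <= Character.MAX_VALUE;
    return (batchId << 16) | (recordOffset & 0xFFFF);
  }

  static int batchId(int entry) {
    return entry >>> 16;
  }

  static int recordOffset(int entry) {
    return entry & 0xFFFF;
  }

  public static void main(String[] args) {
    int entry = pack(3, 1024);
    System.out.println(batchId(entry) + " / " + recordOffset(entry)); // prints 3 / 1024
  }
}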

