Package org.apache.drill.exec.physical.impl.materialize

Examples of org.apache.drill.exec.physical.impl.materialize.QueryWritableBatch


    cleanupAndSendResult(result);
  }
 
  void cleanupAndSendResult(QueryResult result){
    bee.retireForeman(this);
    initiatingClient.sendResult(new ResponseSendListener(), new QueryWritableBatch(result));
  }
View Full Code Here


              .setRowCount(0) //
              .addError(ErrorHelper.logAndConvertError(context.getIdentity(), "Screen received stop request sent.", context.getFailureCause(), logger))
              .setDef(RecordBatchDef.getDefaultInstance()) //
              .setIsLastChunk(true) //
              .build();
          QueryWritableBatch batch = new QueryWritableBatch(header);
          connection.sendResult(listener, batch);

          return false;
      }
      case NONE: {
        if(materializer == null){
          // receive no results.
          context.batchesCompleted.inc(1);
          context.recordsCompleted.inc(incoming.getRecordCount());
          QueryResult header = QueryResult.newBuilder() //
              .setQueryId(context.getHandle().getQueryId()) //
              .setRowCount(0) //
              .setDef(RecordBatchDef.getDefaultInstance()) //
              .setIsLastChunk(true) //
              .build();
          QueryWritableBatch batch = new QueryWritableBatch(header);
          connection.sendResult(listener, batch);

        }else{
          QueryWritableBatch batch = materializer.convertNext(true);
          connection.sendResult(listener, batch);
        }

        return false;
      }
      case OK_NEW_SCHEMA:
        materializer = new VectorRecordMaterializer(context, incoming);
        // fall through.
      case OK:
        context.batchesCompleted.inc(1);
        context.recordsCompleted.inc(incoming.getRecordCount());
        QueryWritableBatch batch = materializer.convertNext(false);
        connection.sendResult(listener, batch);
        return true;
      default:
        throw new UnsupportedOperationException();
      }
View Full Code Here

              .addError(ErrorHelper.logAndConvertError(context.getIdentity(), "Screen received stop request sent.",
                context.getFailureCause(), logger, verbose))
              .setDef(RecordBatchDef.getDefaultInstance()) //
              .setIsLastChunk(true) //
              .build();
          QueryWritableBatch batch = new QueryWritableBatch(header);
          stats.startWait();
          try {
            connection.sendResult(listener, batch);
          } finally {
            stats.stopWait();
          }
          sendCount.increment();

          return false;
      }
      case NONE: {
        sendCount.waitForSendComplete();
//        context.getStats().batchesCompleted.inc(1);
        QueryWritableBatch batch;
        if (!first) {
          QueryResult header = QueryResult.newBuilder() //
              .setQueryId(context.getHandle().getQueryId()) //
              .setRowCount(0) //
              .setDef(RecordBatchDef.getDefaultInstance()) //
              .setIsLastChunk(true) //
              .build();
          batch = new QueryWritableBatch(header);
        } else {
          batch = QueryWritableBatch.getEmptyBatchWithSchema(context.getHandle().getQueryId(), 0, true, incoming.getSchema());
        }
        stats.startWait();
        try {
          connection.sendResult(listener, batch);
        } finally {
          stats.stopWait();
        }
        sendCount.increment();

        return false;
      }
      case OK_NEW_SCHEMA:
        materializer = new VectorRecordMaterializer(context, incoming);
        // fall through.
      case OK:
//        context.getStats().batchesCompleted.inc(1);
//        context.getStats().recordsCompleted.inc(incoming.getRecordCount());
        QueryWritableBatch batch = materializer.convertNext(false);
        updateStats(batch);
        stats.startWait();
        try {
          connection.sendResult(listener, batch);
        } finally {
View Full Code Here

    cleanupAndSendResult(result);
  }

  void cleanupAndSendResult(QueryResult result){
    bee.retireForeman(this);
    initiatingClient.sendResult(new ResponseSendListener(), new QueryWritableBatch(result), true);
    state.updateState(QueryState.RUNNING, QueryState.COMPLETED);
  }
View Full Code Here

    cleanupAndSendResult(result);
  }

  void cleanupAndSendResult(QueryResult result) {
    bee.retireForeman(this);
    initiatingClient.sendResult(new ResponseSendListener(), new QueryWritableBatch(result), true);
    state.updateState(state.getState(), result.getQueryState());

    this.fragmentManager.getStatus().updateQueryStateInStore();
  }
View Full Code Here

          incoming.buildSchema();
        } finally {
          stats.startProcessing();
        }

        QueryWritableBatch batch = QueryWritableBatch.getEmptyBatchWithSchema(context.getHandle().getQueryId(), 0, false, incoming.getSchema());
        stats.startWait();
        try {
          connection.sendResult(listener, batch);
        } finally {
          stats.stopWait();
View Full Code Here

              .addError(ErrorHelper.logAndConvertMessageError(context.getIdentity(), "Query stopeed.",
                context.getFailureCause(), logger, verbose))
              .setDef(RecordBatchDef.getDefaultInstance()) //
              .setIsLastChunk(true) //
              .build();
          QueryWritableBatch batch = new QueryWritableBatch(header);
          stats.startWait();
          try {
            connection.sendResult(listener, batch);
          } finally {
            stats.stopWait();
          }
          sendCount.increment();

          return false;
      }
      case NONE: {
        this.internalStop();
        QueryWritableBatch batch;
        QueryResult header = QueryResult.newBuilder() //
            .setQueryId(context.getHandle().getQueryId()) //
            .setRowCount(0) //
            .setDef(RecordBatchDef.getDefaultInstance()) //
            .setIsLastChunk(true) //
            .build();
        batch = new QueryWritableBatch(header);
        stats.startWait();
        try {
          connection.sendResult(listener, batch);
        } finally {
          stats.stopWait();
        }
        sendCount.increment();

        return false;
      }
      case OK_NEW_SCHEMA:
        materializer = new VectorRecordMaterializer(context, incoming);
        // fall through.
      case OK:
//        context.getStats().batchesCompleted.inc(1);
//        context.getStats().recordsCompleted.inc(incoming.getRecordCount());
        QueryWritableBatch batch = materializer.convertNext(false);
        updateStats(batch);
        stats.startWait();
        try {
          connection.sendResult(listener, batch);
        } finally {
View Full Code Here

              .addError(ErrorHelper.logAndConvertError(context.getIdentity(), "Screen received stop request sent.",
                context.getFailureCause(), logger, verbose))
              .setDef(RecordBatchDef.getDefaultInstance()) //
              .setIsLastChunk(true) //
              .build();
          QueryWritableBatch batch = new QueryWritableBatch(header);
          stats.startWait();
          try {
            connection.sendResult(listener, batch);
          } finally {
            stats.stopWait();
          }
          sendCount.increment();

          return false;
      }
      case NONE: {
        this.internalStop();
        QueryWritableBatch batch;
        if (!first) {
          QueryResult header = QueryResult.newBuilder() //
              .setQueryId(context.getHandle().getQueryId()) //
              .setRowCount(0) //
              .setDef(RecordBatchDef.getDefaultInstance()) //
              .setIsLastChunk(true) //
              .build();
          batch = new QueryWritableBatch(header);
        } else {
          batch = QueryWritableBatch.getEmptyBatchWithSchema(context.getHandle().getQueryId(), 0, true, incoming.getSchema());
        }
        stats.startWait();
        try {
          connection.sendResult(listener, batch);
        } finally {
          stats.stopWait();
        }
        sendCount.increment();

        return false;
      }
      case OK_NEW_SCHEMA:
        materializer = new VectorRecordMaterializer(context, incoming);
        // fall through.
      case OK:
//        context.getStats().batchesCompleted.inc(1);
//        context.getStats().recordsCompleted.inc(incoming.getRecordCount());
        QueryWritableBatch batch = materializer.convertNext(false);
        updateStats(batch);
        stats.startWait();
        try {
          connection.sendResult(listener, batch);
        } finally {
View Full Code Here

    cleanupAndSendResult(result);
  }

  void cleanupAndSendResult(QueryResult result){
    bee.retireForeman(this);
    initiatingClient.sendResult(new ResponseSendListener(), new QueryWritableBatch(result), true);
    state.updateState(QueryState.RUNNING, QueryState.COMPLETED);
  }
View Full Code Here

TOP

Related Classes of org.apache.drill.exec.physical.impl.materialize.QueryWritableBatch

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware@gmail.com.