Examples of OutputCollector


Examples of backtype.storm.task.OutputCollector

        AbstractMetricsBolt bolt = getBolt();

        outputCollector = new MockOutputCollector();

        // Prepare the bolt so that it uses our mock output collector
        bolt.prepare(null, null, new OutputCollector(outputCollector));

        // Emit all events
        for (Map<String, Object> event : events) {
            Tuple tuple = Mockito.mock(Tuple.class);
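
The mock collector used in this test is not shown on the page. A minimal sketch, assuming it implements backtype.storm.task.IOutputCollector (the interface the OutputCollector wrapper delegates to in backtype-era Storm), records every emitted tuple so the test can make assertions afterwards. The emitted list and getEmitted() accessor are hypothetical names, and later Storm versions declare additional methods on the interface:

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;

    import backtype.storm.task.IOutputCollector;
    import backtype.storm.tuple.Tuple;

    // Hypothetical mock: records emitted values instead of sending them downstream.
    public class MockOutputCollector implements IOutputCollector {
        private final List<List<Object>> emitted = new ArrayList<List<Object>>();

        @Override
        public List<Integer> emit(String streamId, Collection<Tuple> anchors, List<Object> tuple) {
            emitted.add(tuple);               // capture for later assertions
            return new ArrayList<Integer>();  // no downstream task ids in a test
        }

        @Override
        public void emitDirect(int taskId, String streamId, Collection<Tuple> anchors, List<Object> tuple) {
            emitted.add(tuple);
        }

        @Override
        public void ack(Tuple input) { }

        @Override
        public void fail(Tuple input) { }

        @Override
        public void reportError(Throwable error) { }

        public List<List<Object>> getEmitted() {
            return emitted;
        }
    }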

Examples of backtype.storm.task.OutputCollector

            String connectionId = connection.getString("_id");
            final String connectionType = connection.getString("type");

            when(tuple.getValue(0)).thenReturn(connection);

            bolt.prepare(null, null, new OutputCollector(outputCollector));

            bolt.execute(tuple);

            if (connectionType.equals(ConnectionTypeConstants.PROJECT_HOSTING_TYPE)) {
                expectedInventoryItems = mongoClient.getProjectHostingInventoryItems(connectionId);
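
Putting the pieces together, a single-tuple bolt test in the shape of these two snippets might look like the sketch below. createBoltUnderTest() stands in for the getBolt() factory from the excerpt, and MockOutputCollector is the hypothetical recorder sketched earlier:

    import org.mockito.Mockito;

    import backtype.storm.task.OutputCollector;
    import backtype.storm.topology.IRichBolt;
    import backtype.storm.tuple.Tuple;

    public class BoltEmissionTest {
        public void singleTupleEmitsOnce() {
            MockOutputCollector mock = new MockOutputCollector();
            IRichBolt bolt = createBoltUnderTest();

            // Config and topology context are unused by the bolt under test, hence the nulls.
            bolt.prepare(null, null, new OutputCollector(mock));

            // Stub only the accessor the bolt actually calls.
            Tuple tuple = Mockito.mock(Tuple.class);
            Mockito.when(tuple.getValue(0)).thenReturn("payload");

            bolt.execute(tuple);

            if (mock.getEmitted().size() != 1) {
                throw new AssertionError("expected exactly one emission");
            }
        }

        private IRichBolt createBoltUnderTest() {
            throw new UnsupportedOperationException("supply the bolt under test");
        }
    }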

Examples of backtype.storm.task.OutputCollector

        if(_delegate instanceof TimeoutCallback) {
            callback = new TimeoutItems();
        }
        _tracked = new TimeCacheMap<Object, TrackingInfo>(context.maxTopologyMessageTimeout(), callback);
        _collector = collector;
        _delegate.prepare(config, context, new OutputCollector(new CoordinatedOutputCollector(collector)));
        for(String component: Utils.get(context.getThisTargets(),
                                        Constants.COORDINATED_STREAM_ID,
                                        new HashMap<String, Grouping>())
                                        .keySet()) {
            for(Integer task: context.getComponentTasks(component)) {
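
The line worth pausing on is prepare: the delegate bolt is handed a new OutputCollector built around a CoordinatedOutputCollector, which itself wraps the real collector. CoordinatedOutputCollector is a decorator: it intercepts each call to track emissions for coordination, then forwards to the delegate. A stripped-down sketch of that forwarding shape (the tracking logic of the real class is omitted):

    import java.util.Collection;
    import java.util.List;

    import backtype.storm.task.IOutputCollector;
    import backtype.storm.tuple.Tuple;

    // Minimal forwarding decorator in the style of CoordinatedOutputCollector.
    public class ForwardingOutputCollector implements IOutputCollector {
        private final IOutputCollector delegate;

        public ForwardingOutputCollector(IOutputCollector delegate) {
            this.delegate = delegate;
        }

        @Override
        public List<Integer> emit(String stream, Collection<Tuple> anchors, List<Object> tuple) {
            // the real class updates per-anchor emit counts here
            return delegate.emit(stream, anchors, tuple);
        }

        @Override
        public void emitDirect(int task, String stream, Collection<Tuple> anchors, List<Object> tuple) {
            delegate.emitDirect(task, stream, anchors, tuple);
        }

        @Override
        public void ack(Tuple tuple) { delegate.ack(tuple); }

        @Override
        public void fail(Tuple tuple) { delegate.fail(tuple); }

        @Override
        public void reportError(Throwable error) { delegate.reportError(error); }
    }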

Examples of com.btaz.datautil.files.mapreduce.OutputCollector

            throw new DataUtilException("The comparator parameter can not be a null value");
        }

        // reduce all data
        FileInputStream inputStream;
        OutputCollector collector = new OutputCollector(outputFile);
        QueueReader queueReader;

        // Aggregate all items matching the comparator and call the Reducable callback
        try {
            inputStream = new FileInputStream(inputFile);
            BufferedReader br = new BufferedReader(new InputStreamReader(inputStream,
                    DataUtilDefaults.charSet));
            queueReader = new QueueReader(br);

            // reduce data
            String prev = null;
            String curr;
            ArrayList<String> items = new ArrayList<String>();
            while(true) {
                curr = queueReader.readLine();
                if(curr == null) {
                    // no more data
                    reduceItems(collector, reducer, items);
                    break;
                } else if(prev == null) {
                    // first row in a new batch
                    items.add(curr);
                    prev = curr;
                } else if(comparator.compare(prev, curr) == 0) {
                    // same keys
                    items.add(curr);
                    prev = curr;
                } else {
                    // different keys
                    queueReader.push(curr);
                    reduceItems(collector, reducer, items);
                    items.clear();
                    prev = null;
                }
            }
            collector.close();
            inputStream.close();
        } catch (IOException e) {
            throw new DataUtilException(e);
        } catch (MapReduceException e) {
            throw new DataUtilException("Irrecoverable reduce operation", e);
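
The while loop implements run-grouping over sorted input: consecutive lines that compare equal are batched, and each batch is handed to the reducer in one reduceItems call; queueReader.push puts the first line of the next run back so the next iteration re-reads it. The same control flow in plain Java, with the library types stripped out (groupSortedLines is illustrative, not part of the btaz API):

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.StringReader;
    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    public class RunGrouping {
        // Collect each run of comparator-equal consecutive lines into one batch.
        static List<List<String>> groupSortedLines(BufferedReader reader,
                                                   Comparator<String> comparator) throws IOException {
            List<List<String>> batches = new ArrayList<List<String>>();
            List<String> items = new ArrayList<String>();
            String prev = null;
            String curr;
            while ((curr = reader.readLine()) != null) {
                if (prev != null && comparator.compare(prev, curr) != 0) {
                    batches.add(new ArrayList<String>(items)); // key changed: flush the run
                    items.clear();
                }
                items.add(curr);
                prev = curr;
            }
            if (!items.isEmpty()) {
                batches.add(items); // flush the final run
            }
            return batches;
        }

        public static void main(String[] args) throws IOException {
            BufferedReader in = new BufferedReader(new StringReader("a,1\na,2\nb,1\n"));
            // Compare by the comma-delimited key field only.
            Comparator<String> byKey = new Comparator<String>() {
                public int compare(String x, String y) {
                    return x.split(",")[0].compareTo(y.split(",")[0]);
                }
            };
            System.out.println(groupSortedLines(in, byKey)); // [[a,1, a,2], [b,1]]
        }
    }

This version flushes the finished batch before consuming the new line, which removes the need for the push-back that the original achieves with queueReader.push(curr).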

Examples of com.btaz.datautil.files.mapreduce.OutputCollector

        if(mapper == null) {
            throw new DataUtilException("The mappable parameter can not be a null value");
        }

        // setup output collector
        OutputCollector collector = new OutputCollector(workDir, "map");

        // map all data
        try {
            for(File file : inputFiles) {
                FileInputStream inputStream;

                try {
                    inputStream = new FileInputStream(file);
                    BufferedReader br = new BufferedReader(new InputStreamReader(inputStream,
                            DataUtilDefaults.charSet));

                    // map data
                    String row;
                    while((row = br.readLine()) != null) {
                        mapper.map(row, collector);
                    }

                    inputStream.close();
                } catch (IOException e) {
                    throw new DataUtilException(e);
                } catch (MapReduceException e) {
                    throw new DataUtilException("Irrecoverable map operation", e);
                }
            }
        } finally {
            collector.close();
        }

        // return output files
        return collector.getFiles();
    }
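
Judging from the call mapper.map(row, collector), the mapper callback receives one input line at a time together with the shared collector. The interface shapes below are assumptions inferred from the snippet, not the verified btaz API; LineCollector.write in particular is a hypothetical emit method:

    // Hypothetical shapes inferred from mapper.map(row, collector) above.
    interface LineCollector {
        void write(String line);
    }

    interface LineMapper {
        void map(String row, LineCollector collector);
    }

    // Example mapper: re-key each comma-separated row as a tab-delimited
    // (key, value) line so a later sort can group rows by the first field.
    class FirstFieldMapper implements LineMapper {
        @Override
        public void map(String row, LineCollector collector) {
            String[] parts = row.split(",", 2);
            if (parts.length == 2) {
                collector.write(parts[0] + "\t" + parts[1]);
            }
            // rows without a delimiter are silently dropped
        }
    }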

Examples of com.btaz.util.mr.OutputCollector

            throw new DataUtilException("The comparator parameter can not be a null value");
        }

        // reduce all data
        FileInputStream inputStream;
        OutputCollector collector = new OutputCollector(outputFile);
        QueueReader queueReader;

        // Aggregate all items matching the comparator and call the Reducable callback
        try {
            inputStream = new FileInputStream(inputFile);
            BufferedReader br = new BufferedReader(new InputStreamReader(inputStream,
                    DataUtilDefaults.charSet));
            queueReader = new QueueReader(br);

            // reduce data
            String prev = null;
            String curr;
            ArrayList<String> items = new ArrayList<String>();
            while(true) {
                curr = queueReader.readLine();
                if(curr == null) {
                    // no more data
                    reduceItems(collector, reducer, items);
                    break;
                } else if(prev == null) {
                    // first row in a new batch
                    items.add(curr);
                    prev = curr;
                } else if(comparator.compare(prev, curr) == 0) {
                    // same keys
                    items.add(curr);
                    prev = curr;
                } else {
                    // different keys
                    queueReader.push(curr);
                    reduceItems(collector, reducer, items);
                    items.clear();
                    prev = null;
                }
            }
            collector.close();
            inputStream.close();
        } catch (IOException e) {
            throw new DataUtilException(e);
        } catch (MapReduceException e) {
            throw new DataUtilException("Irrecoverable reduce operation", e);
View Full Code Here

Examples of org.apache.hadoop.mapred.OutputCollector

    @Override
    public void sink( FlowProcess<JobConf> flowProcess, SinkCall<Object[], OutputCollector> sinkCall ) throws IOException {
        // it's OK to pass NULL as the value in collect() below; the collector then does not write anything
        TupleEntry tupleEntry = sinkCall.getOutgoingEntry();
        OutputCollector outputCollector = sinkCall.getOutput();
        if( updateBy != null )
        {
            Tuple allValues = tupleEntry.selectTuple( updateValueFields );
            Tuple updateValues = tupleEntry.selectTuple( updateByFields );

            allValues = cleanTuple( allValues );

            TupleRecord key = new TupleRecord( allValues );

            if( updateValues.equals( updateIfTuple ) )
                outputCollector.collect( key, null );
            else
                outputCollector.collect( key, key );

            return;
        }

        Tuple result = tupleEntry.selectTuple( getSinkFields() );

        result = cleanTuple( result );

        outputCollector.collect( new TupleRecord( result ), null );
    }
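
Outside Cascading, org.apache.hadoop.mapred.OutputCollector is the emit channel of the old "mapred" MapReduce API: map and reduce receive it as a parameter and call collect(key, value) once per output pair. The classic word-count mapper shows the plain usage:

    import java.io.IOException;
    import java.util.StringTokenizer;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.Mapper;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reporter;

    // Old-API word-count mapper: one collect() call per token.
    public class TokenCountMapper extends MapReduceBase
            implements Mapper<LongWritable, Text, Text, IntWritable> {

        private static final IntWritable ONE = new IntWritable(1);
        private final Text word = new Text();

        @Override
        public void map(LongWritable offset, Text line,
                        OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            StringTokenizer tokens = new StringTokenizer(line.toString());
            while (tokens.hasMoreTokens()) {
                word.set(tokens.nextToken());
                output.collect(word, ONE); // emit (token, 1)
            }
        }
    }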

Examples of org.apache.hadoop.mapred.OutputCollector

  @Override
  public void sink(FlowProcess<JobConf> flowProcess, SinkCall<Object[], OutputCollector> sinkCall)
      throws IOException {
    TupleEntry tupleEntry = sinkCall.getOutgoingEntry();
    OutputCollector outputCollector = sinkCall.getOutput();
    Tuple key = tupleEntry.selectTuple(keyField);
    ImmutableBytesWritable keyBytes = (ImmutableBytesWritable) key.getObject(0);
    Put put = new Put(keyBytes.get());

    for (int i = 0; i < valueFields.length; i++) {
      Fields fieldSelector = valueFields[i];
      TupleEntry values = tupleEntry.selectEntry(fieldSelector);

      for (int j = 0; j < values.getFields().size(); j++) {
        Fields fields = values.getFields();
        Tuple tuple = values.getTuple();

        ImmutableBytesWritable valueBytes = (ImmutableBytesWritable) tuple.getObject(j);
        put.add(Bytes.toBytes(familyNames[i]), Bytes.toBytes((String) fields.get(j)), valueBytes.get());
      }
    }

    outputCollector.collect(null, put);
  }
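
The null key in collect(null, put) is deliberate: the old-API HBase TableOutputFormat writes only the Put value and ignores the key. A minimal sketch of the job wiring that makes this work, assuming the deprecated org.apache.hadoop.hbase.mapred bindings (the table name "metrics" is illustrative):

    import org.apache.hadoop.hbase.mapred.TableOutputFormat;
    import org.apache.hadoop.mapred.JobConf;

    public class HBaseSinkWiring {
        // Route collect(null, put) into an HBase table via the old-API output format.
        static JobConf configure(JobConf conf) {
            conf.setOutputFormat(TableOutputFormat.class);
            conf.set(TableOutputFormat.OUTPUT_TABLE, "metrics");
            return conf;
        }
    }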

Examples of org.apache.hadoop.mapred.OutputCollector

        // called at all in this task). If the reducer still generates output,
        // which is very uncommon and which we may not have to support, we
        // don't write that output to HDFS; we consume/collect it only to
        // keep the reducer from hanging forever.

        OutputCollector collector = new OutputCollector() {
          public void collect(Object key, Object value)
            throws IOException {
            //just consume it, no need to write the record anywhere
          }
        };
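
Because OutputCollector declares a single method, a discard-everything collector like the one above is nearly a one-liner. A generically typed version of the same idea makes the intent explicit:

    import java.io.IOException;

    import org.apache.hadoop.mapred.OutputCollector;

    public class DiscardingCollectors {
        // A typed no-op collector: drains records without writing them anywhere.
        static <K, V> OutputCollector<K, V> devNull() {
            return new OutputCollector<K, V>() {
                @Override
                public void collect(K key, V value) throws IOException {
                    // intentionally discard
                }
            };
        }
    }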

Examples of org.apache.hadoop.mapred.OutputCollector

    final OutputCollector output, Reporter r)
    throws IOException
  {
    final String collection = Nutchwax.getCollectionFromWaxKey(key);
   
    final OutputCollector oo = new OutputCollector()
    {
      public void collect(WritableComparable k, Writable v)
        throws IOException
      {
        output.collect(Nutchwax.generateWaxKey(k, collection), v);
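
This NutchWAX snippet applies the same decorator idea as the Storm example earlier: wrap the real collector and rewrite each key before delegating. A generic sketch of the pattern (PrefixingCollector and its prefix transform are illustrative stand-ins for Nutchwax.generateWaxKey):

    import java.io.IOException;

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.OutputCollector;

    // Illustrative decorator: rewrites every key before delegating.
    public class PrefixingCollector implements OutputCollector<Text, Text> {
        private final OutputCollector<Text, Text> delegate;
        private final String prefix;

        public PrefixingCollector(OutputCollector<Text, Text> delegate, String prefix) {
            this.delegate = delegate;
            this.prefix = prefix;
        }

        @Override
        public void collect(Text key, Text value) throws IOException {
            delegate.collect(new Text(prefix + key.toString()), value);
        }
    }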