Examples of OutputCollector


Examples of org.apache.hadoop.mapred.OutputCollector

   
    updateJobWithSplit(job, inputSplit);

    RecordReader in = new OldRecordReader(input);

    OutputCollector collector = new OldOutputCollector(output);

    MapRunnable runner =
        (MapRunnable)ReflectionUtils.newInstance(job.getMapRunnerClass(), job);

    runner.run(in, collector, (Reporter)reporter);
View Full Code Here

Examples of org.apache.hadoop.mapred.OutputCollector

        // called at all in this task). If reducer still generates output,
        // which is very uncommon and we may not have to support this case.
        // So we don't write this output to HDFS, but we consume/collect
        // this output just to avoid reducer hanging forever.

        // NOTE(review): deliberately discarding collector — collect() is a no-op,
        // so reducer output is drained without being written anywhere (see the
        // surrounding comment: this only exists to keep the reducer from hanging).
        OutputCollector collector = new OutputCollector() {
          public void collect(Object key, Object value)
            throws IOException {
            //just consume it, no need to write the record anywhere
          }
        };
View Full Code Here

Examples of org.apache.hadoop.mapred.OutputCollector

      // validate input split
      InputSplit split = splits[i];
      Assert.assertTrue(split instanceof TableSnapshotInputFormat.TableSnapshotRegionSplit);

      // validate record reader
      OutputCollector collector = mock(OutputCollector.class);
      Reporter reporter = mock(Reporter.class);
      RecordReader<ImmutableBytesWritable, Result> rr = tsif.getRecordReader(split, job, reporter);

      // validate we can read all the data back
      ImmutableBytesWritable key = rr.createKey();
View Full Code Here

Examples of org.apache.hadoop.mapred.OutputCollector

        // create a new record writer
        writer = outputFormat.getRecordWriter(FileSystem.get(outputConf),
                                              outputConf, fileName, reporter);

        // return an output collector using the writer we just created.
        return new StoreFuncAdaptor(new OutputCollector()
            {
                @SuppressWarnings({"unchecked"})
                public void collect(Object key, Object value) throws IOException {
                    writer.write(key,value);
                }
View Full Code Here

Examples of org.apache.hadoop.mapred.OutputCollector

        // called at all in this task). If reducer still generates output,
        // which is very uncommon and we may not have to support this case.
        // So we don't write this output to HDFS, but we consume/collect
        // this output just to avoid reducer hanging forever.

        // NOTE(review): deliberately discarding collector — collect() is a no-op,
        // so reducer output is drained without being written anywhere (see the
        // surrounding comment: this only exists to keep the reducer from hanging).
        OutputCollector collector = new OutputCollector() {
          public void collect(Object key, Object value)
            throws IOException {
            //just consume it, no need to write the record anywhere
          }
        };
View Full Code Here

Examples of org.apache.hadoop.mapred.OutputCollector

        // called at all in this task). If reducer still generates output,
        // which is very uncommon and we may not have to support this case.
        // So we don't write this output to HDFS, but we consume/collect
        // this output just to avoid reducer hanging forever.

        // NOTE(review): deliberately discarding collector — collect() is a no-op,
        // so reducer output is drained without being written anywhere (see the
        // surrounding comment: this only exists to keep the reducer from hanging).
        OutputCollector collector = new OutputCollector() {
          public void collect(Object key, Object value)
            throws IOException {
            //just consume it, no need to write the record anywhere
          }
        };
View Full Code Here

Examples of org.apache.hadoop.mapred.OutputCollector

        // called at all in this task). If reducer still generates output,
        // which is very uncommon and we may not have to support this case.
        // So we don't write this output to HDFS, but we consume/collect
        // this output just to avoid reducer hanging forever.

        // NOTE(review): deliberately discarding collector — collect() is a no-op,
        // so reducer output is drained without being written anywhere (see the
        // surrounding comment: this only exists to keep the reducer from hanging).
        OutputCollector collector = new OutputCollector() {
          public void collect(Object key, Object value)
            throws IOException {
            //just consume it, no need to write the record anywhere
          }
        };
View Full Code Here

Examples of org.apache.hadoop.mapred.OutputCollector

        // called at all in this task). If reducer still generates output,
        // which is very uncommon and we may not have to support this case.
        // So we don't write this output to HDFS, but we consume/collect
        // this output just to avoid reducer hanging forever.

        // NOTE(review): deliberately discarding collector — collect() is a no-op,
        // so reducer output is drained without being written anywhere (see the
        // surrounding comment: this only exists to keep the reducer from hanging).
        OutputCollector collector = new OutputCollector() {
          public void collect(Object key, Object value)
            throws IOException {
            //just consume it, no need to write the record anywhere
          }
        };
View Full Code Here

Examples of org.apache.hadoop.mapred.OutputCollector

  @Override
  public void sink(FlowProcess<JobConf> flowProcess, SinkCall<Object[], OutputCollector> sinkCall)
      throws IOException {
    TupleEntry tupleEntry = sinkCall.getOutgoingEntry();
    OutputCollector outputCollector = sinkCall.getOutput();
    Tuple key = tupleEntry.selectTuple(keyField);
    ImmutableBytesWritable keyBytes = (ImmutableBytesWritable) key.getObject(0);

    if (useSalt) {
      keyBytes = HBaseSalter.addSaltPrefix(keyBytes);
    }

    Put put;
    if (this.timeStamp == 0L) {
      put = new Put(keyBytes.get());
    } else {
      put = new Put(keyBytes.get(), this.timeStamp);
    }
   
    for (int i = 0; i < valueFields.length; i++) {
      Fields fieldSelector = valueFields[i];
      TupleEntry values = tupleEntry.selectEntry(fieldSelector);

      for (int j = 0; j < values.getFields().size(); j++) {
        Fields fields = values.getFields();
        Tuple tuple = values.getTuple();

        ImmutableBytesWritable valueBytes = (ImmutableBytesWritable) tuple.getObject(j);
        if (valueBytes != null)
            put.add(Bytes.toBytes(familyNames[i]), Bytes.toBytes((String) fields.get(j)), valueBytes.get());
      }
    }

    outputCollector.collect(null, put);
  }
View Full Code Here

Examples of org.apache.hadoop.mapred.OutputCollector

  @SuppressWarnings("unchecked")
  @Override
  public void sink(FlowProcess<JobConf> flowProcess, SinkCall<Object[], OutputCollector> sinkCall) throws IOException {
    TupleEntry tupleEntry = sinkCall.getOutgoingEntry();
    OutputCollector outputCollector = sinkCall.getOutput();
    Tuple key = tupleEntry.selectTuple(RowKeyField);
    Object okey = key.getObject(0);
    ImmutableBytesWritable keyBytes = getBytes(okey);
    Put put = new Put(keyBytes.get());
    Fields outFields = tupleEntry.getFields().subtract(RowKeyField);
    if (null != outFields) {
      TupleEntry values = tupleEntry.selectEntry(outFields);
      for (int n = 0; n < values.getFields().size(); n++) {
        Object o = values.get(n);
        ImmutableBytesWritable valueBytes = getBytes(o);
        Comparable field = outFields.get(n);
        ColumnName cn = parseColumn((String) field);
        if (null == cn.family) {
          if (n >= familyNames.length)
            cn.family = familyNames[familyNames.length - 1];
          else
            cn.family = familyNames[n];
        }
        if (null != o || writeNulls)
          put.add(Bytes.toBytes(cn.family), Bytes.toBytes(cn.name), valueBytes.get());
      }
    }
    outputCollector.collect(null, put);
  }
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.