Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.Counters.findCounter()
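
findCounter() looks up a counter either by enum constant or by group name and counter name, creating it on first access. The following is a minimal, self-contained sketch of both overloads; MyCounter and the group/counter names are illustrative only and do not come from the projects excerpted below.

    import org.apache.hadoop.mapreduce.Counter;
    import org.apache.hadoop.mapreduce.Counters;

    public class FindCounterSketch {

      // Illustrative user-defined counter enum.
      public enum MyCounter { GOOD_RECORDS, BAD_RECORDS }

      public static void main(String[] args) {
        Counters counters = new Counters();

        // Enum overload: the counter is created on first access.
        Counter good = counters.findCounter(MyCounter.GOOD_RECORDS);
        good.increment(1);

        // String overload: the counter is addressed by group name and counter name.
        Counter custom = counters.findCounter("my-group", "my-counter");
        custom.increment(5);

        System.out.println(good.getValue());    // 1
        System.out.println(custom.getValue());  // 5
      }
    }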


    values.add(hihoValue1);
    values.add(hihoValue2);

    Reducer.Context context = mock(Reducer.Context.class);
    Counters counters = new Counters();
    Counter counter = counters.findCounter(MergeRecordCounter.OUTPUT);
    when(context.getCounter(MergeRecordCounter.OUTPUT)).thenReturn(counter);
    MergeKeyReducer mergeReducer = new MergeKeyReducer();
    mergeReducer.reduce(hihoTuple, values, context);
    verify(context).write(key, value2);
    assertEquals(1, context.getCounter(MergeRecordCounter.OUTPUT).getValue());
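
The test above mocks Reducer.Context so that getCounter() hands back a Counter taken from a real Counters instance, letting the assertion read the value the reducer incremented. Below is a rough sketch of the kind of reducer such a test exercises; it is not the actual hiho MergeKeyReducer, and the types, the merge logic, and the inlined MergeRecordCounter enum are placeholders.

    import java.io.IOException;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;

    public class SketchMergeKeyReducer extends Reducer<Text, Text, Text, Text> {

      // Placeholder standing in for hiho's MergeRecordCounter enum used in the test above.
      public enum MergeRecordCounter { OUTPUT }

      @Override
      protected void reduce(Text key, Iterable<Text> values, Context context)
          throws IOException, InterruptedException {
        Text merged = null;
        for (Text value : values) {
          merged = value;  // placeholder merge logic: keep the last value seen
        }
        if (merged != null) {
          context.write(key, merged);
          // This is the counter the test reads back through the mocked context.
          context.getCounter(MergeRecordCounter.OUTPUT).increment(1);
        }
      }
    }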



    if (counters == null) {
      return;
    }

    // TODO: remove deprecation suppress when we don't want to rely on org.apache.hadoop.mapred
    Counter count = counters.findCounter(Task.Counter.MAP_INPUT_RECORDS);

    for (int i = 0; i < safeLongToInt(count.getValue()); i++) {
      contribution.incrementReadCount();
    }

    count = counters.findCounter(Task.Counter.MAP_SKIPPED_RECORDS);
    contribution.incrementReadSkipCount(safeLongToInt(count.getValue()));

    count = counters.findCounter(Task.Counter.REDUCE_OUTPUT_RECORDS);
    contribution.incrementWriteCount(safeLongToInt(count.getValue()));

    count = counters.findCounter(Task.Counter.REDUCE_SKIPPED_RECORDS);

    for (int i = 0; i < safeLongToInt(count.getValue()); i++) {
      contribution.incrementWriteSkipCount();
    }
  }
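
The Task.Counter enum referenced above lives in the deprecated org.apache.hadoop.mapred package (hence the TODO). On Hadoop 2.x the same built-in record counters are also exposed as org.apache.hadoop.mapreduce.TaskCounter; the sketch below shows the equivalent lookups, assuming a completed Job and treating the report() helper itself as illustrative.

    import org.apache.hadoop.mapreduce.Counters;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.TaskCounter;

    public class TaskCounterSketch {

      // Illustrative helper: read the built-in record counters of a finished job.
      static void report(Job job) throws Exception {
        Counters counters = job.getCounters();
        if (counters == null) {
          return;
        }
        long read = counters.findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue();
        long readSkip = counters.findCounter(TaskCounter.MAP_SKIPPED_RECORDS).getValue();
        long write = counters.findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS).getValue();
        long writeSkip = counters.findCounter(TaskCounter.REDUCE_SKIPPED_RECORDS).getValue();
        System.out.printf("read=%d readSkip=%d write=%d writeSkip=%d%n",
            read, readSkip, write, writeSkip);
      }
    }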

              job.getConfiguration()).getClass()
          .getName());
      job.waitForCompletion(false);
      if (job.isComplete()) {
        Counters counters = job.getCounters();
        totalRecordsRead = counters.findCounter(
            DedupRecordCounter.TOTAL_RECORDS_READ).getValue();
        badRecords = counters.findCounter(
            DedupRecordCounter.BAD_RECORD).getValue();
        output = counters.findCounter(DedupRecordCounter.OUTPUT)
            .getValue();
        duplicateRecords = totalRecordsRead - output;
        logger.info("Total records read are: " + totalRecordsRead);
        logger.info("Bad Records are: " + badRecords);
        logger.info("Output records are: " + output);
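
Note that Job.getCounters() can return null, for example once a completed job has been retired from the job history, so a guard like the `if (counters == null) return;` check in the earlier snippet is worth keeping before any findCounter() call.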

          @Override
          public boolean dump(Job job) {
            Counters counters;
            try {
              counters = job.getCounters();
              long wsize = counters.findCounter("higo", "dumpcount").getValue();
              if (wsize > 0) {
                try {
                  TableJoin.LOG.info("update " + tableName + ",dump");
                  TableJoin.updatePercent(tableName, "Stage-2 map = 100%,  reduce = 100%", "DUMP");
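
findCounter("higo", "dumpcount") uses the String overload, which addresses a counter by group and name without declaring an enum. A sketch of the producing side of such a counter is shown below; the mapper class, its types, and the one-increment-per-record policy are assumptions, and only the group and counter names are taken from the snippet above.

    import java.io.IOException;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    public class DumpCountMapper extends Mapper<LongWritable, Text, Text, Text> {

      @Override
      protected void map(LongWritable key, Text value, Context context)
          throws IOException, InterruptedException {
        // No enum is needed: the counter is addressed by group and name, and the
        // driver can later read it back with counters.findCounter("higo", "dumpcount").
        context.getCounter("higo", "dumpcount").increment(1);
        context.write(value, value);
      }
    }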
