Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.JobContext


  public void testConvertValueToTuple() throws IOException,InterruptedException{
    BytesRefArrayWritable[] bytesArr = initTestEnvironment();

    HCatSchema schema = buildHiveSchema();
    RCFileInputDriver sd = new RCFileInputDriver();
    JobContext jc = new JobContext(conf, new JobID());
    sd.setInputPath(jc, file.toString());
    InputFormat<?,?> iF = sd.getInputFormat(null);
    InputSplit split = iF.getSplits(jc).get(0);
    sd.setOriginalSchema(jc, schema);
    sd.setOutputSchema(jc, schema);
    // ... (remainder of example omitted)
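
A minimal, self-contained sketch of the pattern the snippets on this page share, assuming the pre-0.21 Hadoop API in which JobContext is a concrete class with a public (Configuration, JobID) constructor (later releases turn it into an interface created through JobContextImpl or a shim). The class name and input path are hypothetical.

  import java.util.List;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.mapreduce.InputSplit;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.JobContext;
  import org.apache.hadoop.mapreduce.JobID;
  import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
  import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

  public class JobContextSplitsSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      Job job = new Job(conf);
      // Hypothetical input directory; any existing path with files will do.
      FileInputFormat.addInputPath(job, new Path("/tmp/jobcontext-demo"));

      // The JobContext wraps the job's Configuration plus a JobID; the
      // InputFormat reads the input paths and split settings from it.
      JobContext jc = new JobContext(job.getConfiguration(), new JobID());
      List<InputSplit> splits = new TextInputFormat().getSplits(jc);
      System.out.println("computed " + splits.size() + " splits");
    }
  }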


  public void testPruning() throws IOException,InterruptedException{
    BytesRefArrayWritable[] bytesArr = initTestEnvironment();

    RCFileInputDriver sd = new RCFileInputDriver();
    JobContext jc = new JobContext(conf, new JobID());
    sd.setInputPath(jc, file.toString());
    InputFormat<?,?> iF = sd.getInputFormat(null);
    InputSplit split = iF.getSplits(jc).get(0);
    sd.setOriginalSchema(jc, buildHiveSchema());
    sd.setOutputSchema(jc, buildPrunedSchema());

    sd.initialize(jc, getProps());
    conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR,
        jc.getConfiguration().get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));
    TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
    RecordReader<?,?> rr = iF.createRecordReader(split,tac);
    rr.initialize(split, tac);
    HCatRecord[] tuples = getPrunedRecords();
    for(int j=0; j < 2; j++){
      // ... (remainder of example omitted)
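
The interesting part of the pruning test is that setting a narrower output schema makes the driver publish the surviving column ids under ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR in the JobContext's Configuration, which the test then copies into the task Configuration. An illustrative check of that hand-off (not part of the original test):

    // After sd.initialize(jc, getProps()), the driver should have recorded
    // the pruned column ids on the JobContext's Configuration; the snippet
    // above relies on exactly this key when building the task Configuration.
    String prunedIds =
        jc.getConfiguration().get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR);
    assertNotNull("driver should publish the pruned column ids", prunedIds);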

  public void testReorderdCols() throws IOException,InterruptedException{
    BytesRefArrayWritable[] bytesArr = initTestEnvironment();

    RCFileInputDriver sd = new RCFileInputDriver();
    JobContext jc = new JobContext(conf, new JobID());
    sd.setInputPath(jc, file.toString());
    InputFormat<?,?> iF = sd.getInputFormat(null);
    InputSplit split = iF.getSplits(jc).get(0);
    sd.setOriginalSchema(jc, buildHiveSchema());
    sd.setOutputSchema(jc, buildReorderedSchema());

    sd.initialize(jc, getProps());
    Map<String,String> map = new HashMap<String,String>(1);
    map.put("part1", "first-part");
    sd.setPartitionValues(jc, map);
    conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR,
        jc.getConfiguration().get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));
    TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
    RecordReader<?,?> rr = iF.createRecordReader(split,tac);
    rr.initialize(split, tac);
    HCatRecord[] tuples = getReorderedCols();
    for(int j=0; j < 2; j++){
      // ... (remainder of example omitted)
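
The excerpts stop right before the read loop. A hedged sketch of how such a loop typically drains the new-API RecordReader created from the split and TaskAttemptContext above (the assertions are illustrative, not copied from the original test):

    for (int j = 0; j < 2; j++) {
      assertTrue(rr.nextKeyValue());
      // The reader hands back the on-disk value; the original test converts
      // it through the storage driver before comparing it with tuples[j].
      // That driver-specific conversion step is omitted here.
      assertNotNull(rr.getCurrentValue());
    }
    rr.close();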

      writer.close();

      RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable> inputFormat =
          new RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable>();
      Configuration jobConf = new Configuration(cloneConf);
      jobConf.set("mapred.input.dir", testDir.toString());
      JobContext context = new Job(jobConf);
      context.getConfiguration().setLong("mapred.max.split.size", maxSplitSize);
      List<InputSplit> splits = inputFormat.getSplits(context);
      assertEquals("splits length should be " + splitNumber, splitNumber, splits.size());
      int readCount = 0;
      for (int i = 0; i < splits.size(); i++) {
        TaskAttemptContext tac = new TaskAttemptContext(jobConf, new TaskAttemptID());
        // ... (remainder of example omitted)
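
A hedged sketch of how the body of that per-split loop typically continues: create and initialize a reader for each split, then keep a running count of the rows read (the names match the excerpt above; the loop body itself is illustrative):

        RecordReader<LongWritable, BytesRefArrayWritable> reader =
            inputFormat.createRecordReader(splits.get(i), tac);
        reader.initialize(splits.get(i), tac);
        while (reader.nextKeyValue()) {
          readCount++;
        }
        reader.close();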

public class TestRCFileOutputStorageDriver extends TestCase {

  public void testConversion() throws IOException {
    Configuration conf = new Configuration();
    JobContext jc = new JobContext(conf, new JobID());

    HCatSchema schema = buildHiveSchema();
    HCatInputStorageDriver isd = new RCFileInputDriver();

    isd.setOriginalSchema(jc, schema);
    // ... (remainder of example omitted)

  }

  private boolean isRecoverySupported(OutputCommitter committer2)
      throws IOException {
    boolean isSupported = false;
    JobContext _jobContext;
    if (committer != null) {
      if (newApiCommitter) {
         _jobContext = new JobContextImpl(
            getConfig(), TypeConverter.fromYarn(getJobId()));
      } else {
        // ... (remainder of example omitted)
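
The excerpt stops inside the else branch. A hedged sketch of how that branch and the tail of such a method usually read: old-API committers get the mapred-side JobContextImpl wrapped around a JobConf, and the committer is then asked whether it supports recovery. JobConf, the mapred JobContextImpl and the no-argument OutputCommitter.isRecoverySupported() are assumed to be available; this mirrors the common MR app-master pattern rather than quoting it.

         _jobContext = new org.apache.hadoop.mapred.JobContextImpl(
            new JobConf(getConfig()), TypeConverter.fromYarn(getJobId()));
      }
      isSupported = committer2.isRecoverySupported();
    }
    return isSupported;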

          InetAddress ip = InetAddress.getLocalHost();
          if (ip != null) {
            job.setJobSubmitHostAddress(ip.getHostAddress());
            job.setJobSubmitHostName(ip.getHostName());
          }
          JobContext context = new JobContext(jobCopy, jobId);

          jobCopy = (JobConf)context.getConfiguration();

          // Check the output specification
          if (reduces == 0 ? jobCopy.getUseNewMapper() :
            jobCopy.getUseNewReducer()) {
            org.apache.hadoop.mapreduce.OutputFormat<?,?> output =
              ReflectionUtils.newInstance(context.getOutputFormatClass(),
                  jobCopy);
            output.checkOutputSpecs(context);
          } else {
            jobCopy.getOutputFormat().checkOutputSpecs(fs, jobCopy);
          }
          // ... (remainder of example omitted)
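
On the new-API path the JobContext is handed straight to the OutputFormat. A hedged sketch of what a typical file-based implementation does inside checkOutputSpecs(JobContext), modeled on FileOutputFormat's behaviour (Path, FileSystem, FileOutputFormat, InvalidJobConfException and FileAlreadyExistsException are assumed imports):

  @Override
  public void checkOutputSpecs(JobContext context) throws IOException {
    Path outDir = FileOutputFormat.getOutputPath(context);
    if (outDir == null) {
      // No output directory configured at all.
      throw new InvalidJobConfException("Output directory not set.");
    }
    FileSystem fs = outDir.getFileSystem(context.getConfiguration());
    if (fs.exists(outDir)) {
      // Refuse to overwrite an existing output directory.
      throw new FileAlreadyExistsException(
          "Output directory " + outDir + " already exists");
    }
  }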

                           ", graphState" + gs);
        VertexInputFormat<LongWritable, IntWritable, FloatWritable, IntWritable>
            inputFormat = BspUtils.createVertexInputFormat(job.getConfiguration());
        List<InputSplit> splitArray =
            inputFormat.getSplits(
                new JobContext(new Configuration(), new JobID()), 1);
        ByteArrayOutputStream byteArrayOutputStream =
            new ByteArrayOutputStream();
        DataOutputStream outputStream =
            new DataOutputStream(byteArrayOutputStream);
        ((Writable) splitArray.get(0)).write(outputStream);
        // ... (remainder of example omitted)
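
The excerpt serializes the first split through the Writable interface. A hedged sketch of the matching read side, rebuilding the split from the serialized bytes (ByteArrayInputStream, DataInputStream and ReflectionUtils are assumed imports):

        DataInputStream inputStream = new DataInputStream(
            new ByteArrayInputStream(byteArrayOutputStream.toByteArray()));
        // Instantiate a split of the same class, then repopulate it.
        InputSplit readSplit = ReflectionUtils.newInstance(
            splitArray.get(0).getClass(), job.getConfiguration());
        ((Writable) readSplit).readFields(inputStream);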

   
    JobImpl mockJob = mock(JobImpl.class);
    mockJob.tasks = new HashMap<TaskId, Task>();
    OutputCommitter mockCommitter = mock(OutputCommitter.class);
    EventHandler mockEventHandler = mock(EventHandler.class);
    JobContext mockJobContext = mock(JobContext.class);
   
    when(mockJob.getCommitter()).thenReturn(mockCommitter);
    when(mockJob.getEventHandler()).thenReturn(mockEventHandler);
    when(mockJob.getJobContext()).thenReturn(mockJobContext);
    doNothing().when(mockJob).setFinishTime();
    // ... (remainder of example omitted)
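
The mocked JobContext above is only wired into the JobImpl. An illustrative extra stub (not from the original test) gives it a real Configuration, so any code under test that reads settings through the context gets sane values back:

    // Hypothetical stub; stubConf is a name introduced here for illustration.
    Configuration stubConf = new Configuration();
    when(mockJobContext.getConfiguration()).thenReturn(stubConf);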

  @Override
  public void checkOutputSpecs(FileSystem ignored, JobConf jc) throws IOException {
    //delegate to the new api
    Job job = new Job(jc);
    JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);

    checkOutputSpecs(jobContext);
  }
