Package org.apache.hadoop.mapreduce.test.system

Examples of org.apache.hadoop.mapreduce.test.system.TaskInfo
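
Each excerpt below follows the same pattern: submit a job through the system-test clients, pick a regular (non-setup, non-cleanup) task with JTProtocol.getTaskInfo(JobID), and then poll the returned TaskInfo until its first attempt reaches the expected TaskStatus.State. A minimal sketch of that recurring polling step, written as a hypothetical helper (the name, signature and timeout handling are illustrative and not part of the original tests; it assumes the same imports the excerpts themselves use):

  // Hypothetical helper distilled from the wait loops in the excerpts:
  // re-read the TaskInfo once a second until the first attempt reports
  // the expected run state, or give up after maxSeconds.
  private static boolean waitForTaskState(JTProtocol wovenClient,
      TaskInfo taskInfo, TaskStatus.State expected, int maxSeconds)
      throws IOException {
    for (int counter = 0; counter < maxSeconds; counter++) {
      TaskStatus[] statuses = taskInfo.getTaskStatus();
      if (statuses.length > 0 && statuses[0].getRunState() == expected) {
        return true;
      }
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    }
    return false;
  }

With such a helper, the 30- and 60-iteration wait loops in the excerpts reduce to a single call such as waitForTaskState(wovenClient, taskInfo, TaskStatus.State.KILLED, 30).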


   * after task is killed.
   */
  @Test
  public void testProcessTreeCleanupOfKilledTask2() throws
      IOException {
    TaskInfo taskInfo = null;
    TaskID tID = null;
    TaskAttemptID taskAttID = null;
    TTTaskInfo [] ttTaskinfo = null;
    String pid = null;
    TTProtocol ttIns = null;
    TTClient ttClientIns = null;
    int counter = 0;

    JobConf jobConf = new JobConf(conf);
    jobConf.setJobName("Message Display");
    jobConf.setJarByClass(GenerateTaskChildProcess.class);
    jobConf.setMapperClass(GenerateTaskChildProcess.StrDisplayMapper.class);
    jobConf.setNumMapTasks(1);
    jobConf.setNumReduceTasks(0);
    cleanup(outputDir, conf);
    FileInputFormat.setInputPaths(jobConf, inputDir);
    FileOutputFormat.setOutputPath(jobConf, outputDir);
    JTClient jtClient = cluster.getJTClient();
    JobClient client = jtClient.getClient();
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    RunningJob runJob = client.submitJob(jobConf);
    JobID id = runJob.getID();
    JobInfo jInfo = wovenClient.getJobInfo(id);   
    Assert.assertNotNull("Job information is null", jInfo);

    Assert.assertTrue("Job has not been started for 1 min.",
        jtClient.isJobStarted(id));

    TaskInfo[] taskInfos = wovenClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }

    Assert.assertTrue("Task has not been started for 1 min.",
        jtClient.isTaskStarted(taskInfo));

    tID = TaskID.downgrade(taskInfo.getTaskID());
    taskAttID = new TaskAttemptID(tID,0);
    FinishTaskControlAction action = new FinishTaskControlAction(tID);
    Collection<TTClient> ttClients = cluster.getTTClients();
    for (TTClient ttClient : ttClients) {
      TTProtocol tt = ttClient.getProxy();
      tt.sendAction(action);
      ttTaskinfo = tt.getTasks();
      for (TTTaskInfo tttInfo : ttTaskinfo) {
        if (!tttInfo.isTaskCleanupTask()) {
          pid = tttInfo.getPid();
          ttClientIns = ttClient;
          ttIns = tt;
          break;
        }
      }
      if (ttClientIns != null) {
        break;
      }
    }


    Assert.assertTrue("Map process is not alive before task kills.",
        ttIns.isProcessTreeAlive(pid));

    runJob.killTask(taskAttID, false);

    LOG.info("Waiting till the task is killed...");
    taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    counter = 0;
    while (counter < 30) {
      if (taskInfo.getTaskStatus()[0].getRunState() ==
              TaskStatus.State.KILLED) {
        break;
      }
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
      counter ++;
    }
    runJob.killJob();
    LOG.info("Waiting till the job is completed...");
    counter = 0;
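The excerpt above is cut off while waiting for the job to finish. Given the test name and the "alive before the kill" assertion, the remainder presumably verifies that the child process tree is gone once the task is killed; a hedged sketch of that closing check, reusing the ttIns and pid variables from the excerpt (the wait and the assertion wording are assumptions, not the original code):

    // Assumed closing check: once the attempt is killed and the job is
    // done, the child process tree recorded in `pid` should no longer be
    // alive on the TaskTracker that ran it.
    UtilsForTests.waitFor(1000);
    Assert.assertFalse("Map process is still alive after the task is killed.",
        ttIns.isProcessTreeAlive(pid));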


   * Verifying the child process tree clean up of a task which fails due
   * to an exception.
   */
  @Test
  public void testProcessTreeCleanupOfFailedTask1() throws IOException {
    TaskInfo taskInfo = null;
    TaskID tID = null;
    TTTaskInfo [] ttTaskinfo = null;
    String pid = null;
    TTProtocol ttIns = null;
    TTClient ttClientIns = null;
    int counter = 0;

    JobConf jobConf = new JobConf(conf);
    jobConf.setJobName("Message Display");
    jobConf.setJarByClass(GenerateTaskChildProcess.class);
    jobConf.setMapperClass(GenerateTaskChildProcess.FailedMapper.class);
    jobConf.setNumMapTasks(1);
    jobConf.setNumReduceTasks(0);
    cleanup(outputDir, conf);
    FileInputFormat.setInputPaths(jobConf, inputDir);
    FileOutputFormat.setOutputPath(jobConf, outputDir);

    JTClient jtClient = cluster.getJTClient();
    JobClient client = jtClient.getClient();
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    RunningJob runJob = client.submitJob(jobConf);
    JobID id = runJob.getID();
    JobInfo jInfo = wovenClient.getJobInfo(id);
    Assert.assertNotNull("Job information is null", jInfo);

    Assert.assertTrue("Job has not been started for 1 min.",
        jtClient.isJobStarted(id));

    TaskInfo[] taskInfos = wovenClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }

    Assert.assertTrue("Task has not been started for 1 min.",
        jtClient.isTaskStarted(taskInfo));

    tID = TaskID.downgrade(taskInfo.getTaskID());   
    FinishTaskControlAction action = new FinishTaskControlAction(tID);
   
    Collection<TTClient> ttClients = cluster.getTTClients();
    for (TTClient ttClient : ttClients) {
      TTProtocol tt = ttClient.getProxy();
      tt.sendAction(action);
      ttTaskinfo = tt.getTasks();
      for (TTTaskInfo tttInfo : ttTaskinfo) {
        if (!tttInfo.isTaskCleanupTask()) {
          pid = tttInfo.getPid();
          ttClientIns = ttClient;
          ttIns = tt;
          break;
        }
      }
      if (ttClientIns != null) {
        break;
      }
    }

    Assert.assertTrue("Map process is not alive before task fails.",
            ttIns.isProcessTreeAlive(pid));

    LOG.info("Waiting till the task is failed...");
    counter = 0;
    while (counter < 60) {
      if (taskInfo.getTaskStatus().length > 0) {
        if (taskInfo.getTaskStatus()[0].getRunState() ==
            TaskStatus.State.FAILED) {
          break;
        }
      }
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
      counter++;
    }

    LOG.info("Waiting till the job is completed...");
    counter = 0;

   */
  @Test
  public void testFailedTaskJobStatus() throws IOException,
          InterruptedException {
    conf = remoteJTClient.getDaemonConf();
    TaskInfo taskInfo = null;
    SleepJob job = new SleepJob();
    job.setConf(conf);
    JobConf jobConf = job.setupJobConf(1, 1, 10000, 4000, 100, 100);
    RunningJob runJob = jobClient.submitJob(jobConf);
    JobID jobId = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(jobId);
    Assert.assertTrue("Job has not been started for 1 min.",
        jtClient.isJobStarted(jobId));
    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(jobId);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup() && taskinfo.getTaskID().isMap()) {
        taskInfo = taskinfo;
        break;
      }
    }
    Assert.assertTrue("Task has not been started for 1 min.",
        jtClient.isTaskStarted(taskInfo));

    // Fail the running task.
    RunningJob networkJob = jobClient.getJob(jobId);
    TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
    TaskAttemptID taskAttID = new TaskAttemptID(tID , 0);
    networkJob.killTask(taskAttID, true);

    LOG.info("Waiting till the job is completed...");
    while (!jInfo.getStatus().isJobComplete()) {

   * after killing the task.
   */
  @Test
  public void testDirCleanupAfterTaskKilled() throws IOException,
          InterruptedException {
    TaskInfo taskInfo = null;
    boolean isTempFolderExists = false;
    String localTaskDir = null;
    TTClient ttClient = null;
    FileStatus filesStatus [] = null;
    Path inputDir = new Path("input");
    Path outputDir = new Path("output");
    Configuration conf = new Configuration(cluster.getConf());
    JobConf jconf = new JobConf(conf);
    jconf.setJobName("Word Count");
    jconf.setJarByClass(WordCount.class);
    jconf.setMapperClass(WordCount.MapClass.class);
    jconf.setCombinerClass(WordCount.Reduce.class);
    jconf.setReducerClass(WordCount.Reduce.class);
    jconf.setNumMapTasks(1);
    jconf.setNumReduceTasks(1);
    jconf.setOutputKeyClass(Text.class);
    jconf.setOutputValueClass(IntWritable.class);

    cleanup(inputDir, conf);
    cleanup(outputDir, conf);
    createInput(inputDir, conf);
    FileInputFormat.setInputPaths(jconf, inputDir);
    FileOutputFormat.setOutputPath(jconf, outputDir);
    RunningJob runJob = jobClient.submitJob(jconf);
    JobID id = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(id);
    Assert.assertTrue("Job has not been started for 1 min.",
       jtClient.isJobStarted(id));

    JobStatus[] jobStatus = jobClient.getAllJobs();
    String userName = jobStatus[0].getUsername();
    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup() && taskinfo.getTaskID().isMap()) {
        taskInfo = taskinfo;
        break;
      }
    }

    Assert.assertTrue("Task has not been started for 1 min.",
       jtClient.isTaskStarted(taskInfo));

    TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
    FinishTaskControlAction action = new FinishTaskControlAction(tID);

    String[] taskTrackers = taskInfo.getTaskTrackers();
    int counter = 0;
    TaskInfo prvTaskInfo = taskInfo;
    while (counter++ < 30) {
      if (taskTrackers.length > 0) {
        break;
      } else {
        UtilsForTests.waitFor(100);

  public void testDirCleanupAfterTaskFailed() throws IOException,
          InterruptedException {
    TTClient ttClient = null;
    FileStatus filesStatus [] = null;
    String localTaskDir = null;
    TaskInfo taskInfo = null;
    TaskID tID = null;
    boolean isTempFolderExists = false;
    conf = remoteJTClient.getDaemonConf();
    SleepJob job = new SleepJob();
    job.setConf(conf);
    JobConf jobConf = job.setupJobConf(1, 0, 10000, 100, 10, 10);
    RunningJob runJob = jobClient.submitJob(jobConf);
    JobID id = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(id);
    Assert.assertTrue("Job has not been started for 1 min.",
       jtClient.isJobStarted(id));

    JobStatus[] jobStatus = jobClient.getAllJobs();
    String userName = jobStatus[0].getUsername();
    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup() && taskinfo.getTaskID().isMap()) {
        taskInfo = taskinfo;
        break;
      }
    }
    Assert.assertTrue("Task has not been started for 1 min.",
       jtClient.isTaskStarted(taskInfo));
   
    tID = TaskID.downgrade(taskInfo.getTaskID());
    FinishTaskControlAction action = new FinishTaskControlAction(tID);
    String[] taskTrackers = taskInfo.getTaskTrackers();
    int counter = 0;
    TaskInfo prvTaskInfo = taskInfo;
    while (counter++ < 30) {
      if (taskTrackers.length > 0) {
        break;
      } else {
        UtilsForTests.waitFor(1000);

    boolean continueLoop = true;

    //counter for the job loop
    int countLoop = 0;

    TaskInfo taskInfo = null;

    String jobTrackerUserName = remoteJTClient.getDaemonUser();

    LOG.info("jobTrackerUserName is :" + jobTrackerUserName);

    //This counter guards the wait loop below, which might otherwise
    //become infinite.
    int count = 0;

    SleepJob job = new SleepJob();
    job.setConf(conf);
    int totalMapTasks = 5;
    int totalReduceTasks = 1;
    conf = job.setupJobConf(totalMapTasks, totalReduceTasks,
      100, 100, 100, 100);
    JobConf jconf = new JobConf(conf);

    count = 0;
    //In this test setup the last-hour and last-day windows are refreshed
    //every 60 and 120 seconds respectively, in place of one hour and one
    //day. Wait for them to be in a freshly reset state before the
    //testcase starts.
    while (remoteJTClient.getInfoFromAllClients("last_day","total_tasks")
        != 0) {
      count++;
      UtilsForTests.waitFor(1000);
      //If the count goes beyond this point, bail out to avoid an
      //infinite loop under unforeseen circumstances; the testcase would
      //fail later anyway.
      if (count > 140) {
        Assert.fail("Since this value has not reached 0" +
          "in more than 140 seconds. Failing at this point");
      }
    }

    statisticsCollectionHandler = null;
    statisticsCollectionHandler = remoteJTClient.
        getInfoFromAllClientsForAllTaskType();

    int totalTasksSinceStartBeforeJob = statisticsCollectionHandler.
        getSinceStartTotalTasks();
    int succeededTasksSinceStartBeforeJob = statisticsCollectionHandler.
        getSinceStartSucceededTasks();
    int totalTasksLastHourBeforeJob = statisticsCollectionHandler.
        getLastHourTotalTasks();
    int succeededTasksLastHourBeforeJob = statisticsCollectionHandler.
        getLastHourSucceededTasks();
    int totalTasksLastDayBeforeJob = statisticsCollectionHandler.
        getLastDayTotalTasks();
    int succeededTasksLastDayBeforeJob = statisticsCollectionHandler.
        getLastDaySucceededTasks();
    //Submitting the job
    RunningJob rJob = cluster.getJTClient().getClient().
        submitJob(jconf);

    JobInfo jInfo = remoteJTClient.getJobInfo(rJob.getID());
    LOG.info("jInfo is :" + jInfo);

    count = 0;
    while (count < 60) {
      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
        break;
      } else {
        UtilsForTests.waitFor(1000);
        jInfo = remoteJTClient.getJobInfo(rJob.getID());
      }
      count++;
    }
    Assert.assertTrue("Job has not been started for 1 min.",
        count != 60);

    //Assert if jobInfo is null
    Assert.assertNotNull("jobInfo is null", jInfo);

    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(rJob.getID());
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
      }
    }

    count = 0;
    taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
    while (count < 60) {
      if (taskInfo.getTaskStatus().length > 0) {
        if (taskInfo.getTaskStatus()[0].getRunState()
              == TaskStatus.State.RUNNING) {
          break;
        }
      }
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
      count++;
    }

    Assert.assertTrue("Task has not been started for 1 min.",
      count != 60);

    TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
    TaskAttemptID taskAttID = new TaskAttemptID(tID , 0);
    rJob.killTask(taskAttID, false);

    count = 0;
    LOG.info("Waiting till the job is completed...");
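The excerpt stops while waiting for the job to complete. Presumably the truncated portion samples the per-tracker statistics again once the job finishes and compares them with the before-job baselines captured above; a hedged sketch of one such comparison, reusing the accessor names from the excerpt (the exact expected delta is an assumption):

    // Assumed verification: after the job completes, the since-start task
    // total should have grown by at least the tasks this job scheduled
    // (totalMapTasks + totalReduceTasks), ignoring re-runs of the killed
    // attempt.
    statisticsCollectionHandler =
        remoteJTClient.getInfoFromAllClientsForAllTaskType();
    int totalTasksSinceStartAfterJob =
        statisticsCollectionHandler.getSinceStartTotalTasks();
    Assert.assertTrue("Since-start total task count did not grow as expected.",
        totalTasksSinceStartAfterJob >=
            totalTasksSinceStartBeforeJob + totalMapTasks + totalReduceTasks);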


   * Verifying the process tree clean up of a task after fails
   * due to memory limit and also job is killed while in progress.
   */
  @Test
  public void testProcessTreeCleanupAfterJobKilled() throws IOException {
    TaskInfo taskInfo = null;
    long PER_TASK_LIMIT = 500L;
    Matcher mat = null;
    TTTaskInfo[] ttTaskinfo = null;
    String pid = null;
    TTClient ttClientIns = null;
    TTProtocol ttIns = null;
    TaskID tID = null;
    int counter = 0;

    String taskOverLimitPatternString =
        "TaskTree \\[pid=[0-9]*,tipID=.*\\] is "
        + "running beyond memory-limits. "
        + "Current usage : [0-9]*bytes. Limit : %sbytes. Killing task.";
   
    Pattern taskOverLimitPattern = Pattern.compile(String.format(
        taskOverLimitPatternString,
            String.valueOf(PER_TASK_LIMIT * 1024 * 1024L)));

    JobConf jobConf = new JobConf(conf);
    jobConf.setJobName("String Appending");
    jobConf.setJarByClass(GenerateTaskChildProcess.class);
    jobConf.setMapperClass(GenerateTaskChildProcess.StrAppendMapper.class);
    jobConf.setNumMapTasks(1);
    jobConf.setNumReduceTasks(0);
    cleanup(outputDir, conf);
    FileInputFormat.setInputPaths(jobConf, inputDir);
    FileOutputFormat.setOutputPath(jobConf, outputDir);
    jobConf.setMemoryForMapTask(PER_TASK_LIMIT);
    jobConf.setMemoryForReduceTask(PER_TASK_LIMIT);
   
    JTClient jtClient = cluster.getJTClient();
    JobClient client = jtClient.getClient();
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    RunningJob runJob = client.submitJob(jobConf);
    JobID id = runJob.getID();
    JobInfo jInfo = wovenClient.getJobInfo(id);
    Assert.assertNotNull("Job information is null",jInfo);

    Assert.assertTrue("Job has not been started for 1 min.",
        jtClient.isJobStarted(id));

    TaskInfo[] taskInfos = wovenClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }

    Assert.assertTrue("Task has not been started for 1 min.",
        jtClient.isTaskStarted(taskInfo));

    tID = TaskID.downgrade(taskInfo.getTaskID());
    TaskAttemptID tAttID = new TaskAttemptID(tID,0);
    FinishTaskControlAction action = new FinishTaskControlAction(tID);
    Collection<TTClient> ttClients = cluster.getTTClients();
    for (TTClient ttClient : ttClients) {
      TTProtocol tt = ttClient.getProxy();
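Both memory-limit tests compile taskOverLimitPattern, yet the excerpts end before it is used; presumably the diagnostics of the failed attempt are matched against it. An illustrative match with a made-up diagnostic line (only the pattern comes from the test; the sample string, pid and tipID are hypothetical):

    // Illustrative only: for PER_TASK_LIMIT = 500 MB the formatted limit is
    // 524288000 bytes, so a diagnostic like this hypothetical line satisfies
    // taskOverLimitPattern.
    String sampleDiagnostic =
        "TaskTree [pid=4096,tipID=task_201201011200_0001_m_000000] is "
        + "running beyond memory-limits. "
        + "Current usage : 558432256bytes. Limit : 524288000bytes. Killing task.";
    Matcher mat = taskOverLimitPattern.matcher(sampleDiagnostic);
    Assert.assertTrue("Diagnostic does not report an over-limit task tree.",
        mat.matches());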

   * Verifying the process tree clean up of a task after it fails
   * due to exceeding memory limit of mapper.
   */
  @Test
  public void testProcessTreeCleanupOfFailedTask() throws IOException {
    TaskInfo taskInfo = null;
    long PER_TASK_LIMIT = 500L;
    Matcher mat = null;
    TTTaskInfo[] ttTaskinfo = null;
    String pid = null;
    TTClient ttClientIns = null;
    TTProtocol ttIns = null;
    TaskID tID = null;
    int counter = 0;
   
    String taskOverLimitPatternString =
        "TaskTree \\[pid=[0-9]*,tipID=.*\\] is "
        + "running beyond memory-limits. "
        + "Current usage : [0-9]*bytes. Limit : %sbytes. Killing task.";

    Pattern taskOverLimitPattern = Pattern.compile(String.format(
        taskOverLimitPatternString,
            String.valueOf(PER_TASK_LIMIT * 1024 * 1024L)));

    JobConf jobConf = new JobConf(conf);
    jobConf.setJobName("String Appending");
    jobConf.setJarByClass(GenerateTaskChildProcess.class);
    jobConf.setMapperClass(GenerateTaskChildProcess.StrAppendMapper.class);
    jobConf.setNumMapTasks(1);
    jobConf.setNumReduceTasks(0);
    cleanup(outputDir, conf);
    FileInputFormat.setInputPaths(jobConf, inputDir);
    FileOutputFormat.setOutputPath(jobConf, outputDir);
    jobConf.setMemoryForMapTask(PER_TASK_LIMIT);
    jobConf.setMemoryForReduceTask(PER_TASK_LIMIT);

    JTClient jtClient = cluster.getJTClient();
    JobClient client = jtClient.getClient();
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    RunningJob runJob = client.submitJob(jobConf);
    JobID id = runJob.getID();
    JobInfo jInfo = wovenClient.getJobInfo(id);
    Assert.assertNotNull("Job information is null", jInfo);

    Assert.assertTrue("Job has not been started for 1 min.",
        jtClient.isJobStarted(id));

    TaskInfo[] taskInfos = wovenClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }
    Assert.assertNotNull("Task information is null.", taskInfo);

    Assert.assertTrue("Task has not been started for 1 min.",
        jtClient.isTaskStarted(taskInfo));

    tID = TaskID.downgrade(taskInfo.getTaskID());
    TaskAttemptID tAttID = new TaskAttemptID(tID,0);
    FinishTaskControlAction action = new FinishTaskControlAction(tID);

    Collection<TTClient> ttClients = cluster.getTTClients();
    for (TTClient ttClient : ttClients) {

   * by using -fail-task option.
   */
  @Test
  public void testProcessTreeCleanupOfFailedTask2() throws
      Exception {
    TaskInfo taskInfo = null;
    TaskID tID = null;
    TTTaskInfo [] ttTaskinfo = null;
    String pid = null;
    TTProtocol ttIns = null;
    TTClient ttClientIns = null;
    int counter = 0;
   
    JobConf jobConf = new JobConf(conf);
    jobConf.setJobName("Message Display");
    jobConf.setJarByClass(GenerateTaskChildProcess.class);
    jobConf.setMapperClass(GenerateTaskChildProcess.StrDisplayMapper.class);
    jobConf.setNumMapTasks(1);
    jobConf.setNumReduceTasks(0);
    cleanup(outputDir, conf);
    FileInputFormat.setInputPaths(jobConf, inputDir);
    FileOutputFormat.setOutputPath(jobConf, outputDir);

    JTClient jtClient = cluster.getJTClient();
    JobClient client = jtClient.getClient();
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    RunningJob runJob = client.submitJob(jobConf);
    JobID id = runJob.getID();
    JobInfo jInfo = wovenClient.getJobInfo(id);
    Assert.assertNotNull("Job information is null", jInfo);

    Assert.assertTrue("Job has not been started for 1 min.",
        jtClient.isJobStarted(id));

    TaskInfo[] taskInfos = wovenClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }

    Assert.assertTrue("Task has not been started for 1 min.",
        jtClient.isTaskStarted(taskInfo));

    tID = TaskID.downgrade(taskInfo.getTaskID());
    TaskAttemptID tAttID = new TaskAttemptID(tID,0);
    FinishTaskControlAction action = new FinishTaskControlAction(tID);

    Collection<TTClient> ttClients = cluster.getTTClients();
    for (TTClient ttClient : ttClients) {
      TTProtocol tt = ttClient.getProxy();
      tt.sendAction(action);
      ttTaskinfo = tt.getTasks();
      for (TTTaskInfo tttInfo : ttTaskinfo) {
        if (!tttInfo.isTaskCleanupTask()) {
          pid = tttInfo.getPid();
          ttClientIns = ttClient;
          ttIns = tt;
          break;
        }
      }
      if (ttClientIns != null) {
        break;
      }
    }


    Assert.assertTrue("Map process is not alive before task fails.",
        ttIns.isProcessTreeAlive(pid));

    String args[] = new String[] { "-fail-task", tAttID.toString() };
    int exitCode = runTool(jobConf, client, args);
    Assert.assertEquals("Exit Code:", 0, exitCode);

    LOG.info("Waiting till the task is failed...");
    taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    counter = 0;
    while (counter < 60) {
      if (taskInfo.getTaskStatus().length > 0) {
        if (taskInfo.getTaskStatus()[0].getRunState() ==
            TaskStatus.State.FAILED) {
          break;
        }
      }
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
      counter ++;
    }
    counter = 0;
    LOG.info("Waiting till the job is completed...");
    while (counter < 60) {
