Package org.apache.hadoop.examples

Examples of org.apache.hadoop.examples.SleepJob

SleepJob is a dummy MapReduce job whose map and reduce tasks simply sleep for a configurable time, which makes it a cheap, predictable workload. The snippets below come from Hadoop system tests that use it to exercise job submission, user impersonation, the distributed cache, job history retirement, high-RAM job settings, and post-job cleanup.
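The core pattern that repeats in every snippet is the same: construct a SleepJob, hand it a Configuration, let setupJobConf build a JobConf, and submit it through a JobClient. Below is a minimal, self-contained sketch; it assumes a reachable cluster configuration, and the argument order of setupJobConf (numMappers, numReducers, mapSleepTime, mapSleepCount, reduceSleepTime, reduceSleepCount) is inferred from how the snippets use it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.examples.SleepJob;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class SleepJobSmokeTest {
  public static void main(String[] args) throws Exception {
    // Assumed to carry the cluster settings (mapred.job.tracker, fs.default.name).
    Configuration conf = new Configuration();
    SleepJob job = new SleepJob();
    job.setConf(conf);
    // 1 map and 1 reduce, each sleeping 1000 ms in a single chunk.
    JobConf jobConf = job.setupJobConf(1, 1, 1000, 1, 1000, 1);
    RunningJob running = new JobClient(jobConf).submitJob(jobConf);
    running.waitForCompletion();
    System.out.println("SleepJob succeeded: " + running.isSuccessful());
  }
}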


  @Test
  public void testFilePermission() throws Exception {
    wovenClient = cluster.getJTClient().getProxy();
    Configuration conf = new Configuration(cluster.getConf());
    FinishTaskControlAction.configureControlActionForJob(conf);
    SleepJob job = new SleepJob();
    job.setConf(conf);
    conf = job.setupJobConf(1, 0, 100, 100, 100, 100);
    JobConf jconf = new JobConf(conf);
    RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
    taskController = conf.get("mapred.task.tracker.task-controller");
    // get the job info so we can get the env variables from the daemon.
    // Now wait for the task to be in the running state, only then the


    UserGroupInformation ugi =
        UserGroupInformation.createUserForTesting(user, new String[] {});
    RunningJob job = (RunningJob) ugi.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        JobClient jobClient = new JobClient(clusterConf);
        SleepJob sleepJob = new SleepJob();
        sleepJob.setConf(clusterConf);
        JobConf jobConf = sleepJob.setupJobConf(1, 0, 2000, 1000, 1000, 1000);
        RunningJob runningJob = jobClient.submitJob(jobConf);
        return runningJob;
      }
    });
    return job;
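A small aside on the cast above: UserGroupInformation.doAs is generic, so parameterizing the action with RunningJob removes the cast from Object. A sketch reusing the names from the snippet (user, clusterConf):

    UserGroupInformation ugi =
        UserGroupInformation.createUserForTesting(user, new String[] {});
    RunningJob job = ugi.doAs(new PrivilegedExceptionAction<RunningJob>() {
      @Override
      public RunningJob run() throws Exception {
        SleepJob sleepJob = new SleepJob();
        sleepJob.setConf(clusterConf);
        // Same parameters as in the snippet: 1 map, 0 reduces.
        JobConf jobConf = sleepJob.setupJobConf(1, 0, 2000, 1000, 1000, 1000);
        return new JobClient(clusterConf).submitJob(jobConf);
      }
    });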

        equals("org.apache.hadoop.mapred.LinuxTaskController")) {
      //Changing the User name
      proxyUGI = UserGroupInformation.createRemoteUser(
          "hadoop1");
      SleepJob job = new SleepJob();
      job.setConf(conf);
      final JobConf jobConf = job.setupJobConf(2, 1, 2000, 2000, 100, 100);
      String error = null;
      RunningJob runJob = null;
      // Get the JobClient as the changed remote user and then submit the job.
      try {
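The excerpt stops at the try block; judging from the variables declared above it, the submission presumably continues roughly as below. This is a sketch only: proxyUGI, jobConf, runJob, and error come from the excerpt, everything else is assumed.

      try {
        runJob = proxyUGI.doAs(new PrivilegedExceptionAction<RunningJob>() {
          @Override
          public RunningJob run() throws Exception {
            // Submit the sleep job as the remote user "hadoop1".
            return new JobClient(jobConf).submitJob(jobConf);
          }
        });
      } catch (Exception e) {
        error = e.getMessage();
      }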

    int taskTrackerCounter = 0;
    // Stores all the TaskTrackers on which tasks ran.
    ArrayList<String> taskTrackerCollection = new ArrayList<String>();

    do {
      SleepJob job = new SleepJob();
      job.setConf(conf);
      conf = job.setupJobConf(5, 1, 1000, 1000, 100, 100);
      conf.setBoolean("mapreduce.job.complete.cancel.delegation.tokens", false);
      DistributedCache.createSymlink(conf);
      URI uri = URI.create(uriPath);
      DistributedCache.addCacheFile(uri, conf);
      JobConf jconf = new JobConf(conf);
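For the DistributedCache calls above to create a usable symlink in the task working directory, the cached URI normally carries a fragment naming the link. A short sketch of how such a uriPath could look (the HDFS path and link name are illustrative, not from the excerpt):

      // A URI of the form <path>#<linkname> plus createSymlink(conf) makes the
      // cached file show up as "cachefile" in each task's working directory.
      String uriPath = "hdfs://namenode:9000/tmp/distributed-cache/cachefile.txt#cachefile";
      DistributedCache.createSymlink(conf);
      DistributedCache.addCacheFile(URI.create(uriPath), conf);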

  public void testRetiredJobsHistoryLocation() throws Exception {
    JTProtocol remoteJTClient = cluster.getJTClient().getProxy();
    int testIterationLoop = 0;

    do {
      SleepJob job = null;
      testIterationLoop++;
      job = new SleepJob();
      job.setConf(conf);
      conf = job.setupJobConf(5, 1, 100, 100, 100, 100);
      //Get the value of mapred.jobtracker.retirejob.check. If not
      //found then use 60000 milliseconds, which is the application default.
      retiredJobInterval =
        conf.getInt("mapred.jobtracker.retirejob.check", 60000);
      //Assert if retiredJobInterval is 0

    RunningJob[] rJobCollection = new RunningJob[4];
    JobID[] rJobIDCollection = new JobID[4];
    String jobHistoryDonePathString = null;
    JobInfo jInfo = null;
    for ( int noOfJobs = 0; noOfJobs < 4; noOfJobs++ ) {
      SleepJob job = null;
      testIterationLoop++;
      job = new SleepJob();
      job.setConf(conf);
      conf = job.setupJobConf(5, 1, 100, 100, 100, 100);
      conf.setBoolean("mapreduce.job.complete.cancel.delegation.tokens",
        false);
      JobConf jconf = new JobConf(conf);

      jobHistoryDonePathString = null;
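The loop body is cut off before the submission; given the arrays declared above it, each iteration presumably submits the JobConf and records the handle and ID roughly as follows (a sketch, assuming the same cluster.getJTClient().getClient() client used in the other snippets):

      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
      rJobCollection[noOfJobs] = rJob;
      rJobIDCollection[noOfJobs] = rJob.getID();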

   * @return the job id of the high-RAM job
   * @throws Exception if the method fails to run the high-RAM job
   */
  public JobID runHighRamJob (Configuration conf, JobClient jobClient,
      JTProtocol remoteJTClient,String assertMessage) throws Exception {
    SleepJob job = new SleepJob();
    String[] jobArgs = {"-D","mapred.cluster.max.map.memory.mb=2048",
                        "-D","mapred.cluster.max.reduce.memory.mb=2048",
                        "-D","mapred.cluster.map.memory.mb=1024",
                        "-D","mapreduce.job.complete.cancel.delegation.tokens=false",
                        "-D","mapred.cluster.reduce.memory.mb=1024",
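The argument array is truncated before the sleep parameters. Since SleepJob implements Tool, an array like this would typically be completed and run through ToolRunner; in the sketch below only the -D settings shown above come from the excerpt, while the sleep arguments and the ToolRunner call are assumptions.

    String[] jobArgs = {
        "-D", "mapred.cluster.max.map.memory.mb=2048",
        "-D", "mapred.cluster.max.reduce.memory.mb=2048",
        "-D", "mapred.cluster.map.memory.mb=1024",
        "-D", "mapreduce.job.complete.cancel.delegation.tokens=false",
        "-D", "mapred.cluster.reduce.memory.mb=1024",
        // Assumed sleep parameters: 1 map and 1 reduce, sleeping 1000 ms each.
        "-m", "1", "-r", "1", "-mt", "1000", "-rt", "1000"
    };
    // ToolRunner applies the generic -D options before calling SleepJob.run().
    int exitCode = ToolRunner.run(conf, job, jobArgs);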

  @Test
  public void testJobCleanupAfterJobFail() throws IOException {
    HashMap<TTClient,ArrayList<String>> map =
        new HashMap<TTClient,ArrayList<String>>();
    conf = rtClient.getDaemonConf();
    SleepJob job = new SleepJob();
    job.setConf(conf);
    JobConf jobConf = job.setupJobConf(1, 0, 10000, 0, 10, 10);
    JobClient client = jtClient.getClient();
    RunningJob runJob = client.submitJob(jobConf);
    JobID jobId = runJob.getID();
    JobInfo jobInfo = rtClient.getJobInfo(jobId);
    Assert.assertTrue("Job has not been started for 1 min",
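The assertion is cut off above. The test name suggests the job is then driven to failure so that cleanup of its files on the TaskTrackers can be checked; one way to force a failure with the old mapred API is to fail task attempts until the attempt limit is reached. A rough sketch (the TaskAttemptID construction is an assumption, not from the excerpt):

    // Fail the first attempt of map task 0; repeating this for each new attempt
    // up to mapred.map.max.attempts (default 4) fails the whole job.
    TaskAttemptID attempt0 = new TaskAttemptID(new TaskID(jobId, true, 0), 0);
    runJob.killTask(attempt0, true);   // shouldFail = true marks the attempt FAILED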

    return list;
  }
 
  private static RunningJob createJobAndSubmit() throws IOException {
    conf = rtClient.getDaemonConf();
    SleepJob job = new SleepJob();
    job.setConf(conf);
    JobConf jobConf = job.setupJobConf(3, 1, 12000, 12000, 100, 100);
    JobClient client = jtClient.getClient();
    RunningJob runJob = client.submitJob(jobConf);
    return runJob;
  }
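A typical caller of this helper blocks on the returned handle and then asserts the outcome, for example (a sketch, not taken from the excerpt):

    RunningJob runJob = createJobAndSubmit();
    runJob.waitForCompletion();
    Assert.assertTrue("Sleep job did not complete successfully", runJob.isSuccessful());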

     }
  }

 
  public JobID runSleepJob(boolean signalJob) throws Exception{
    SleepJob job = new SleepJob();
    job.setConf(conf);
    conf = job.setupJobConf(5, 1, 100, 5, 100, 5);
    // Controls the job till all verification is done; this must be set on conf
    // before conf is copied into the JobConf that is actually submitted.
    FinishTaskControlAction.configureControlActionForJob(conf);
    JobConf jconf = new JobConf(conf);
    // Submitting the job
    RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
