Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.Cluster
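The snippets below show the common ways org.apache.hadoop.mapreduce.Cluster is constructed and used: from a plain Configuration, from an explicit JobTracker address, and under different user identities in access-control tests. As orientation, here is a minimal sketch (not taken from any snippet below; the job id is a placeholder) that connects, looks up a job, and closes the connection:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;

public class ClusterLookup {
  public static void main(String[] args) throws Exception {
    // Connect to the cluster described by the default configuration.
    Configuration conf = new Configuration();
    Cluster cluster = new Cluster(conf);
    try {
      // Look up a job by id (this id is a placeholder).
      Job job = cluster.getJob(JobID.forName("job_201810310000_0001"));
      if (job != null) {
        System.out.println(job.getStatus().getState());
      }
    } finally {
      cluster.close();
    }
  }
}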


  @Test
  public void testAccessToKillJob() throws Exception {
    Job job = submitSleepJob(1, 1, 100, 100, false, "u1,g1", "p1"
        + NAME_SEPARATOR + "p11", conf);
    final JobConf jobConf = miniMRCluster.createJobConf();
    Cluster cluster = null;
    JobID jobID = job.getStatus().getJobID();
    // Ensure that the JobInProgress is initialized before we issue a kill
    // signal to the job.
    JobTracker tracker = miniMRCluster.getJobTrackerRunner().getJobTracker();
    JobInProgress jip = tracker.getJob(org.apache.hadoop.mapred.JobID
        .downgrade(jobID));
    tracker.initJob(jip);
    try {
      final Configuration userConf =
          new Configuration(miniMRCluster.createJobConf());
      UserGroupInformation ugi =
          UserGroupInformation.createUserForTesting("someRandomUser",
              new String[] { "someRandomGroup" });
      cluster = ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
        public Cluster run() throws IOException {
          return new Cluster(userConf);
        }
      });
      cluster.getJob(jobID).killJob();
      fail("user 'someRandomeUser' is neither u1 nor in the administer group list");
    } catch (Exception e) {
      final Configuration userConf = new Configuration(miniMRCluster.createJobConf());
      UserGroupInformation ugi =
        UserGroupInformation.createUserForTesting("u1",new String[]{"g1"});
      cluster = ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
        public Cluster run() throws IOException {
          return new Cluster(userConf);
        }
      });
      cluster.getJob(jobID).killJob();
      // verify the running job was killed
      assertEquals("job submitted for u1 and queue p1:p11 is not killed.",
          State.KILLED, cluster.getJob(jobID).getStatus().getState());
    }
   
    job = submitSleepJob(1, 1, 100, 100, false, "u1,g1", "p1" + NAME_SEPARATOR
        + "p12", conf);
    jobID = job.getStatus().getJobID();
    // Ensure that the JobInProgress is initialized before we issue a kill
    // signal to the job.
    jip =  tracker.getJob(org.apache.hadoop.mapred.JobID.downgrade(jobID));
    tracker.initJob(jip);
    // kill the job directly through the JobTracker
    tracker.killJob(job.getJobID());
    assertEquals("job submitted for u1 and queue p1:p12 is not killed.",
        State.KILLED, cluster.getJob(jobID).getStatus().getState());
   
    final Configuration userConf = new Configuration(miniMRCluster.createJobConf());
    UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting("u1",new String[]{"g1"});
    cluster = ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
      public Cluster run() throws IOException {
        return new Cluster(userConf);
      }
    });
    job = submitSleepJob(1, 1, 10, 10, false, "u1,g1", "p1" + NAME_SEPARATOR
        + "p11", conf);
    jobID = job.getStatus().getJobID();
    // Ensure that the JobInProgress is initialized before we issue a kill
    // signal to the job.
    jip =  tracker.getJob(org.apache.hadoop.mapred.JobID.downgrade(jobID));
    tracker.initJob(jip);
    ugi =
      UserGroupInformation.createUserForTesting("u3",new String[]{"g3"});
    cluster = ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
      public Cluster run() throws IOException {
        return new Cluster(jobConf);
      }
    });
    // try killing job with user not in administer list
    try {
      cluster.getJob(jobID).killJob();
      fail("u3 not in administer list");
    } catch (Exception e) {
      ugi =
        UserGroupInformation.createUserForTesting("u1",new String[]{"g1"});
      cluster = ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
        public Cluster run() throws IOException {
          return new Cluster(jobConf);
        }
      });
      assertFalse(cluster.getJob(jobID).isComplete());
      cluster.getJob(jobID).killJob();
      // verify the running job was killed
      assertEquals("job submitted for u1 and queue p1:p11 is not killed.",
          State.KILLED, cluster.getJob(jobID).getStatus().getState());
    }
  }
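The test above repeats one pattern several times: create a test UGI with UserGroupInformation.createUserForTesting and open the Cluster inside doAs, so that later calls such as killJob are authorized as that user. A stripped-down sketch of just that pattern (user and group names are placeholders):

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.security.UserGroupInformation;

public class ClusterAsUser {
  // Open a Cluster connection whose subsequent calls run as the given test user.
  static Cluster clusterAs(String user, String group, final Configuration conf)
      throws IOException, InterruptedException {
    UserGroupInformation ugi =
        UserGroupInformation.createUserForTesting(user, new String[] { group });
    return ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
      public Cluster run() throws IOException {
        return new Cluster(conf);
      }
    });
  }
}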


        + "p11", conf);
    // kill the job by any user   
    final JobConf jobConf = miniMRCluster.createJobConf();
    UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting("u3",new String[]{"g3"});
    Cluster cluster = ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
      public Cluster run() throws IOException {
        return new Cluster(jobConf);
      }
    });
    JobID jobID = job.getStatus().getJobID();
    // Ensure that the JobInProgress is initialized before we issue a kill
    // signal to the job.
    JobInProgress jip = miniMRCluster.getJobTrackerRunner().getJobTracker()
        .getJob(org.apache.hadoop.mapred.JobID.downgrade(jobID));
    miniMRCluster.getJobTrackerRunner().getJobTracker().initJob(jip);
    cluster.getJob(jobID).killJob();
    assertEquals("job submitted for u1 and queue p1:p11 is not killed.",
        State.KILLED, cluster.getJob(jobID).getStatus().getState());
  }
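Both kill tests verify the result the same way: re-fetch the job through the Cluster and compare its status against JobStatus.State.KILLED. A compact sketch of that check, under the same API the tests use:

import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus.State;

public class KillAndVerify {
  static void killAndVerify(Cluster cluster, JobID jobID) throws Exception {
    // Ask the cluster to kill the job, then re-read its status.
    cluster.getJob(jobID).killJob();
    if (cluster.getJob(jobID).getStatus().getState() != State.KILLED) {
      throw new IllegalStateException("job " + jobID + " was not killed");
    }
  }
}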

    if (!canRun()) {
      return;
    }
    TrackerDistributedCacheManager manager =
      new FakeTrackerDistributedCacheManager(conf);
    Cluster cluster = new Cluster(conf);
    String userName = getJobOwnerName();
    File workDir = new File(new Path(TEST_ROOT_DIR, "workdir").toString());

    // Configures a job with a regular file
    Job job1 = Job.getInstance(cluster, conf);
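Job.getInstance(cluster, conf) in this snippet is the 0.21-era factory that binds a new Job to an existing Cluster connection. A minimal sketch of that construction (the job name is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;

public class JobFromCluster {
  static Job newJob(Configuration conf) throws Exception {
    // Bind a new Job to an existing cluster connection (0.21-era factory;
    // later releases replace this with Job.getInstance(Configuration)).
    Cluster cluster = new Cluster(conf);
    Job job = Job.getInstance(cluster, conf);
    job.setJobName("example"); // placeholder name
    return job;
  }
}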

 
  private void checkLocalizedPath(String visibility)
  throws IOException, LoginException, InterruptedException {
    TrackerDistributedCacheManager manager =
      new TrackerDistributedCacheManager(conf, taskController);
    Cluster cluster = new Cluster(conf);
    String userName = getJobOwnerName();
    File workDir = new File(TEST_ROOT_DIR, "workdir");
    Path cacheFile = new Path(TEST_ROOT_DIR, "fourthcachefile");
    if ("true".equals(visibility)) {
      createPublicTempFile(cacheFile);
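This snippet exercises distributed-cache localization with public versus private file visibility. As background, a job typically registers such a file through the older DistributedCache helper; a minimal sketch, assuming a placeholder path rather than the test's own file:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;

public class CacheSetup {
  static void addCacheFile(Configuration conf) throws Exception {
    // Register a file to be localized on each task node (the path is a placeholder).
    DistributedCache.addCacheFile(new URI("/tmp/fourthcachefile"), conf);
  }
}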

      // Framework history log file location
      Path logFile = new Path(doneDir, logFileName);
      FileSystem fileSys = logFile.getFileSystem(conf);

      Cluster cluster = new Cluster(conf);
      assertEquals("Client returned wrong history url", logFile.toString(),
          cluster.getJobHistoryUrl(id));
  
      // Check if the history file exists
      assertTrue("History file does not exist", fileSys.exists(logFile));

      // check if the corresponding conf file exists
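After a job completes, Cluster.getJobHistoryUrl(jobID) returns the location of its history log, which the test compares against the expected file under the done directory. A short usage sketch (the job id string is supplied by the caller):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobID;

public class HistoryUrl {
  static String historyUrl(Configuration conf, String jobIdStr) throws Exception {
    Cluster cluster = new Cluster(conf);
    try {
      // Returns the history log location recorded for the given job.
      return cluster.getJobHistoryUrl(JobID.forName(jobIdStr));
    } finally {
      cluster.close();
    }
  }
}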

   * @param conf the job configuration.
   * @throws IOException
   */
  public void init(JobConf conf) throws IOException {
    setConf(conf);
    cluster = new Cluster(conf);
  }
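JobClient here is the old mapred-API facade; its init simply wraps the configuration in a new Cluster, so every JobClient ends up delegating to one. A usage sketch of the equivalent one-step construction:

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class OldApiClient {
  static JobClient connect(JobConf jobConf) throws Exception {
    // Equivalent to calling the no-arg constructor followed by init(jobConf):
    // either way the client ends up holding a Cluster built from the conf.
    return new JobClient(jobConf);
  }
}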

   * @param jobTrackAddr the job tracker to connect to.
   * @param conf configuration.
   */
  public JobClient(InetSocketAddress jobTrackAddr,
                   Configuration conf) throws IOException {
    cluster = new Cluster(jobTrackAddr, conf);
  }
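The address-taking constructor forwards to Cluster(InetSocketAddress, Configuration), which is useful when the JobTracker location is known but not present in the configuration. A usage sketch (host and port are placeholders):

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobClient;

public class ExplicitTracker {
  static JobClient connect(Configuration conf) throws Exception {
    // Point the client at a specific JobTracker (placeholder host and port).
    return new JobClient(new InetSocketAddress("jt.example.com", 9001), conf);
  }
}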

      throw new IOException("Invalid Output: " + outputPath);
    }
    conf.set(DST_DIR_LABEL, outputPath.toString());
    Path stagingArea;
    try {
      stagingArea = JobSubmissionFiles.getStagingDir(new Cluster(conf),
          conf);
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
    Path jobDirectory = new Path(stagingArea,
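DistCp uses JobSubmissionFiles.getStagingDir to locate the per-user staging area before creating its job directory; the call needs a live Cluster and can be interrupted, hence the wrapping. A condensed sketch of the same call:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobSubmissionFiles;

public class StagingDir {
  static Path stagingDir(Configuration conf) throws IOException {
    try {
      // Resolve the per-user staging directory used for job submission.
      return JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }
}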

  private final CountDownLatch startFlag;

  public Statistics(
    Configuration conf, int pollingInterval, CountDownLatch startFlag)
    throws IOException {
    this.cluster = new Cluster(conf);
    this.jtPollingInterval = pollingInterval;
    maxJobCompletedInInterval = conf.getInt(
      MAX_JOBS_COMPLETED_IN_POLL_INTERVAL_KEY, 1);
    this.startFlag = startFlag;
  }
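The startFlag latch lets the caller hold off polling until the cluster is ready: whoever constructs the Statistics releases the latch once setup is done. A generic sketch of that gating pattern (the worker here is hypothetical, not Gridmix's actual poller):

import java.util.concurrent.CountDownLatch;

public class GatedStart {
  public static void main(String[] args) throws InterruptedException {
    final CountDownLatch startFlag = new CountDownLatch(1);
    Thread poller = new Thread(new Runnable() {
      public void run() {
        try {
          startFlag.await(); // block until the caller signals readiness
          System.out.println("polling can begin");
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
        }
      }
    });
    poller.start();
    // ... complete setup work, then release the waiting thread:
    startFlag.countDown();
    poller.join();
  }
}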

