Package org.apache.hadoop.mapreduce.jobhistory

Examples of org.apache.hadoop.mapreduce.jobhistory.NormalizedResourceEvent
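
NormalizedResourceEvent is the job-history event that records the normalized memory, in MB, that a job requests for its map or reduce tasks. The snippets below emit it either with a fixed 100 MB value when assigning a container to a task attempt, or from the resource allocator when the first map or reduce request arrives. A minimal, self-contained sketch of emitting one follows; the helper name, its parameters, and the 1536 MB value are illustrative assumptions, not taken from the snippets below.

    import org.apache.hadoop.mapreduce.TaskType;
    import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
    import org.apache.hadoop.mapreduce.jobhistory.NormalizedResourceEvent;
    import org.apache.hadoop.mapreduce.v2.api.records.JobId;
    import org.apache.hadoop.yarn.event.EventHandler;

    // Hypothetical helper: record that this job's map tasks were normalized
    // to 1536 MB. jobId and eventHandler are assumed to come from the
    // surrounding MR application master context.
    static void recordNormalizedMapMemory(JobId jobId,
        EventHandler<JobHistoryEvent> eventHandler) {
      eventHandler.handle(new JobHistoryEvent(jobId,
          new NormalizedResourceEvent(TaskType.MAP, 1536 /* MB, assumed */)));
    }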


        // Assign the container to the task attempt and record normalized
        // (100 MB) map and reduce resources in the job history.
        Container container = Container.newInstance(cId, nodeId,
            NM_HOST + ":" + NM_HTTP_PORT, resource, null, containerToken);
        JobID id = TypeConverter.fromYarn(applicationId);
        JobId jobId = TypeConverter.toYarn(id);
        getContext().getEventHandler().handle(new JobHistoryEvent(jobId,
            new NormalizedResourceEvent(
                org.apache.hadoop.mapreduce.TaskType.REDUCE,
            100)));
        getContext().getEventHandler().handle(new JobHistoryEvent(jobId,
            new NormalizedResourceEvent(
                org.apache.hadoop.mapreduce.TaskType.MAP,
            100)));
        getContext().getEventHandler().handle(
            new TaskAttemptContainerAssignedEvent(event.getAttemptID(),
                container, null));
View Full Code Here


      // First request for this task type: record the normalized value in the
      // job history and kill the job if it exceeds the largest container the
      // cluster can schedule.
      int supportedMaxContainerCapability =
          getMaxContainerCapability().getMemory();
      if (reqEvent.getAttemptID().getTaskId().getTaskType().equals(TaskType.MAP)) {
        if (mapResourceRequest == 0) {
          mapResourceRequest = reqEvent.getCapability().getMemory();
          eventHandler.handle(new JobHistoryEvent(jobId,
              new NormalizedResourceEvent(org.apache.hadoop.mapreduce.TaskType.MAP,
                  mapResourceRequest)));
          LOG.info("mapResourceRequest:"+ mapResourceRequest);
          if (mapResourceRequest > supportedMaxContainerCapability) {
            String diagMsg = "MAP capability required is more than the supported " +
            "max container capability in the cluster. Killing the Job. mapResourceRequest: " +
                mapResourceRequest + " maxContainerCapability:" + supportedMaxContainerCapability;
            LOG.info(diagMsg);
            eventHandler.handle(new JobDiagnosticsUpdateEvent(
                jobId, diagMsg));
            eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
          }
        }
        //set the rounded off memory
        reqEvent.getCapability().setMemory(mapResourceRequest);
        scheduledRequests.addMap(reqEvent);//maps are immediately scheduled
      } else {
        if (reduceResourceRequest == 0) {
          reduceResourceRequest = reqEvent.getCapability().getMemory();
          eventHandler.handle(new JobHistoryEvent(jobId,
              new NormalizedResourceEvent(
                  org.apache.hadoop.mapreduce.TaskType.REDUCE,
                  reduceResourceRequest)));
          LOG.info("reduceResourceRequest:"+ reduceResourceRequest);
          if (reduceResourceRequest > supportedMaxContainerCapability) {
            String diagMsg = "REDUCE capability required is more than the " +
View Full Code Here

      // Older variant: round the request up to a multiple of the minimum
      // slot size before recording it.
      if (reqEvent.getAttemptID().getTaskId().getTaskType().equals(TaskType.MAP)) {
        if (mapResourceReqt == 0) {
          mapResourceReqt = reqEvent.getCapability().getMemory();
          int minSlotMemSize = getMinContainerCapability().getMemory();
          mapResourceReqt = (int) Math.ceil((float) mapResourceReqt/minSlotMemSize)
              * minSlotMemSize;
          eventHandler.handle(new JobHistoryEvent(jobId,
              new NormalizedResourceEvent(org.apache.hadoop.mapreduce.TaskType.MAP,
              mapResourceReqt)));
          LOG.info("mapResourceReqt:"+mapResourceReqt);
          if (mapResourceReqt > supportedMaxContainerCapability) {
            String diagMsg = "MAP capability required is more than the supported " +
            "max container capability in the cluster. Killing the Job. mapResourceReqt: " +
            mapResourceReqt + " maxContainerCapability:" + supportedMaxContainerCapability;
            LOG.info(diagMsg);
            eventHandler.handle(new JobDiagnosticsUpdateEvent(
                jobId, diagMsg));
            eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
          }
        }
        //set the rounded off memory
        reqEvent.getCapability().setMemory(mapResourceReqt);
        scheduledRequests.addMap(reqEvent);//maps are immediately scheduled
      } else {
        if (reduceResourceReqt == 0) {
          reduceResourceReqt = reqEvent.getCapability().getMemory();
          int minSlotMemSize = getMinContainerCapability().getMemory();
          //round off on slotsize
          reduceResourceReqt = (int) Math.ceil((float)
              reduceResourceReqt/minSlotMemSize) * minSlotMemSize;
          eventHandler.handle(new JobHistoryEvent(jobId,
              new NormalizedResourceEvent(
                  org.apache.hadoop.mapreduce.TaskType.REDUCE,
              reduceResourceReqt)));
          LOG.info("reduceResourceReqt:"+reduceResourceReqt);
          if (reduceResourceReqt > supportedMaxContainerCapability) {
            String diagMsg = "REDUCE capability required is more than the " +
View Full Code Here
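
The Math.ceil expression in this variant rounds the request up to the next multiple of the minimum container (slot) size before the NormalizedResourceEvent is emitted. A quick worked example of that arithmetic, with an assumed 1024 MB minimum slot and 1500 MB request:

    // Illustrative values only: 1500 MB requested, 1024 MB minimum slot.
    int minSlotMemSize = 1024;
    int requested = 1500;
    int normalized = (int) Math.ceil((float) requested / minSlotMemSize)
        * minSlotMemSize;   // ceil(1500/1024) = 2, so normalized == 2048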

        // Same flow as above, using the older BuilderUtils.newContainer
        // factory in place of Container.newInstance.
        Container container = BuilderUtils.newContainer(cId, nodeId,
            NM_HOST + ":" + NM_HTTP_PORT, null, null, null);
        JobID id = TypeConverter.fromYarn(applicationId);
        JobId jobId = TypeConverter.toYarn(id);
        getContext().getEventHandler().handle(new JobHistoryEvent(jobId,
            new NormalizedResourceEvent(
                org.apache.hadoop.mapreduce.TaskType.REDUCE,
            100)));
        getContext().getEventHandler().handle(new JobHistoryEvent(jobId,
            new NormalizedResourceEvent(
                org.apache.hadoop.mapreduce.TaskType.MAP,
            100)));
        getContext().getEventHandler().handle(
            new TaskAttemptContainerAssignedEvent(event.getAttemptID(),
                container, null));
View Full Code Here

      // Variant without slot-size rounding: the raw request is recorded and
      // validated against the cluster's maximum container capability.
      int supportedMaxContainerCapability =
          getMaxContainerCapability().getMemory();
      if (reqEvent.getAttemptID().getTaskId().getTaskType().equals(TaskType.MAP)) {
        if (mapResourceReqt == 0) {
          mapResourceReqt = reqEvent.getCapability().getMemory();
          eventHandler.handle(new JobHistoryEvent(jobId,
              new NormalizedResourceEvent(org.apache.hadoop.mapreduce.TaskType.MAP,
              mapResourceReqt)));
          LOG.info("mapResourceReqt:"+mapResourceReqt);
          if (mapResourceReqt > supportedMaxContainerCapability) {
            String diagMsg = "MAP capability required is more than the supported " +
            "max container capability in the cluster. Killing the Job. mapResourceReqt: " +
            mapResourceReqt + " maxContainerCapability:" + supportedMaxContainerCapability;
            LOG.info(diagMsg);
            eventHandler.handle(new JobDiagnosticsUpdateEvent(
                jobId, diagMsg));
            eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
          }
        }
        //set the rounded off memory
        reqEvent.getCapability().setMemory(mapResourceReqt);
        scheduledRequests.addMap(reqEvent);//maps are immediately scheduled
      } else {
        if (reduceResourceReqt == 0) {
          reduceResourceReqt = reqEvent.getCapability().getMemory();
          eventHandler.handle(new JobHistoryEvent(jobId,
              new NormalizedResourceEvent(
                  org.apache.hadoop.mapreduce.TaskType.REDUCE,
              reduceResourceReqt)));
          LOG.info("reduceResourceReqt:"+reduceResourceReqt);
          if (reduceResourceReqt > supportedMaxContainerCapability) {
            String diagMsg = "REDUCE capability required is more than the " +
View Full Code Here
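
On the consuming side, a history-event handler can read back what was recorded. A hedged sketch, assuming NormalizedResourceEvent exposes the standard getTaskType() and getMemory() accessors for the values passed to its constructor:

    // Hypothetical handler fragment: read back the recorded normalization.
    void onNormalizedResource(NormalizedResourceEvent event) {
      org.apache.hadoop.mapreduce.TaskType type = event.getTaskType();
      int memoryMb = event.getMemory();
      System.out.println(type + " tasks normalized to " + memoryMb + " MB");
    }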
