Package org.apache.hadoop.hdfs.server.namenode.startupprogress

Examples of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress
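
StartupProgress tracks how far the NameNode has advanced through its startup phases (LOADING_FSIMAGE, LOADING_EDITS, SAVING_CHECKPOINT, and so on) so that progress can be reported on the web UI and over JMX. Every write-side snippet below follows the same shape: fetch the singleton via NameNode.getStartupProgress(), begin a Step within a Phase, set the step's total, obtain a Counter, increment it once per item processed, and end the step. A minimal sketch of that pattern, assuming the standard startupprogress imports; the loadItems method and the StepType.INODES step are illustrative, not taken from the snippets below:

  private void loadItems(DataInput in) throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = new Step(StepType.INODES);
    prog.beginStep(Phase.LOADING_FSIMAGE, step);
    int numItems = in.readInt();  // hypothetical on-disk item count
    prog.setTotal(Phase.LOADING_FSIMAGE, step, numItems);
    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
    for (int i = 0; i < numItems; i++) {
      // ... read and apply one item from the stream ...
      counter.increment();  // one increment per processed item
    }
    prog.endStep(Phase.LOADING_FSIMAGE, step);
  }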


  /**
   * Load cache pools from fsimage
   */
  private void loadPools(DataInput in)
      throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = new Step(StepType.CACHE_POOLS);
    prog.beginStep(Phase.LOADING_FSIMAGE, step);
    int numberOfPools = in.readInt();
    prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfPools);
    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
    for (int i = 0; i < numberOfPools; i++) {
      addCachePool(FSImageSerialization.readCachePoolInfo(in));
      counter.increment();
    }
    prog.endStep(Phase.LOADING_FSIMAGE, step);
  }


  /**
   * Load cache directives from the fsimage
   */
  private void loadDirectives(DataInput in) throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = new Step(StepType.CACHE_ENTRIES);
    prog.beginStep(Phase.LOADING_FSIMAGE, step);
    int numDirectives = in.readInt();
    prog.setTotal(Phase.LOADING_FSIMAGE, step, numDirectives);
    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
    for (int i = 0; i < numDirectives; i++) {
      CacheDirectiveInfo info = FSImageSerialization.readCacheDirectiveInfo(in);
      // Get pool reference by looking it up in the map
      final String poolName = info.getPool();
      CachePool pool = cachePools.get(poolName);
      if (pool == null) {
        throw new IOException("Directive refers to pool " + poolName +
            ", which does not exist.");
      }
      CacheDirective directive =
          new CacheDirective(info.getId(), info.getPath().toUri().getPath(),
              info.getReplication(), info.getExpiration().getAbsoluteMillis());
      boolean addedDirective = pool.getDirectiveList().add(directive);
      assert addedDirective;
      if (directivesById.put(directive.getId(), directive) != null) {
        throw new IOException("A directive with ID " + directive.getId() +
            " already exists");
      }
      List<CacheDirective> directives =
          directivesByPath.get(directive.getPath());
      if (directives == null) {
        directives = new LinkedList<CacheDirective>();
        directivesByPath.put(directive.getPath(), directives);
      }
      directives.add(directive);
      counter.increment();
    }
    prog.endStep(Phase.LOADING_FSIMAGE, step);
  }

  @Test
  public void testGenerateStartupProgress() throws Exception {
    cluster.waitClusterUp();
    NamenodeJspHelper.HealthJsp jsp = new NamenodeJspHelper.HealthJsp();
    StartupProgress prog = NameNode.getStartupProgress();
    JspWriter out = mock(JspWriter.class);
    jsp.generateStartupProgress(out, prog);
    ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
    verify(out, atLeastOnce()).println(captor.capture());
    List<String> contents = captor.getAllValues();
    // assertions on the captured page contents follow in the full test
  }
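
The test above exercises the read side: consumers do not read StartupProgress directly but take an immutable snapshot of it. A hedged sketch of that read-side pattern, assuming the createView()/StartupProgressView API from the startupprogress package; the printing is illustrative:

  StartupProgressView view = prog.createView();
  for (Phase phase : view.getPhases()) {
    // Status is PENDING, RUNNING, or COMPLETE; percent complete is 0.0f-1.0f
    System.out.println(phase.getDescription() + ": " + view.getStatus(phase) +
        " (" + Math.round(100 * view.getPercentComplete(phase)) + "%)");
  }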

  /**
   * Private helper method to save delegation tokens to fsimage.
   */
  private synchronized void saveCurrentTokens(DataOutputStream out,
      String sdPath) throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
    prog.beginStep(Phase.SAVING_CHECKPOINT, step);
    prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
    Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
    out.writeInt(currentTokens.size());
    Iterator<DelegationTokenIdentifier> iter = currentTokens.keySet()
        .iterator();
    while (iter.hasNext()) {
      DelegationTokenIdentifier id = iter.next();
      id.write(out);
      DelegationTokenInformation info = currentTokens.get(id);
      out.writeLong(info.getRenewDate());
      counter.increment();
    }
    prog.endStep(Phase.SAVING_CHECKPOINT, step);
  }

  /**
   * Save the current state of allKeys.
   */
  private synchronized void saveAllKeys(DataOutputStream out, String sdPath)
      throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = new Step(StepType.DELEGATION_KEYS, sdPath);
    prog.beginStep(Phase.SAVING_CHECKPOINT, step);
    prog.setTotal(Phase.SAVING_CHECKPOINT, step, allKeys.size());
    Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
    out.writeInt(allKeys.size());
    Iterator<Integer> iter = allKeys.keySet().iterator();
    while (iter.hasNext()) {
      Integer key = iter.next();
      allKeys.get(key).write(out);
      counter.increment();
    }
    prog.endStep(Phase.SAVING_CHECKPOINT, step);
  }

  /**
   * Private helper method to load delegation tokens from fsimage.
   */
  private synchronized void loadCurrentTokens(DataInput in)
      throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = new Step(StepType.DELEGATION_TOKENS);
    prog.beginStep(Phase.LOADING_FSIMAGE, step);
    int numberOfTokens = in.readInt();
    prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfTokens);
    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
    for (int i = 0; i < numberOfTokens; i++) {
      DelegationTokenIdentifier id = new DelegationTokenIdentifier();
      id.readFields(in);
      long expiryTime = in.readLong();
      addPersistedDelegationToken(id, expiryTime);
      counter.increment();
    }
    prog.endStep(Phase.LOADING_FSIMAGE, step);
  }

  /**
   * Private helper method to load delegation keys from fsimage.
   * @param in
   * @throws IOException
   */
  private synchronized void loadAllKeys(DataInput in) throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = new Step(StepType.DELEGATION_KEYS);
    prog.beginStep(Phase.LOADING_FSIMAGE, step);
    int numberOfKeys = in.readInt();
    prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfKeys);
    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
    for (int i = 0; i < numberOfKeys; i++) {
      DelegationKey value = new DelegationKey();
      value.readFields(in);
      addKey(value);
      counter.increment();
    }
    prog.endStep(Phase.LOADING_FSIMAGE, step);
  }

    final FSImageStorageInspector inspector = storage.readAndInspectDirs(nnfs);

    isUpgradeFinalized = inspector.isUpgradeFinalized();
    List<FSImageFile> imageFiles = inspector.getLatestImages();

    StartupProgress prog = NameNode.getStartupProgress();
    prog.beginPhase(Phase.LOADING_FSIMAGE);
    File phaseFile = imageFiles.get(0).getFile();
    prog.setFile(Phase.LOADING_FSIMAGE, phaseFile.getAbsolutePath());
    prog.setSize(Phase.LOADING_FSIMAGE, phaseFile.length());
    boolean needToSave = inspector.needToSave();

    Iterable<EditLogInputStream> editStreams = null;

    initEditLog(startOpt);

    if (NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
      // If we're open for write, we're either non-HA or we're the active NN,
      // so we had better be able to load all the edits. If we're the standby
      // NN, it's OK not to be able to read all of the edits right now.
      // Meanwhile, for an HA upgrade we still write to the edit log, so
      // toAtLeastTxId needs to be set to the max-seen txid.
      // For rollback in a rolling upgrade, toAtLeastTxId needs to be set to
      // the txid right before the upgrade marker.
      long toAtLeastTxId = editLog.isOpenForWrite() ? inspector
          .getMaxSeenTxId() : 0;
      if (rollingRollback) {
        // note that the first image in imageFiles is the special checkpoint
        // for the rolling upgrade
        toAtLeastTxId = imageFiles.get(0).getCheckpointTxId() + 2;
      }
      editStreams = editLog.selectInputStreams(
          imageFiles.get(0).getCheckpointTxId() + 1,
          toAtLeastTxId, recovery, false);
    } else {
      editStreams = FSImagePreTransactionalStorageInspector
        .getEditLogStreams(storage);
    }
    int maxOpSize = conf.getInt(DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT);
    for (EditLogInputStream elis : editStreams) {
      elis.setMaxOpSize(maxOpSize);
    }
    for (EditLogInputStream l : editStreams) {
      LOG.debug("Planning to load edit log stream: " + l);
    }
    if (!editStreams.iterator().hasNext()) {
      LOG.info("No edit log streams selected.");
    }
   
    FSImageFile imageFile = null;
    for (int i = 0; i < imageFiles.size(); i++) {
      try {
        imageFile = imageFiles.get(i);
        loadFSImageFile(target, recovery, imageFile);
        break;
      } catch (IOException ioe) {
        LOG.error("Failed to load image from " + imageFile, ioe);
        target.clear();
        imageFile = null;
      }
    }
    // Failed to load any images, error out
    if (imageFile == null) {
      FSEditLog.closeAllStreams(editStreams);
      throw new IOException("Failed to load an FSImage file!");
    }
    prog.endPhase(Phase.LOADING_FSIMAGE);
   
    if (!rollingRollback) {
      long txnsAdvanced = loadEdits(editStreams, target, startOpt, recovery);
      needToSave |= needsResaveBasedOnStaleCheckpoint(imageFile.getFile(),
          txnsAdvanced);

  private long loadEdits(Iterable<EditLogInputStream> editStreams,
      FSNamesystem target, StartupOption startOpt, MetaRecoveryContext recovery)
      throws IOException {
    LOG.debug("About to load edits:\n  " + Joiner.on("\n  ").join(editStreams));
    StartupProgress prog = NameNode.getStartupProgress();
    prog.beginPhase(Phase.LOADING_EDITS);
   
    long prevLastAppliedTxId = lastAppliedTxId; 
    try {   
      FSEditLogLoader loader = new FSEditLogLoader(target, lastAppliedTxId);
     
      // Load latest edits
      for (EditLogInputStream editIn : editStreams) {
        LOG.info("Reading " + editIn + " expecting start txid #" +
              (lastAppliedTxId + 1));
        try {
          loader.loadFSEdits(editIn, lastAppliedTxId + 1, startOpt, recovery);
        } finally {
          // Update lastAppliedTxId even in case of error, since some ops may
          // have been successfully applied before the error.
          lastAppliedTxId = loader.getLastAppliedTxId();
        }
        // If we are in recovery mode, we may have skipped over some txids.
        if (editIn.getLastTxId() != HdfsConstants.INVALID_TXID) {
          lastAppliedTxId = editIn.getLastTxId();
        }
      }
    } finally {
      FSEditLog.closeAllStreams(editStreams);
      // update the counts
      updateCountForQuota(target.dir.rootDir);
    }
    prog.endPhase(Phase.LOADING_EDITS);
    return lastAppliedTxId - prevLastAppliedTxId;
  }

    saveFSImageInAllDirs(source, NameNodeFile.IMAGE, txid, null);
  }

  private synchronized void saveFSImageInAllDirs(FSNamesystem source,
      NameNodeFile nnf, long txid, Canceler canceler) throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    prog.beginPhase(Phase.SAVING_CHECKPOINT);
    if (storage.getNumStorageDirs(NameNodeDirType.IMAGE) == 0) {
      throw new IOException("No image directories available!");
    }
    if (canceler == null) {
      canceler = new Canceler();
    }
    SaveNamespaceContext ctx = new SaveNamespaceContext(
        source, txid, canceler);
   
    try {
      List<Thread> saveThreads = new ArrayList<Thread>();
      // save images into current
      for (Iterator<StorageDirectory> it
             = storage.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
        StorageDirectory sd = it.next();
        FSImageSaver saver = new FSImageSaver(ctx, sd, nnf);
        Thread saveThread = new Thread(saver, saver.toString());
        saveThreads.add(saveThread);
        saveThread.start();
      }
      waitForThreads(saveThreads);
      saveThreads.clear();
      storage.reportErrorsOnDirectories(ctx.getErrorSDs());
 
      if (storage.getNumStorageDirs(NameNodeDirType.IMAGE) == 0) {
        throw new IOException(
          "Failed to save in any storage directories while saving namespace.");
      }
      if (canceler.isCancelled()) {
        deleteCancelledCheckpoint(txid);
        ctx.checkCancelled(); // throws
        assert false : "should have thrown above!";
      }
 
      renameCheckpoint(txid, NameNodeFile.IMAGE_NEW, nnf, false);
 
      // Since we now have a new checkpoint, we can clean up some
      // old edit logs and checkpoints.
      purgeOldStorage(nnf);
    } finally {
      // Notify any threads waiting on the checkpoint to be canceled
      // that it is complete.
      ctx.markComplete();
      ctx = null;
    }
    prog.endPhase(Phase.SAVING_CHECKPOINT);
  }
