Package org.apache.hadoop.hdfs.server.namenode.INode

Examples of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo


      }
      break;
    }
    case OP_DELETE_SNAPSHOT: {
      DeleteSnapshotOp deleteSnapshotOp = (DeleteSnapshotOp) op;
      BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
      List<INode> removedINodes = new ChunkedArrayList<INode>();
      final String snapshotRoot =
          renameReservedPathsOnUpgrade(deleteSnapshotOp.snapshotRoot,
              logVersion);
      fsNamesys.getSnapshotManager().deleteSnapshot(
          snapshotRoot, deleteSnapshotOp.snapshotName,
          collectedBlocks, removedINodes);
      fsNamesys.removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
      collectedBlocks.clear();
      fsNamesys.dir.removeFromInodeMap(removedINodes);
      removedINodes.clear();
     
      if (toAddRetryCache) {
        fsNamesys.addCacheEntry(deleteSnapshotOp.rpcClientId,
View Full Code Here


   */
  private boolean deleteInternal(String src, boolean recursive,
      boolean enforcePermission, boolean logRetryCache)
      throws AccessControlException, SafeModeException, UnresolvedLinkException,
             IOException {
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    List<INode> removedINodes = new ChunkedArrayList<INode>();
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    boolean ret = false;

    waitForLoadingFSImage();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot delete " + src);
      src = FSDirectory.resolvePath(src, pathComponents, dir);
      if (!recursive && dir.isNonEmptyDirectory(src)) {
        throw new PathIsNotEmptyDirectoryException(src + " is non empty");
      }
      if (enforcePermission && isPermissionEnabled) {
        checkPermission(pc, src, false, null, FsAction.WRITE, null,
            FsAction.ALL, true, false);
      }
      long mtime = now();
      // Unlink the target directory from directory tree
      long filesRemoved = dir.delete(src, collectedBlocks, removedINodes,
              mtime);
      if (filesRemoved < 0) {
        return false;
      }
      getEditLog().logDelete(src, mtime, logRetryCache);
      incrDeletedFileCount(filesRemoved);
      // Blocks/INodes will be handled later
      removePathAndBlocks(src, null, removedINodes, true);
      ret = true;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    removeBlocks(collectedBlocks); // Incremental deletion of blocks
    collectedBlocks.clear();

    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
        + src +" is removed");
    }
View Full Code Here

    CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
    if (cacheEntry != null && cacheEntry.isSuccess()) {
      return; // Return previous response
    }
    boolean success = false;
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot delete snapshot for " + snapshotRoot);
      if (isPermissionEnabled) {
        checkOwner(pc, snapshotRoot);
      }

      List<INode> removedINodes = new ChunkedArrayList<INode>();
      dir.writeLock();
      try {
        snapshotManager.deleteSnapshot(snapshotRoot, snapshotName,
            collectedBlocks, removedINodes);
        dir.removeFromInodeMap(removedINodes);
      } finally {
        dir.writeUnlock();
      }
      removedINodes.clear();
      getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
          cacheEntry != null);
      success = true;
    } finally {
      writeUnlock();
      RetryCache.setState(cacheEntry, success);
    }
    getEditLog().logSync();

    removeBlocks(collectedBlocks);
    collectedBlocks.clear();

    if (auditLog.isInfoEnabled() && isExternalInvocation()) {
      String rootPath = Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
      logAuditEvent(true, "deleteSnapshot", rootPath, null, null);
    }
View Full Code Here

   */
  private boolean deleteInternal(String src, boolean recursive,
      boolean enforcePermission, boolean logRetryCache)
      throws AccessControlException, SafeModeException, UnresolvedLinkException,
             IOException {
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    List<INode> removedINodes = new ArrayList<INode>();
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    boolean ret = false;
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      if (isInSafeMode()) {
        throw new SafeModeException("Cannot delete " + src, safeMode);
      }
      src = FSDirectory.resolvePath(src, pathComponents, dir);
      if (!recursive && dir.isNonEmptyDirectory(src)) {
        throw new IOException(src + " is non empty");
      }
      if (enforcePermission && isPermissionEnabled) {
        checkPermission(pc, src, false, null, FsAction.WRITE, null,
            FsAction.ALL, false);
      }
      // Unlink the target directory from directory tree
      if (!dir.delete(src, collectedBlocks, removedINodes, logRetryCache)) {
        return false;
      }
      ret = true;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    removeBlocks(collectedBlocks); // Incremental deletion of blocks
    collectedBlocks.clear();
    dir.writeLock();
    try {
      dir.removeFromInodeMap(removedINodes);
    } finally {
      dir.writeUnlock();
View Full Code Here

        throw new SafeModeException(
            "Cannot delete snapshot for " + snapshotRoot, safeMode);
      }
      checkOwner(pc, snapshotRoot);

      BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
      List<INode> removedINodes = new ArrayList<INode>();
      dir.writeLock();
      try {
        snapshotManager.deleteSnapshot(snapshotRoot, snapshotName,
            collectedBlocks, removedINodes);
        dir.removeFromInodeMap(removedINodes);
      } finally {
        dir.writeUnlock();
      }
      removedINodes.clear();
      this.removeBlocks(collectedBlocks);
      collectedBlocks.clear();
      getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
          cacheEntry != null);
      success = true;
    } finally {
      writeUnlock();
View Full Code Here

        // Collect the blocks and remove the lease for previous dst
        long filesDeleted = -1;
        if (removedDst != null) {
          undoRemoveDst = false;
          if (removedNum > 0) {
            BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
            List<INode> removedINodes = new ChunkedArrayList<INode>();
            filesDeleted = removedDst.cleanSubtree(Snapshot.CURRENT_STATE_ID,
                dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes,
                true).get(Quota.NAMESPACE);
            getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
View Full Code Here

   * @throws SnapshotAccessControlException if path is in RO snapshot
   */
  void unprotectedDelete(String src, long mtime) throws UnresolvedLinkException,
      QuotaExceededException, SnapshotAccessControlException, IOException {
    assert hasWriteLock();
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    List<INode> removedINodes = new ChunkedArrayList<INode>();

    final INodesInPath inodesInPath = getINodesInPath4Write(
        normalizePath(src), false);
    long filesRemoved = -1;
View Full Code Here

      }
      break;
    }
    case OP_DELETE_SNAPSHOT: {
      DeleteSnapshotOp deleteSnapshotOp = (DeleteSnapshotOp) op;
      BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
      List<INode> removedINodes = new ArrayList<INode>();
      fsNamesys.getSnapshotManager().deleteSnapshot(
          deleteSnapshotOp.snapshotRoot, deleteSnapshotOp.snapshotName,
          collectedBlocks, removedINodes);
      fsNamesys.removeBlocks(collectedBlocks);
      collectedBlocks.clear();
      fsNamesys.dir.removeFromInodeMap(removedINodes);
      removedINodes.clear();
     
      if (toAddRetryCache) {
        fsNamesys.addCacheEntry(deleteSnapshotOp.rpcClientId,
View Full Code Here

        // Collect the blocks and remove the lease for previous dst
        long filesDeleted = -1;
        if (removedDst != null) {
          undoRemoveDst = false;
          BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
          List<INode> removedINodes = new ArrayList<INode>();
          filesDeleted = removedDst.cleanSubtree(null,
              dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes, true)
              .get(Quota.NAMESPACE);
          getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
View Full Code Here

   * @throws SnapshotAccessControlException if path is in RO snapshot
   */
  void unprotectedDelete(String src, long mtime) throws UnresolvedLinkException,
      QuotaExceededException, SnapshotAccessControlException {
    assert hasWriteLock();
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    List<INode> removedINodes = new ArrayList<INode>();

    final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
        normalizePath(src), false);
    final long filesRemoved = deleteAllowed(inodesInPath, src) ?
View Full Code Here

TOP

Related Classes of org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.