Package org.apache.hadoop.hdfs.server.namenode

Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile

The excerpts below are drawn from HDFS snapshot and block-management tests; each shows how an INodeFile is resolved from the namespace and inspected.


        modDirStr + "file15");
    FileStatus statusBeforeDeletion10 = hdfs.getFileStatus(file10_s1);
    FileStatus statusBeforeDeletion11 = hdfs.getFileStatus(file11_s1);
    FileStatus statusBeforeDeletion12 = hdfs.getFileStatus(file12_s1);
    FileStatus statusBeforeDeletion13 = hdfs.getFileStatus(file13_s1);
    INodeFile file14Node = TestSnapshotBlocksMap.assertBlockCollection(
        file14_s2.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks_14 = file14Node.getBlocks();
    TestSnapshotBlocksMap.assertBlockCollection(file15_s2.toString(), 1, fsdir,
        blockmanager);
   
    // delete s2; deleting it combines the diff recorded in s2 into s1
    hdfs.deleteSnapshot(snapshotRoot, "s2");
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 12 + delta,
        14 * BLOCKSIZE);
   
    // check the correctness of s1
    FileStatus statusAfterDeletion10 = hdfs.getFileStatus(file10_s1);
    FileStatus statusAfterDeletion11 = hdfs.getFileStatus(file11_s1);
    FileStatus statusAfterDeletion12 = hdfs.getFileStatus(file12_s1);
    FileStatus statusAfterDeletion13 = hdfs.getFileStatus(file13_s1);
    assertEquals(statusBeforeDeletion10.toString(),
        statusAfterDeletion10.toString());
    assertEquals(statusBeforeDeletion11.toString(),
        statusAfterDeletion11.toString());
    assertEquals(statusBeforeDeletion12.toString(),
        statusAfterDeletion12.toString());
    assertEquals(statusBeforeDeletion13.toString(),
        statusAfterDeletion13.toString());
    TestSnapshotBlocksMap.assertBlockCollection(file10_s1.toString(), 1, fsdir,
        blockmanager);
    TestSnapshotBlocksMap.assertBlockCollection(file11_s1.toString(), 1, fsdir,
        blockmanager);
    TestSnapshotBlocksMap.assertBlockCollection(file12_s1.toString(), 1, fsdir,
        blockmanager);
    TestSnapshotBlocksMap.assertBlockCollection(file13_s1.toString(), 1, fsdir,
        blockmanager);
   
    // make sure file14 and file15 are not included in s1
    Path file14_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
        modDirStr + "file14");
    Path file15_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
        modDirStr + "file15");
    assertFalse(hdfs.exists(file14_s1));
    assertFalse(hdfs.exists(file15_s1));
    // file14's blocks should also have been removed from the blocksMap
    for (BlockInfo b : blocks_14) {
      assertNull(blockmanager.getBlockCollection(b));
    }
   
    INodeFile nodeFile13 = (INodeFile) fsdir.getINode(file13.toString());
    assertEquals(REPLICATION_1, nodeFile13.getBlockReplication());
    TestSnapshotBlocksMap.assertBlockCollection(file13.toString(), 1, fsdir,
        blockmanager);
   
    INodeFile nodeFile12 = (INodeFile) fsdir.getINode(file12_s1.toString());
    assertEquals(REPLICATION_1, nodeFile12.getBlockReplication());
  }
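A minimal sketch of the capture/compare pattern used above, assuming a MiniDFSCluster-backed DistributedFileSystem; the helper class and method names are illustrative, not part of the original test:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

class SnapshotStatusCheck {
  // Capture the FileStatus of each snapshot path before the snapshot deletion.
  static Map<Path, FileStatus> capture(DistributedFileSystem hdfs, Path... paths)
      throws Exception {
    Map<Path, FileStatus> before = new HashMap<>();
    for (Path p : paths) {
      before.put(p, hdfs.getFileStatus(p));
    }
    return before;
  }

  // Re-read each path after the deletion and fail if its status changed.
  static void assertUnchanged(DistributedFileSystem hdfs,
      Map<Path, FileStatus> before) throws Exception {
    for (Map.Entry<Path, FileStatus> e : before.entrySet()) {
      FileStatus after = hdfs.getFileStatus(e.getKey());
      if (!e.getValue().toString().equals(after.toString())) {
        throw new AssertionError("status changed for " + e.getKey());
      }
    }
  }
}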


    // stored in the deleted list of foo, and will be destroyed.
    hdfs.delete(foo2, true);
   
    // check if /dir3/bar still exists
    assertTrue(hdfs.exists(bar3));
    INodeFile barNode = (INodeFile) fsdir.getINode4Write(bar3.toString());
    assertSame(fsdir.getINode4Write(dir3.toString()), barNode.getParent());
  }
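A condensed sketch of the same parent-linkage assertion, assuming direct access to the test's FSDirectory; the helper name is hypothetical:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INode;

class ParentLinkCheck {
  // Resolve a file and its expected parent directory, then verify the
  // child inode is actually linked under the parent inode.
  static void assertParentLink(FSDirectory fsdir, Path file, Path parentDir)
      throws IOException {
    INode child = fsdir.getINode4Write(file.toString());
    INode parent = fsdir.getINode4Write(parentDir.toString());
    if (child.getParent() != parent) {
      throw new AssertionError(file + " is not linked under " + parentDir);
    }
  }
}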

      hdfs.setReplication(filePath, (short) DATANODE_COUNT);
      BlockManagerTestUtil.computeAllPendingWork(blkManager);

      assertEquals(1, blkManager.pendingReplications.size());
      INodeFile fileNode = fsn.getFSDirectory().getINode4Write(file).asFile();
      Block[] blocks = fileNode.getBlocks();
      assertEquals(DATANODE_COUNT - 1,
          blkManager.pendingReplications.getNumReplicas(blocks[0]));

      LocatedBlock locatedBlock = hdfs.getClient().getLocatedBlocks(file, 0)
          .get(0);
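Where a test only needs the end state rather than the pendingReplications internals, the public test utility DFSTestUtil.waitReplication can do the waiting; a sketch under the same fixture assumptions (helper name illustrative):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;

class ReplicationWait {
  // Raise the replication factor, then block until every block of the file
  // reports the target number of replicas -- no BlockManager access needed.
  static void raiseAndWait(DistributedFileSystem hdfs, Path file, short target)
      throws Exception {
    hdfs.setReplication(file, target);
    DFSTestUtil.waitReplication(hdfs, file, target);
  }
}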

    // 1. create snapshot --> create file --> append
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
   
    INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
   
    // 2. create snapshot --> modify the file --> append
    hdfs.createSnapshot(dir, "s1");
    hdfs.setReplication(file, (short) (REPLICATION - 1));
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
   
    // check corresponding inodes
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(REPLICATION - 1, fileNode.getFileReplication());
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize());

    // 3. create snapshot --> append
    hdfs.createSnapshot(dir, "s2");
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
   
    // check corresponding inodes
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(REPLICATION - 1, fileNode.getFileReplication());
    assertEquals(BLOCKSIZE * 4, fileNode.computeFileSize());
  }
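The computeFileSize() assertions above read the inode directly; the same growth is observable through the public FileStatus API, as in this sketch (helper name illustrative):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;

class AppendLengthCheck {
  // Append one block and verify the visible file length grew by exactly
  // blockSize, using FileStatus instead of INodeFile internals.
  static void appendAndCheck(DistributedFileSystem hdfs, Path file,
      long blockSize) throws Exception {
    long before = hdfs.getFileStatus(file).getLen();
    DFSTestUtil.appendFile(hdfs, file, (int) blockSize);
    long after = hdfs.getFileStatus(file).getLen();
    if (after != before + blockSize) {
      throw new AssertionError("expected " + (before + blockSize)
          + " bytes, got " + after);
    }
  }
}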

    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    out.close();
   
    // check: an INodeFileUnderConstructionWithSnapshot should be stored in
    // s0's deleted list, with size BLOCKSIZE*2
    INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize());
    INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
        .getINode(dir.toString());
    DirectoryDiff last = dirNode.getDiffs().getLast();
    Snapshot s0 = last.snapshot;
   
    // 2. append without closing stream
    out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
   
    // re-check the file size recorded for snapshot s0
    dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(s0));
   
    // 3. take snapshot --> close stream
    hdfs.createSnapshot(dir, "s1");
    out.close();
   
    // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
    // have been stored in s1's deleted list
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    last = dirNode.getDiffs().getLast();
    Snapshot s1 = last.snapshot;
    assertTrue(fileNode instanceof INodeFileWithSnapshot);
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
   
    // 4. modify file --> append without closing stream --> take snapshot -->
    // close stream
    hdfs.setReplication(file, (short) (REPLICATION - 1));
    out = appendFileWithoutClosing(file, BLOCKSIZE);
    hdfs.createSnapshot(dir, "s2");
    out.close();
   
    // re-check the file size recorded for snapshot s1
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
  }
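appendFileWithoutClosing is a test helper not shown in the excerpt; a plausible sketch, assuming it must leave the stream open so the file stays under construction:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

class AppendHelper {
  // Append `length` bytes but deliberately do NOT close the stream, leaving
  // the file under construction for the snapshot checks above.
  static HdfsDataOutputStream appendWithoutClosing(DistributedFileSystem hdfs,
      Path file, int length) throws Exception {
    byte[] toAppend = new byte[length];
    HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
    out.write(toAppend);
    return out;
  }
}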

   * @throws Exception
   */
  private void checkSnapshotFileReplication(Path currentFile,
      Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
    // First check getBlockReplication() on the INode of currentFile
    final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
    assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
    // Then check replication for every snapshot
    for (Path ss : snapshotRepMap.keySet()) {
      final INodesInPath iip = fsdir.getLastINodeInPath(ss.toString());
      final INodeFile ssInode = (INodeFile)iip.getLastINode();
      // The replication derived from INodeFileWithLink#getBlockReplication
      // should always equal expectedBlockRep
      assertEquals(expectedBlockRep, ssInode.getBlockReplication());
      // Also check the number derived from INodeFile#getFileReplication
      assertEquals(snapshotRepMap.get(ss).shortValue(),
          ssInode.getFileReplication(iip.getPathSnapshot()));
    }
  }
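getINodeFile is another helper the excerpt relies on; given the INodeFile.valueOf pattern in the assertBlockCollection helper further down this page, it presumably resolves the path through the FSDirectory, roughly:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

class INodeLookup {
  // Resolve a path to its INodeFile, failing loudly if the inode is
  // missing or is not a file.
  static INodeFile getINodeFile(FSDirectory fsdir, Path p) throws Exception {
    final String path = p.toString();
    return INodeFile.valueOf(fsdir.getINode(path), path);
  }
}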

   
    // Delete file1
    hdfs.delete(file1, true);
    // Check replication of snapshots
    for (Path ss : snapshotRepMap.keySet()) {
      final INodeFile ssInode = getINodeFile(ss);
      // The replication derived from INodeFileWithLink#getBlockReplication
      // should always equal REPLICATION here
      assertEquals(REPLICATION, ssInode.getBlockReplication());
      // Also check the number derived from INodeFile#getFileReplication
      assertEquals(snapshotRepMap.get(ss).shortValue(),
          ssInode.getFileReplication());
    }
  }
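The snapshot paths compared in these tests follow the standard ".snapshot" layout; a sketch of the path SnapshotTestHelper.getSnapshotPath presumably builds:

import org.apache.hadoop.fs.Path;

class SnapshotPaths {
  // A snapshot view of <snapshottableDir>/<rel> lives under
  // <snapshottableDir>/.snapshot/<snapshotName>/<rel>.
  static Path snapshotPath(Path snapshottableDir, String snapshotName,
      String rel) {
    return new Path(snapshottableDir, ".snapshot/" + snapshotName + "/" + rel);
  }
}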

    }
  }

  static INodeFile assertBlockCollection(String path, int numBlocks,
      final FSDirectory dir, final BlockManager blkManager) throws Exception {
    final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
    assertEquals(numBlocks, file.getBlocks().length);
    for(BlockInfo b : file.getBlocks()) {
      assertBlockCollection(blkManager, file, b);
    }
    return file;
  }
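The loop body delegates to a per-block overload that is not shown; judging from the getBlockCollection calls elsewhere on this page, it presumably asserts that the blocksMap points each block back at the owning file:

import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

class BlockCollectionCheck {
  // Each block of a live file should map back to that file's inode; after a
  // real deletion the mapping becomes null (see the assertNull checks above).
  static void assertBlockCollection(BlockManager blkManager, INodeFile file,
      BlockInfo b) {
    if (blkManager.getBlockCollection(b) != file) {
      throw new AssertionError(b + " does not map back to " + file);
    }
  }
}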

    DFSTestUtil.createFile(hdfs, file1, 2*BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, file2, 3*BLOCKSIZE, REPLICATION, seed);
   
    // Normal deletion
    {
      final INodeFile f2 = assertBlockCollection(file2.toString(), 3, fsdir,
          blockmanager);
      BlockInfo[] blocks = f2.getBlocks();
      hdfs.delete(sub2, true);
      // The INode should have been removed from the blocksMap
      for(BlockInfo b : blocks) {
        assertNull(blockmanager.getBlockCollection(b));
      }
    }
   
    // Create snapshots for sub1
    final String[] snapshots = {"s0", "s1", "s2"};
    DFSTestUtil.createFile(hdfs, file3, 5*BLOCKSIZE, REPLICATION, seed);
    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[0]);
    DFSTestUtil.createFile(hdfs, file4, 1*BLOCKSIZE, REPLICATION, seed);
    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[1]);
    DFSTestUtil.createFile(hdfs, file5, 7*BLOCKSIZE, REPLICATION, seed);
    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[2]);

    // change replication so that the inode is replaced by INodeFileWithSnapshot
    {
      INodeFile f1 = assertBlockCollection(file1.toString(), 2, fsdir,
          blockmanager);
      Assert.assertSame(INodeFile.class, f1.getClass());
      hdfs.setReplication(file1, (short)2);
      f1 = assertBlockCollection(file1.toString(), 2, fsdir, blockmanager);
      Assert.assertSame(INodeFileWithSnapshot.class, f1.getClass());
    }
   
    // Check the block information for file0
    final INodeFile f0 = assertBlockCollection(file0.toString(), 4, fsdir,
        blockmanager);
    BlockInfo[] blocks0 = f0.getBlocks();
   
    // Also check the block information for snapshot of file0
    Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0",
        file0.getName());
    assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
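None of the excerpts show the shared fixture; a minimal sketch of the setup these snapshot tests presumably run on (variable names match the fragments, the replication value is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

class SnapshotTestFixture {
  static final short REPLICATION = 3;

  // Spin up an in-process cluster and expose the NameNode internals
  // (fsdir, blockmanager) that the assertions above poke at directly.
  static void setUp() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(REPLICATION).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    FSDirectory fsdir = fsn.getFSDirectory();
    BlockManager blockmanager = fsn.getBlockManager();
    DistributedFileSystem hdfs = cluster.getFileSystem();
  }
}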

  }

  private Collection<LocatedBlock> getCompanionBlocks(
      FSNamesystem namesystem, BlockPlacementPolicyRaid policy,
      ExtendedBlock block) throws IOException {
    INodeFile inode = (INodeFile)blockManager.blocksMap.getINode(block
        .getLocalBlock());
    FileType type = policy.getFileType(inode.getFullPathName());
    return policy.getCompanionBlocks(inode.getFullPathName(), type,
        block.getLocalBlock());
  }
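A hypothetical call site for the helper above, assuming the raided-HDFS fixture provides namesystem, policy, and an ExtendedBlock blk:

// Ask the RAID placement policy which blocks share a stripe with blk,
// then print where each companion currently lives.
Collection<LocatedBlock> companions = getCompanionBlocks(namesystem, policy, blk);
for (LocatedBlock lb : companions) {
  System.out.println("companion " + lb.getBlock() + " on "
      + Arrays.toString(lb.getLocations()));
}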
