Package org.apache.hadoop.hdfs.server.namenode

Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile
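INodeFile is the NameNode's in-memory representation of a regular file: it holds the file's BlockInfo array and per-file metadata such as the expected replication factor. The excerpts below reach the class in two ways: the BlockManager maps a Block back to its owning INodeFile through the blocksMap, while the tests resolve a path to an INodeFile through the FSDirectory.

As a minimal sketch of the path-based lookup (fsdir and path are illustrative placeholders; INodeFile.valueOf throws FileNotFoundException when the inode is missing or is not a regular file):

    final INodeFile file = INodeFile.valueOf(fsdir.getINode(path), path);
    // the expected (configured) replication, not the live replica count
    final short replication = file.getFileReplication();

The first excerpt comes from the BlockManager's scan for mis-replicated blocks: each block is mapped back to its INodeFile, orphaned blocks are invalidated, and under-replicated blocks are queued (the truncated tail of the under-replication check is reconstructed below).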


    assert namesystem.hasWriteLock();

    long nrInvalid = 0, nrOverReplicated = 0, nrUnderReplicated = 0;
    neededReplications.clear();
    for (BlockInfo block : blocksMap.getBlocks()) {
      INodeFile fileINode = block.getINode();
      if (fileINode == null) {
        // block does not belong to any file
        nrInvalid++;
        addToInvalidates(block);
        continue;
      }
      // calculate current replication
      short expectedReplication = fileINode.getReplication();
      NumberReplicas num = countNodes(block);
      int numCurrentReplica = num.liveReplicas();
      // add to under-replicated queue if need to be
      if (isNeededReplication(block, expectedReplication, numCurrentReplica)) {
        if (neededReplications.add(block, numCurrentReplica,
            num.decommissionedReplicas(), expectedReplication)) {
          nrUnderReplicated++;
        }
        continue;
      }
      // ... (over-replicated blocks are counted and processed next)
    }
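The next excerpt chooses excess replicas to delete (the head of the method signature, cut off in the excerpt, is reconstructed here): the block's INodeFile is looked up before the candidate datanodes are grouped by rack.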


  private void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess,
                              Block b, short replication,
                              DatanodeDescriptor addedNode,
                              DatanodeDescriptor delNodeHint,
                              BlockPlacementPolicy replicator) {
    assert namesystem.hasWriteLock();
    // first, build a map from rack name to the datanodes on that rack
    INodeFile inode = getINode(b);
    final Map<String, List<DatanodeDescriptor>> rackMap
        = new HashMap<String, List<DatanodeDescriptor>>();
    for(final Iterator<DatanodeDescriptor> iter = nonExcess.iterator();
        iter.hasNext(); ) {
      final DatanodeDescriptor node = iter.next();
      // ... (each node is filed under its rack in rackMap)
    }
    // ... (the placement policy then picks which excess replicas to remove)
  }
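When a decommissioned datanode is re-commissioned, each of its blocks is checked against the owning file's expected replication, and any surplus is handed to processOverReplicatedBlock: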

  void processOverReplicatedBlocksOnReCommission(
      final DatanodeDescriptor srcNode) {
    final Iterator<? extends Block> it = srcNode.getBlockIterator();
    while(it.hasNext()) {
      final Block block = it.next();
      INodeFile fileINode = blocksMap.getINode(block);
      short expectedReplication = fileINode.getReplication();
      NumberReplicas num = countNodes(block);
      int numCurrentReplica = num.liveReplicas();
      if (numCurrentReplica > expectedReplication) {
        // over-replicated block
        processOverReplicatedBlock(block, expectedReplication, null, null);
      }
    }
  }
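A small accessor shows the null-check idiom for blocks that no longer belong to any file: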

  /** Get the replication factor of a block; returns 0 if it belongs to no file. */
  private int getReplication(Block block) {
    INodeFile fileINode = blocksMap.getINode(block);
    if (fileINode == null) { // block does not belong to any file
      return 0;
    }
    assert !fileINode.isDirectory() : "Block cannot belong to a directory.";
    return fileINode.getReplication();
  }
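On the test side, a helper asserts that a path resolves to an INodeFile whose blocks are all registered with the BlockManager: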


  static INodeFile assertBlockCollection(String path, int numBlocks,
     final FSDirectory dir, final BlockManager blkManager) throws Exception {
    final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
    assertEquals(numBlocks, file.getBlocks().length);
    for(BlockInfo b : file.getBlocks()) {
      assertBlockCollection(blkManager, file, b);
    }
    return file;
  }
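The helper is then used to verify that deleting a file removes its blocks from the blocksMap, while snapshots keep block collections alive: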

    DFSTestUtil.createFile(hdfs, file1, 2*BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, file2, 3*BLOCKSIZE, REPLICATION, seed);
   
    // Normal deletion
    {
      final INodeFile f2 = assertBlockCollection(file2.toString(), 3, fsdir,
          blockmanager);
      BlockInfo[] blocks = f2.getBlocks();
      hdfs.delete(sub2, true);
      // The INode should have been removed from the blocksMap
      for(BlockInfo b : blocks) {
        assertNull(blockmanager.getBlockCollection(b));
      }
    }
   
    // Create snapshots for sub1
    final String[] snapshots = {"s0", "s1", "s2"};
    DFSTestUtil.createFile(hdfs, file3, 5*BLOCKSIZE, REPLICATION, seed);
    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[0]);
    DFSTestUtil.createFile(hdfs, file4, 1*BLOCKSIZE, REPLICATION, seed);
    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[1]);
    DFSTestUtil.createFile(hdfs, file5, 7*BLOCKSIZE, REPLICATION, seed);
    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[2]);

    // change the replication of a snapshotted file: the inode keeps its
    // class but should now be marked as with-snapshot
    {
      INodeFile f1 = assertBlockCollection(file1.toString(), 2, fsdir,
          blockmanager);
      Assert.assertSame(INodeFile.class, f1.getClass());
      hdfs.setReplication(file1, (short)2);
      f1 = assertBlockCollection(file1.toString(), 2, fsdir, blockmanager);
      assertTrue(f1.isWithSnapshot());
      assertFalse(f1.isUnderConstruction());
    }
   
    // Check the block information for file0
    final INodeFile f0 = assertBlockCollection(file0.toString(), 4, fsdir,
        blockmanager);
    BlockInfo[] blocks0 = f0.getBlocks();
   
    // Also check the block information for snapshot of file0
    Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0",
        file0.getName());
    assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
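The next three excerpts append an empty block to a snapshotted file and verify that, after deletion, the snapshot copy keeps only the first, full block. First, the file itself is deleted: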

    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

    SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
    hdfs.append(bar);

    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
    BlockInfo[] blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
    cluster.getNameNodeRpc()
        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
            null, barNode.getId(), null);

    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

    barNode = fsdir.getINode4Write(bar.toString()).asFile();
    blks = barNode.getBlocks();
    assertEquals(2, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
    assertEquals(0, blks[1].getNumBytes());

    hdfs.delete(bar, true);
    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1",
        bar.getName());
    barNode = fsdir.getINode(sbar.toString()).asFile();
    blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  }
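The same scenario, deleting the file's parent directory instead: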

    final Path bar = new Path(subDir, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

    hdfs.append(bar);

    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
    BlockInfo[] blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
    cluster.getNameNodeRpc()
        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
            null, barNode.getId(), null);

    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

    barNode = fsdir.getINode4Write(bar.toString()).asFile();
    blks = barNode.getBlocks();
    assertEquals(2, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
    assertEquals(0, blks[1].getNumBytes());

    hdfs.delete(subDir, true);
    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
    barNode = fsdir.getINode(sbar.toString()).asFile();
    blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  }
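A further variant renames the file after the snapshot before deleting the parent directory: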

    final Path bar = new Path(subDir, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

    hdfs.append(bar);

    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
    BlockInfo[] blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
    cluster.getNameNodeRpc()
        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
            null, barNode.getId(), null);

    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

    // rename bar
    final Path bar2 = new Path(subDir, "bar2");
    hdfs.rename(bar, bar2);
   
    INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
    blks = bar2Node.getBlocks();
    assertEquals(2, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
    assertEquals(0, blks[1].getNumBytes());

    // delete subDir (the file was renamed, but its snapshot copy survives)
    hdfs.delete(subDir, true);
    // ... (remainder of the test omitted)
  }
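Finally, appends interleaved with snapshots: getFileReplication and computeFileSize on the current inode must reflect the latest state rather than any snapshot copy: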

    // 1. create snapshot --> create file --> append
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
   
    INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
   
    // 2. create snapshot --> modify the file --> append
    hdfs.createSnapshot(dir, "s1");
    hdfs.setReplication(file, (short) (REPLICATION - 1));
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
   
    // check corresponding inodes
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(REPLICATION - 1, fileNode.getFileReplication());
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize());

    // 3. create snapshot --> append
    hdfs.createSnapshot(dir, "s2");
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
   
    // check corresponding inodes
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(REPLICATION - 1,  fileNode.getFileReplication());
    assertEquals(BLOCKSIZE * 4, fileNode.computeFileSize());
  }
