Package org.apache.hadoop.hdfs.server.protocol

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols


      // Checkpoint once
      secondary.doCheckpoint();

      // Now primary NN experiences failure of a volume -- fake by
      // setting its current dir to a-x permissions
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
      StorageDirectory sd0 = storage.getStorageDir(0);
      StorageDirectory sd1 = storage.getStorageDir(1);
     
      currentDir = sd0.getCurrentDir();
      currentDir.setExecutable(false);

      // Upload checkpoint when NN has a bad storage dir. This should
      // succeed and create the checkpoint in the good dir.
      secondary.doCheckpoint();
     
      GenericTestUtils.assertExists(
          new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
     
      // Restore the good dir
      currentDir.setExecutable(true);
      nn.restoreFailedStorage("true");
      nn.rollEditLog();

      // Checkpoint again -- this should upload to both dirs
      secondary.doCheckpoint();
     
      assertNNHasCheckpoints(cluster, ImmutableList.of(8));
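The snippet above only works because the NameNode has a second, healthy image directory to fall back on. A minimal sketch of the context it assumes (illustrative only: MiniDFSCluster configures a redundant pair of name directories by default, and the checkpoint-dir and HTTP-port settings a real SecondaryNameNode needs are elided):

      Configuration conf = new HdfsConfiguration();
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(0)   // no datanodes needed for checkpoint tests
          .format(true)
          .build();
      SecondaryNameNode secondary = new SecondaryNameNode(conf);
      try {
        // ... the checkpoint / volume-failure sequence shown above ...
      } finally {
        secondary.shutdown();
        cluster.shutdown();
      }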


      // Checkpoint once
      secondary.doCheckpoint();

      // Now primary NN experiences failure of its only name dir -- fake by
      // setting its current dir to a-x permissions
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
      StorageDirectory sd0 = storage.getStorageDir(0);
      assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
      currentDir = sd0.getCurrentDir();
      currentDir.setExecutable(false);

      // Try to upload checkpoint -- this should fail since there are no
      // valid storage dirs
      try {
        secondary.doCheckpoint();
        fail("Did not fail to checkpoint when there are no valid storage dirs");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "No targets in destination storage", ioe);
      }
     
      // Restore the good dir
      currentDir.setExecutable(true);
      nn.restoreFailedStorage("true");
      nn.rollEditLog();

      // Checkpoint again -- this should upload to the restored name dir
      secondary.doCheckpoint();
     
      assertNNHasCheckpoints(cluster, ImmutableList.of(8));
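restoreFailedStorage() takes its flag as a string; per the ClientProtocol contract it accepts "true", "false", or "check". A short sketch of the three modes (return-value semantics hedged from the protocol javadoc):

      boolean restoreOn = nn.restoreFailedStorage("check"); // query only
      nn.restoreFailedStorage("true");   // re-enable automatic restore, as above
      nn.restoreFailedStorage("false");  // turn it off again

The same switch is exposed to operators as hdfs dfsadmin -restoreFailedStorage true|false|check.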

      // Checkpoint once
      secondary.doCheckpoint();

      // Now primary NN saves namespace 3 times
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      nn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      for (int i = 0; i < 3; i++) {
        nn.saveNamespace();
      }
      nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
     
      // Now the secondary tries to checkpoint again with its
      // old image in memory.
      secondary.doCheckpoint();
     
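saveNamespace() is only legal while the NameNode is in safe mode, which is why the loop above is bracketed by SAFEMODE_ENTER and SAFEMODE_LEAVE. A slightly more defensive sketch of the same idiom, adding a finally guard so a failed save cannot strand the NameNode in safe mode:

      nn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      try {
        nn.saveNamespace();   // throws IOException when not in safe mode
      } finally {
        nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
      }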

          .build();
      InetSocketAddress addr = new InetSocketAddress(
        "localhost",
        cluster.getNameNodePort());
      DFSClient client = new DFSClient(addr, conf);
      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();

      // register a datanode
      DatanodeID dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
          "fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_INFO_SECURE_PORT,
          DN_IPC_PORT);
      long nnCTime = cluster.getNamesystem().getFSImage().getStorage()
          .getCTime();
      StorageInfo mockStorageInfo = mock(StorageInfo.class);
      doReturn(nnCTime).when(mockStorageInfo).getCTime();
      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockStorageInfo)
          .getLayoutVersion();
      DatanodeRegistration dnReg = new DatanodeRegistration(dnId,
          mockStorageInfo, null, VersionInfo.getVersion());
      rpcServer.registerDatanode(dnReg);

      DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
      assertEquals("Expected a registered datanode", 1, report.length);

      // register the same datanode again with a different storage ID
      dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
          "changed-fake-storage-id", DN_XFER_PORT, DN_INFO_PORT,
          DN_INFO_SECURE_PORT, DN_IPC_PORT);
      dnReg = new DatanodeRegistration(dnId,
          mockStorageInfo, null, VersionInfo.getVersion());
      rpcServer.registerDatanode(dnReg);

      report = client.datanodeReport(DatanodeReportType.ALL);
      assertEquals("Datanode with changed storage ID not recognized",
          1, report.length);
    } finally {
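The mocked StorageInfo is what lets registration succeed at all: the NameNode rejects datanodes whose layout version or cTime is incompatible, so the mock simply echoes the NameNode's own values back. A hypothetical follow-up assertion (not in the original test) that makes the "re-registration, not a second node" outcome explicit; getStorageID() on the report entry is assumed from DatanodeID of this API era:

      // Still one datanode, now carrying the updated storage ID.
      assertEquals("changed-fake-storage-id", report[0].getStorageID());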

    try {
      cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(0)
          .build();
     
      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
     
      long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
      StorageInfo mockStorageInfo = mock(StorageInfo.class);
      doReturn(nnCTime).when(mockStorageInfo).getCTime();
     
      DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
      doReturn(123).when(mockDnReg).getXferPort();
      doReturn("fake-storage-id").when(mockDnReg).getStorageID();
      doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
     
      // Should succeed when software versions are the same.
      doReturn("3.0.0").when(mockDnReg).getSoftwareVersion();
      rpcServer.registerDatanode(mockDnReg);
     
      // Should succeed when software version of DN is above minimum required by NN.
      doReturn("4.0.0").when(mockDnReg).getSoftwareVersion();
      rpcServer.registerDatanode(mockDnReg);
     
      // Should fail when software version of DN is below minimum required by NN.
      doReturn("2.0.0").when(mockDnReg).getSoftwareVersion();
      try {
        rpcServer.registerDatanode(mockDnReg);
        fail("Should not have been able to register DN with too-low version.");
      } catch (IncorrectVersionException ive) {
        GenericTestUtils.assertExceptionContains(
            "The reported DataNode version is too low", ive);
        LOG.info("Got expected exception", ive);
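The "3.0.0"/"4.0.0"/"2.0.0" cases only make sense against a configured floor. A sketch of the setup this test presumably applies before building the cluster; the key name is taken from DFSConfigKeys of the same vintage:

      // DNs reporting a version below this are refused with
      // "The reported DataNode version is too low".
      conf.set(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY,
          "3.0.0");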

    try {
      cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(0)
          .build();
     
      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
     
      long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
      StorageInfo mockStorageInfo = mock(StorageInfo.class);
      doReturn(nnCTime).when(mockStorageInfo).getCTime();
     
      DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
      doReturn(123).when(mockDnReg).getXferPort();
      doReturn("fake-storage-id").when(mockDnReg).getStorageID();
      doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
     
      // Should succeed when software versions are the same and CTimes are the
      // same.
      doReturn(VersionInfo.getVersion()).when(mockDnReg).getSoftwareVersion();
      doReturn(123).when(mockDnReg).getXferPort();
      rpcServer.registerDatanode(mockDnReg);
     
      // Should succeed when software versions are the same and CTimes are
      // different.
      doReturn(nnCTime + 1).when(mockStorageInfo).getCTime();
      rpcServer.registerDatanode(mockDnReg);
     
      // Should fail when software version of DN is different from NN and CTimes
      // are different.
      doReturn(VersionInfo.getVersion() + ".1").when(mockDnReg).getSoftwareVersion();
      try {
        rpcServer.registerDatanode(mockDnReg);
        fail("Should not have been able to register DN with different software" +
            " versions and CTimes");
      } catch (IncorrectVersionException ive) {
        GenericTestUtils.assertExceptionContains(
            "does not match CTime of NN", ive);
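Taken together with the previous example, these cases encode the rule suggested by the exception text: a DN/NN software-version mismatch is tolerated while the two storage cTimes agree (the rolling-upgrade case), and refused when both differ. A paraphrase of that check as a sketch; this is not the actual NameNode source:

      static void verifyDnVersion(String dnVersion, String nnVersion,
          long dnCTime, long nnCTime) throws IncorrectVersionException {
        if (!dnVersion.equals(nnVersion) && dnCTime != nnCTime) {
          throw new IncorrectVersionException("Reported DataNode version '"
              + dnVersion + "' does not match NameNode version '" + nnVersion
              + "' and CTime of DN does not match CTime of NN");
        }
      }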

  {
    final String exceptionMsg = "Nope, not replicated yet...";
    final int maxRetries = 1; // Allow one retry (total of two calls)
    conf.setInt(DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, maxRetries);
   
    NamenodeProtocols mockNN = mock(NamenodeProtocols.class);
    Answer<Object> answer = new ThrowsException(new IOException()) {
      int retryCount = 0;
     
      @Override
      public Object answer(InvocationOnMock invocation)
                       throws Throwable {
        retryCount++;
        System.out.println("addBlock has been called " + retryCount + " times");
        if (retryCount > maxRetries + 1) { // the first call was not a retry
          throw new IOException("Retried too many times: " + retryCount);
        } else {
          throw new RemoteException(NotReplicatedYetException.class.getName(),
                                    exceptionMsg);
        }
      }
    };
    when(mockNN.addBlock(anyString(),
                         anyString(),
                         any(ExtendedBlock.class),
                         any(DatanodeInfo[].class),
                         anyLong(), any(String[].class))).thenAnswer(answer);
   
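To exercise the retry loop, the mock is handed to a DFSClient and a small write is pushed through it; each block allocation then hits the stubbed addBlock(). A sketch of that consumption, mirroring how DFSClient is constructed elsewhere on this page (the real test also stubs getFileInfo() on the mock, elided here):

      DFSClient client = new DFSClient(null, mockNN, conf, null);
      OutputStream os = client.create("testfile", true);
      os.write(20);   // a single byte is enough to force a block allocation
      os.close();     // close() flushes, driving addBlock() and its retries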

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
      NamenodeProtocols spyNN = spy(preSpyNN);
      DFSClient client = new DFSClient(null, spyNN, conf, null);
      int maxBlockAcquires = client.getMaxBlockAcquireFailures();
      assertTrue(maxBlockAcquires > 0);

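Mockito's spy() wraps the live RPC object, so unstubbed calls still reach the real NameNode and only the methods a test overrides are intercepted. An illustrative (hypothetical) stub that fails the first getBlockLocations() call and then delegates, the kind of fault injection maxBlockAcquires is meant to absorb:

      doAnswer(new Answer<Object>() {
        private boolean failedOnce = false;
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
          if (!failedOnce) {
            failedOnce = true;
            throw new IOException("injected failure");   // first call fails
          }
          return invocation.callRealMethod();            // then pass through
        }
      }).when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());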

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
      NamenodeProtocols spyNN = spy(preSpyNN);
      DFSClient client = new DFSClient(null, spyNN, conf, null);

     
      // Make the call to addBlock() get called twice, as if it were retried
      // due to an IPC issue.
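One way to realize that comment is an Answer that invokes the real method twice and returns the second result, so the client observes exactly what a retried IPC would produce. A sketch, reusing the addBlock() matcher shape from the earlier example on this page:

      doAnswer(new Answer<Object>() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
          invocation.callRealMethod();          // first allocation
          return invocation.callRealMethod();   // the "retried" duplicate
        }
      }).when(spyNN).addBlock(anyString(), anyString(),
          any(ExtendedBlock.class), any(DatanodeInfo[].class),
          anyLong(), any(String[].class));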

   * @param path the file whose block locations are fetched from the NameNode
   * @param size how many bytes of the file to resolve into located blocks
   * @throws IOException if the block locations cannot be retrieved or a
   *           replica cannot be accessed
   */
  private void triggerFailure(String path, long size) throws IOException {
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    List<LocatedBlock> locatedBlocks =
      nn.getBlockLocations(path, 0, size).getLocatedBlocks();
   
    for (LocatedBlock lb : locatedBlocks) {
      DatanodeInfo dinfo = lb.getLocations()[1]; // second replica; assumes replication >= 2
      ExtendedBlock b = lb.getBlock();
      try {
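        // (Truncated in the original; a hypothetical completion follows.)
        // accessBlock(...) is an assumed private test helper that opens and
        // reads block b from replica dinfo, so a failed volume surfaces as
        // an IOException.
        accessBlock(dinfo, lb);
      } catch (IOException e) {
        // expected once the replica's volume has been failed
      }
    }
  }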
