Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol


    @SuppressWarnings("unchecked") // the cast from the opaque Token<?> is unchecked
    public void cancel(Token<?> token, Configuration conf) throws IOException {
      Token<DelegationTokenIdentifier> delToken =
          (Token<DelegationTokenIdentifier>) token;
      LOG.info("Cancelling " +
               DelegationTokenIdentifier.stringifyToken(delToken));
      ClientProtocol nn = getNNProxy(delToken, conf);
      try {
        nn.cancelDelegationToken(delToken);
      } catch (RemoteException re) {
        throw re.unwrapRemoteException(InvalidToken.class,
            AccessControlException.class);
      }
    }
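The renew path of the same token helper is symmetric to cancel(). A minimal hedged sketch, assuming the standard renewDelegationToken RPC (which returns the token's new expiry time); getNNProxy and LOG are the same helpers used above:

    @SuppressWarnings("unchecked")
    public long renew(Token<?> token, Configuration conf) throws IOException {
      Token<DelegationTokenIdentifier> delToken =
          (Token<DelegationTokenIdentifier>) token;
      LOG.info("Renewing " +
               DelegationTokenIdentifier.stringifyToken(delToken));
      ClientProtocol nn = getNNProxy(delToken, conf);
      try {
        // Returns the new expiration time of the token.
        return nn.renewDelegationToken(delToken);
      } catch (RemoteException re) {
        throw re.unwrapRemoteException(InvalidToken.class,
            AccessControlException.class);
      }
    }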


      final Map<String, String> root = buildRoot(request, doc);
      final String path = root.get("path");
      final boolean recur = "yes".equals(root.get("recursive"));
      final Pattern filter = Pattern.compile(root.get("filter"));
      final Pattern exclude = Pattern.compile(root.get("exclude"));
      ClientProtocol nnproxy = createNameNodeProxy(ugi);

      doc.declaration();
      doc.startTag("listing");
      for (Map.Entry<String,String> m : root.entrySet()) {
        doc.attribute(m.getKey(), m.getValue());
      }

      FileStatus base = nnproxy.getFileInfo(path);
      if ((base != null) && base.isDir()) {
        writeInfo(base, doc);
      }

      Stack<String> pathstack = new Stack<String>();
      pathstack.push(path);
      while (!pathstack.empty()) {
        String p = pathstack.pop();
        try {
          for (FileStatus i : nnproxy.getListing(p)) {
            if (exclude.matcher(i.getPath().getName()).matches()
                || !filter.matcher(i.getPath().getName()).matches()) {
              continue;
            }
            if (recur && i.isDir()) {
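The loop above is truncated just as it reaches the recursive case. A plausible completion, hedged as a sketch of the stock ListPathsServlet pattern: matching directories are pushed back onto the stack for a later iteration, every surviving entry is written out, and a RemoteException is rendered into the XML document rather than aborting the listing.

      while (!pathstack.empty()) {
        String p = pathstack.pop();
        try {
          for (FileStatus i : nnproxy.getListing(p)) {
            if (exclude.matcher(i.getPath().getName()).matches()
                || !filter.matcher(i.getPath().getName()).matches()) {
              continue;
            }
            if (recur && i.isDir()) {
              // Descend into this directory on a later iteration.
              pathstack.push(i.getPath().toUri().getPath());
            }
            writeInfo(i, doc);
          }
        } catch (RemoteException re) {
          // Report the failure inline and keep listing the rest.
          re.writeXml(p, doc);
        }
      }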

 
  /* Check whether at least two of a block's replicas are on the same rack. */
  private void checkFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
    Configuration conf = fileSys.getConf();
    ClientProtocol namenode = DFSClient.createNamenode(conf);
     
    waitForBlockReplication(name.toString(), namenode,
                            Math.min(numDatanodes, repl), -1);
   
    LocatedBlocks locations = namenode.getBlockLocations(name.toString(),0,
                                                         Long.MAX_VALUE);
    boolean isOnSameRack = true, isNotOnSameRack = true;
    for (LocatedBlock blk : locations.getLocatedBlocks()) {
      DatanodeInfo[] datanodes = blk.getLocations();
      if (datanodes.length <= 1) break;
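The snippet stops right before the rack comparison itself. A minimal sketch of the co-location check the loop is building toward, assuming DatanodeInfo.getNetworkLocation() returns the rack path as in stock HDFS:

      // For each block, test whether any two replica holders share a rack.
      boolean sameRack = false;
      for (int i = 0; i < datanodes.length && !sameRack; i++) {
        for (int j = i + 1; j < datanodes.length; j++) {
          if (datanodes[i].getNetworkLocation().equals(
              datanodes[j].getNetworkLocation())) {
            sameRack = true; // at least two replicas share a rack
            break;
          }
        }
      }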

      FileStatus srcStat = fileSys.getFileStatus(file1);
      LocatedBlocks locations = getBlockLocations(file1, srcStat.getLen());

      DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
      ClientProtocol namenode = dfs.getClient().namenode;

      String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
      assertEquals(0, corruptFiles.length);

      // Corrupt blocks in two different stripes. We can fix them.
      TestRaidDfs.corruptBlock(file1, locations.get(0).getBlock(),
               NUM_DATANODES, true, dfsCluster); // delete block
      TestRaidDfs.corruptBlock(file1, locations.get(4).getBlock(),
               NUM_DATANODES, false, dfsCluster); // corrupt block
      TestRaidDfs.corruptBlock(file1, locations.get(6).getBlock(),
               NUM_DATANODES, true, dfsCluster); // delete last (partial) block
      LocatedBlock[] toReport = new LocatedBlock[3];
      toReport[0] = locations.get(0);
      toReport[1] = locations.get(4);
      toReport[2] = locations.get(6);
      namenode.reportBadBlocks(toReport);

      corruptFiles = DFSUtil.getCorruptFiles(dfs);
      assertEquals(1, corruptFiles.length);
      assertEquals(file1.toString(), corruptFiles[0]);
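reportBadBlocks() takes the affected LocatedBlock entries directly, so a caller that discovers a single corrupt block can report just that one. A minimal sketch reusing the names from the test above:

      // Report one known-corrupt block; the name-node marks its replicas
      // corrupt, and the file then shows up in the corrupt-files query.
      namenode.reportBadBlocks(new LocatedBlock[] { locations.get(0) });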

   * name-node proxy.
   */
  public void testClientUpdateMethodList() throws IOException {
    InetSocketAddress addr = cluster.getNameNode().getNameNodeDNAddress();
    DFSClient client = new DFSClient(addr, cluster.getNameNode().getConf());
    ClientProtocol oldNamenode = client.namenode;
   
    // The client's name-node proxy should stay the same as long as the
    // name-node reports the same methods fingerprint.
    //
    OutputStream os = client.create("/testClientUpdateMethodList.txt", true);
    os.write(66);
    os.close();
    TestCase.assertSame(oldNamenode, client.namenode);   
    int oldFingerprint = cluster.getNameNode().getClientProtocolMethodsFingerprint();
    TestCase.assertEquals(oldFingerprint, client.namenodeProtocolProxy
        .getMethodsFingerprint());
   
    // The name-node's fingerprint now differs from the one the client cached,
    // so the client is supposed to fetch a new proxy.
    //
    cluster.getNameNode().setClientProtocolMethodsFingerprint(666);
    os = client.create("/testClientUpdateMethodList1.txt", true);
    os.write(88);
    os.close();
    TestCase.assertNotSame(oldNamenode, client.namenode);
    // Since the name-node's actual method list didn't change, the fingerprint
    // obtained through the new proxy should equal the previous one.
    TestCase.assertEquals(oldFingerprint, client.namenodeProtocolProxy
        .getMethodsFingerprint());
   
    // With the original fingerprint restored, the client's name-node proxy
    // should once again stay the same.
    //
    ClientProtocol namenode1 = client.namenode;
    cluster.getNameNode().setClientProtocolMethodsFingerprint(oldFingerprint);
    DFSInputStream dis = client.open("/testClientUpdateMethodList.txt");
    int val = dis.read();
    TestCase.assertEquals(66, val);
    dis.close();

  @Test
  public void testCreateFile() throws Exception {
    InjectionHandler.set(new TestHandler());
    cluster.clearZooKeeperNode(0);
    ClientProtocol namenode = ((DistributedAvatarFileSystem) fs).getClient()
        .getNameNodeRPC();
    new FailoverThread().start();
    FsPermission perm = new FsPermission((short) 0264);
    namenode.create("/test", perm, ((DistributedAvatarFileSystem) fs)
        .getClient().getClientName(), true, true, (short) 3, (long) 1024);
    LOG.info("Done with create");
    assertTrue(fs.exists(new Path("/test")));
    assertTrue(pass);
  }

  @Test
  public void testCreateFileWithoutOverwrite() throws Exception {
    InjectionHandler.set(new TestHandler());
    cluster.clearZooKeeperNode(0);
    ClientProtocol namenode = ((DistributedAvatarFileSystem) fs).getClient()
        .getNameNodeRPC();
    new FailoverThread().start();
    FsPermission perm = new FsPermission((short) 0264);
    namenode.create("/test1", perm, ((DistributedAvatarFileSystem) fs)
        .getClient().getClientName(), false, true, (short) 3, (long) 1024);
    LOG.info("Done with create");
    assertTrue(fs.exists(new Path("/test1")));
    assertTrue(pass);
  }
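The only behavioral difference between the two tests is the overwrite flag passed to create(). For reference, the shape of the seven-argument overload as inferred from the calls above; the parameter names are assumptions, not taken from the source:

  // Inferred signature of the create() RPC used in both tests.
  void create(String src,          // "/test" or "/test1"
      FsPermission masked,         // new FsPermission((short) 0264)
      String clientName,           // the DFSClient's client name
      boolean overwrite,           // true in the first test, false in the second
      boolean createParent,        // true in both tests
      short replication,           // (short) 3
      long blockSize               // 1024L
      ) throws IOException;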

  /**
   * Test that the closeFile() name-node RPC is idempotent.
   */
  public void testIdepotentCloseCalls() throws IOException {
    ClientProtocol nn = cluster.getNameNode();
    FileSystem fs = cluster.getFileSystem();
    DFSClient dfsclient = ((DistributedFileSystem) fs).dfs;
    DFSClient mockDfs = spy(dfsclient);

    ClientProtocol mockNameNode = spy(nn);
    mockDfs.namenode = mockNameNode;

    String src = "/testNameNodeFingerprintSent1.txt";
    // Path f = new Path(src);

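The body of this test is truncated after the setup. A hedged sketch of how such an idempotency check typically proceeds, assuming a two-argument complete(src, clientName) overload on this branch; the original test body is elided, so this is illustrative only:

    OutputStream out = mockDfs.create(src, true);
    out.write(1);
    out.close(); // the first complete() happens inside close()

    // A duplicate complete() for an already-closed file should also succeed
    // if the RPC is idempotent.
    TestCase.assertTrue(mockDfs.namenode.complete(src, mockDfs.clientName));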

  /**
   * Test that the addBlock() name-node RPC is idempotent.
   */
  public void testIdepotentCallsAddBlock() throws IOException {
    ClientProtocol nn = cluster.getNameNode();
    FileSystem fs = cluster.getFileSystem();
    DFSClient dfsclient = ((DistributedFileSystem) fs).dfs;

    String src = "/testNameNodeFingerprintSent1.txt";
    // Path f = new Path(src);

    DFSOutputStream dos = (DFSOutputStream) dfsclient.create(src, true,
        (short) 1, 512L);

    FSDataOutputStream a_out = new FSDataOutputStream(dos); // fs.create(f);

    for (int i = 0; i < 512; i++) {
      a_out.writeBytes("bc");
    }
    a_out.flush();

    LocatedBlocks lb = nn.getBlockLocations(src, 256, 257);
    LocatedBlock lb1 = nn.addBlockAndFetchMetaInfo(src, dfsclient.clientName,
        null, null, 512L, lb.getLocatedBlocks().get(lb.locatedBlockCount() - 1)
            .getBlock());
    LocatedBlock lb2 = nn.addBlockAndFetchMetaInfo(src, dfsclient.clientName,
        null, null, 512L, lb.getLocatedBlocks().get(lb.locatedBlockCount() - 1)
            .getBlock());
    TestCase.assertTrue("blocks: " + lb1.getBlock() + " and " + lb2.getBlock(),
        lb1.getBlock().equals(lb2.getBlock()));
  }
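Calling addBlockAndFetchMetaInfo() twice with the same previous block looks to the name-node like a client retry of the same allocation, so an idempotent implementation hands back the same block rather than allocating a second one; that is exactly what the final assertion checks.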

      final Map<String, String> root = buildRoot(request, doc);
      final String path = root.get("path");
      final boolean recur = "yes".equals(root.get("recursive"));
      final Pattern filter = Pattern.compile(root.get("filter"));
      final Pattern exclude = Pattern.compile(root.get("exclude"));
      ClientProtocol nnproxy = createNameNodeProxy(ugi);

      doc.declaration();
      doc.startTag("listing");
      for (Map.Entry<String,String> m : root.entrySet()) {
        doc.attribute(m.getKey(), m.getValue());
      }

      HdfsFileStatus base = nnproxy.getHdfsFileInfo(path);
      if ((base != null) && base.isDir()) {
        writeInfo(path, base, doc);
      }

      Stack<String> pathstack = new Stack<String>();
      pathstack.push(path);
      while (!pathstack.empty()) {
        String p = pathstack.pop();
        try {
          byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
          DirectoryListing thisListing;
          do {
            assert lastReturnedName != null;
            thisListing = nnproxy.getPartialListing(p, lastReturnedName);
            if (thisListing == null) {
              if (lastReturnedName.length == 0) {
                LOG.warn("ListPathsServlet - Path " + p + " does not exist");
              }
              break;
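The do/while above is the standard paged-listing pattern, cut off mid-loop here. A hedged sketch of how the pagination typically completes, assuming the stock DirectoryListing accessors (getPartialListing(), getLastName(), hasMore()):

          do {
            thisListing = nnproxy.getPartialListing(p, lastReturnedName);
            if (thisListing == null) {
              break; // the path was deleted between pages
            }
            for (HdfsFileStatus stat : thisListing.getPartialListing()) {
              if (recur && stat.isDir()) {
                // Descend into subdirectories on a later iteration.
                pathstack.push(new Path(p, stat.getLocalName()).toUri().getPath());
              }
              writeInfo(p, stat, doc); // one <file> or <directory> element
            }
            // Resume token: the last name returned on this page.
            lastReturnedName = thisListing.getLastName();
          } while (thisListing.hasMore());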
