Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol
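
For orientation, here is a minimal sketch of obtaining a ClientProtocol proxy and
making an RPC through it. It assumes a Hadoop 2.x classpath and a reachable
NameNode; the path /user/test/file is hypothetical.

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.hdfs.NameNodeProxies;
  import org.apache.hadoop.hdfs.protocol.ClientProtocol;
  import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

  public class ClientProtocolSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      URI nnUri = FileSystem.getDefaultUri(conf);  // taken from fs.defaultFS
      ClientProtocol namenode = NameNodeProxies
          .createProxy(conf, nnUri, ClientProtocol.class).getProxy();
      LocatedBlocks blocks =
          namenode.getBlockLocations("/user/test/file", 0, Long.MAX_VALUE);
      System.out.println("Located " + blocks.locatedBlockCount() + " block(s)");
    }
  }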


 
  /* check that at least two nodes are on the same rack */
  private void checkFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
    Configuration conf = fileSys.getConf();
    ClientProtocol namenode = NameNodeProxies.createProxy(conf, fileSys.getUri(),
        ClientProtocol.class).getProxy();
     
    waitForBlockReplication(name.toString(), namenode,
                            Math.min(numDatanodes, repl), -1);
   
    LocatedBlocks locations = namenode.getBlockLocations(name.toString(), 0,
                                                         Long.MAX_VALUE);
    FileStatus stat = fileSys.getFileStatus(name);
    BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat, 0L,
                                                         Long.MAX_VALUE);
    // verify that rack locations match
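
The waitForBlockReplication helper is not part of this excerpt. A plausible
sketch under the same signature: poll getBlockLocations until every block
reports at least the requested number of locations, with maxWaitSec < 0
meaning wait indefinitely (matching the -1 passed above).

  private void waitForBlockReplication(String filename, ClientProtocol namenode,
      int expected, long maxWaitSec) throws IOException, InterruptedException {
    long start = System.currentTimeMillis();
    while (true) {
      boolean replOk = true;
      LocatedBlocks blocks =
          namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
      for (LocatedBlock block : blocks.getLocatedBlocks()) {
        if (block.getLocations().length < expected) {
          replOk = false;
          break;
        }
      }
      if (replOk) {
        return;  // every block is sufficiently replicated
      }
      if (maxWaitSec >= 0
          && System.currentTimeMillis() - start > maxWaitSec * 1000) {
        throw new IOException("Timed out waiting for " + filename
            + " to reach replication factor " + expected);
      }
      Thread.sleep(500);
    }
  }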


    @SuppressWarnings("unchecked")
    @Override
    public long renew(Token<?> token, Configuration conf) throws IOException {
      Token<DelegationTokenIdentifier> delToken =
        (Token<DelegationTokenIdentifier>) token;
      ClientProtocol nn = getNNProxy(delToken, conf);
      try {
        return nn.renewDelegationToken(delToken);
      } catch (RemoteException re) {
        throw re.unwrapRemoteException(InvalidToken.class,
                                       AccessControlException.class);
      }
    }
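
getNNProxy is not shown in the excerpt. A plausible reconstruction, assuming
the token's service field carries the NameNode RPC address (the original may
differ, for example by resolving HA logical URIs):

  private static ClientProtocol getNNProxy(
      Token<DelegationTokenIdentifier> token, Configuration conf)
      throws IOException {
    InetSocketAddress addr = SecurityUtil.getTokenServiceAddr(token);
    URI nnUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
        + NetUtils.getHostPortString(addr));
    return NameNodeProxies.createProxy(conf, nnUri, ClientProtocol.class)
        .getProxy();
  }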

    public void cancel(Token<?> token, Configuration conf) throws IOException {
      Token<DelegationTokenIdentifier> delToken =
          (Token<DelegationTokenIdentifier>) token;
      LOG.info("Cancelling " +
               DelegationTokenIdentifier.stringifyToken(delToken));
      ClientProtocol nn = getNNProxy(delToken, conf);
      try {
        nn.cancelDelegationToken(delToken);
      } catch (RemoteException re) {
        throw re.unwrapRemoteException(InvalidToken.class,
            AccessControlException.class);
      }
    }
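
Both renew and cancel follow the same shape: resolve a proxy from the token,
make the RPC, and unwrap the RemoteException so callers see InvalidToken or
AccessControlException directly. Methods like these typically implement
Hadoop's TokenRenewer SPI (org.apache.hadoop.security.token.TokenRenewer),
which the Token class discovers via java.util.ServiceLoader.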

        // (excerpt begins mid-statement) dummyHandler wraps failoverProxyProvider
        // with a policy that retries and fails over on network exceptions
        failoverProxyProvider, RetryPolicies
        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            Integer.MAX_VALUE,
            DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
            DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
    ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
        failoverProxyProvider.getInterface().getClassLoader(),
        new Class[] { ClientProtocol.class }, dummyHandler);
   
    DFSClient client = new DFSClient(null, proxy, conf, null);
    return client;
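
The excerpt above hand-builds a proxy around a FailoverProxyProvider for test
purposes. In ordinary client code the same failover behavior comes from
configuration alone; a sketch, with "mycluster", host1, and host2 as
hypothetical names:

  Configuration conf = new Configuration();
  conf.set("fs.defaultFS", "hdfs://mycluster");
  conf.set("dfs.nameservices", "mycluster");
  conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
  conf.set("dfs.namenode.rpc-address.mycluster.nn1", "host1:8020");
  conf.set("dfs.namenode.rpc-address.mycluster.nn2", "host2:8020");
  conf.set("dfs.client.failover.proxy.provider.mycluster",
      "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
  FileSystem fs = FileSystem.get(conf);  // RPCs now fail over between nn1 and nn2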

      // nf is the reflected DFSClient#namenode field; make it writable and
      // strip its final modifier so the intercepting proxy can be swapped in
      nf.setAccessible(true);
      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(nf, nf.getModifiers() & ~Modifier.FINAL);

      ClientProtocol namenode = (ClientProtocol) nf.get(dfsc);
      if (namenode == null) {
        LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block" +
            " reordering interceptor. Continuing, but this is unexpected."
        );
        return false;
      }

      ClientProtocol cp1 = createReorderingProxy(namenode, lrb, conf);
      nf.set(dfsc, cp1);
      LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering" +
        " using class " + lrb.getClass());
    } catch (NoSuchFieldException e) {
      LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e);
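
createReorderingProxy is not shown in the excerpt. A plausible sketch using
java.lang.reflect.Proxy, assuming lrb exposes a
reorderBlocks(Configuration, LocatedBlocks, String) callback (interface and
method names here are inferred from context and may differ from the original):

  private static ClientProtocol createReorderingProxy(final ClientProtocol cp,
      final ReorderBlocks lrb, final Configuration conf) {
    return (ClientProtocol) Proxy.newProxyInstance(
        cp.getClass().getClassLoader(),
        new Class<?>[] { ClientProtocol.class },
        new InvocationHandler() {
          @Override
          public Object invoke(Object proxy, Method method, Object[] args)
              throws Throwable {
            try {
              Object res = method.invoke(cp, args);
              // intercept getBlockLocations(src, offset, length) responses
              if (res instanceof LocatedBlocks
                  && "getBlockLocations".equals(method.getName())
                  && args != null && args.length == 3
                  && args[0] instanceof String) {
                lrb.reorderBlocks(conf, (LocatedBlocks) res, (String) args[0]);
              }
              return res;
            } catch (InvocationTargetException ite) {
              throw ite.getCause();  // rethrow the underlying RPC exception
            }
          }
        });
  }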

 
  /* check that at least two nodes are on the same rack */
  private void checkFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
    Configuration conf = fileSys.getConf();
    ClientProtocol namenode = DFSClient.createNamenode(conf);
     
    waitForBlockReplication(name.toString(), namenode,
                            Math.min(numDatanodes, repl), -1);
   
    LocatedBlocks locations = namenode.getBlockLocations(name.toString(), 0,
                                                         Long.MAX_VALUE);
    FileStatus stat = fileSys.getFileStatus(name);
    BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat, 0L,
                                                         Long.MAX_VALUE);
    // verify that rack locations match

      final Pattern filter = Pattern.compile(root.get("filter"));
      final Pattern exclude = Pattern.compile(root.get("exclude"));
      final Configuration conf =
        (Configuration) getServletContext().getAttribute("name.conf");
     
      ClientProtocol nnproxy = getUGI(request, conf).doAs
        (new PrivilegedExceptionAction<ClientProtocol>() {
        @Override
        public ClientProtocol run() throws IOException {
          return createNameNodeProxy();
        }
      });

      doc.declaration();
      doc.startTag("listing");
      for (Map.Entry<String,String> m : root.entrySet()) {
        doc.attribute(m.getKey(), m.getValue());
      }

      HdfsFileStatus base = nnproxy.getFileInfo(path);
      if ((base != null) && base.isDir()) {
        writeInfo(path, base, doc);
      }

      Stack<String> pathstack = new Stack<String>();
      pathstack.push(path);
      while (!pathstack.empty()) {
        String p = pathstack.pop();
        try {
          byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
          DirectoryListing thisListing;
          do {
            assert lastReturnedName != null;
            thisListing = nnproxy.getListing(p, lastReturnedName);
            if (thisListing == null) {
              if (lastReturnedName.length == 0) {
                LOG.warn("ListPathsServlet - Path " + p + " does not exist");
              }
              break;
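
The do/while above is the standard HDFS listing-pagination idiom. In
standalone form (a sketch; /some/dir is a hypothetical path):

  byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
  DirectoryListing thisListing;
  do {
    thisListing = nnproxy.getListing("/some/dir", lastReturnedName);
    if (thisListing == null) {
      break;  // the directory no longer exists
    }
    for (HdfsFileStatus status : thisListing.getPartialListing()) {
      System.out.println(status.getLocalName());
    }
    lastReturnedName = thisListing.getLastName();  // cursor for the next page
  } while (thisListing.hasMore());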

      // create two files with one block each
      DFSTestUtil util = new DFSTestUtil("testCorruptFilesMissingBlock", 2, 1, 512);
      util.createFiles(fs, "/srcdat");

      // verify that there are no bad blocks.
      ClientProtocol namenode = DFSClient.createNamenode(conf);
      FileStatus[] badFiles = namenode.getCorruptFiles();
      assertTrue("Namenode has " + badFiles.length + " corrupt files. Expecting none.",
          badFiles.length == 0);

      // Now deliberately remove one block
      File data_dir = new File(System.getProperty("test.build.data"),
          "dfs/data/data1/current/finalized");
      assertTrue("data directory does not exist", data_dir.exists());
      File[] blocks = data_dir.listFiles();
      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
      for (int idx = 0; idx < blocks.length; idx++) {
        if (!blocks[idx].getName().startsWith("blk_")) {
          continue;
        }
        LOG.info("Deliberately removing file "+blocks[idx].getName());
        assertTrue("Cannot remove file.", blocks[idx].delete());
        break;
      }

      badFiles = namenode.getCorruptFiles();
      while (badFiles.length == 0) {
        Thread.sleep(1000);
        badFiles = namenode.getCorruptFiles();
      }
      LOG.info("Namenode has bad files. " + badFiles.length);
      assertTrue("Namenode has " + badFiles.length + " bad files. Expecting 1.",
          badFiles.length == 1);
      util.cleanup(fs, "/srcdat");
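
Note that getCorruptFiles comes from an older revision of ClientProtocol;
later Hadoop releases replaced it with listCorruptFileBlocks(String path,
String cookie), which returns corrupt file blocks in batches rather than all
at once.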

      // create four files with one block each
      DFSTestUtil util = new DFSTestUtil("testMaxCorruptFiles", 4, 1, 512);
      util.createFiles(fs, "/srcdat2");

      // verify that there are no bad blocks.
      ClientProtocol namenode = DFSClient.createNamenode(conf);
      FileStatus[] badFiles = namenode.getCorruptFiles();
      assertTrue("Namenode has " + badFiles.length + " corrupt files. Expecting none.",
          badFiles.length == 0);

      // Now deliberately remove blocks from every file in this data dir
      File data_dir = new File(System.getProperty("test.build.data"),
          "dfs/data/data1/current/finalized");
      assertTrue("data directory does not exist", data_dir.exists());
      File[] blocks = data_dir.listFiles();
      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
      for (int idx = 0; idx < blocks.length; idx++) {
        if (!blocks[idx].getName().startsWith("blk_")) {
          continue;
        }
        LOG.info("Deliberately removing file "+blocks[idx].getName());
        assertTrue("Cannot remove file.", blocks[idx].delete());
      }

      badFiles = namenode.getCorruptFiles();
      while (badFiles.length < 2) {
        badFiles = namenode.getCorruptFiles();
        Thread.sleep(10000);
      }
      badFiles = namenode.getCorruptFiles(); // once more since time has passed
      LOG.info("Namenode has bad files. " + badFiles.length);
      assertTrue("Namenode has " + badFiles.length + " bad files. Expecting 2.",
          badFiles.length == 2);
      util.cleanup(fs, "/srcdat2");
    } finally {
