Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol, the RPC interface that HDFS clients use to talk to the NameNode. The excerpts below come from code that programs against this interface, including HDFS itself and HBase.
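
As a quick orientation before the excerpts, here is a minimal sketch of obtaining a ClientProtocol proxy and making a single call. It assumes the older DFSUtil.createNamenode helper that the excerpts below also use, plus a reachable NameNode configured in core-site.xml; the class name ClientProtocolExample is illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsServerDefaults;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.ipc.RPC;

    public class ClientProtocolExample {
      public static void main(String[] args) throws Exception {
        // Picks up fs.default.name / fs.defaultFS from core-site.xml on the classpath.
        Configuration conf = new Configuration();

        // Build an RPC proxy to the NameNode; every method call goes over the wire.
        ClientProtocol namenode = DFSUtil.createNamenode(conf);
        try {
          // A simple round trip: ask the NameNode for its server-side defaults.
          FsServerDefaults defaults = namenode.getServerDefaults();
          System.out.println("Default block size:  " + defaults.getBlockSize());
          System.out.println("Default replication: " + defaults.getReplication());
        } finally {
          RPC.stopProxy(namenode);  // release the underlying RPC connection
        }
      }
    }

The first excerpt, from HBase's HFileSystem, goes further: it uses reflection to swap the DFSClient's private namenode field for a proxy that reorders the block locations returned by getBlockLocations (the reflective lookup of the field happens just before this excerpt).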


      // Make the DFSClient's private "namenode" field (nf) writable:
      // clear its final modifier via the Field's own "modifiers" field.
      nf.setAccessible(true);
      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(nf, nf.getModifiers() & ~Modifier.FINAL);

      ClientProtocol namenode = (ClientProtocol) nf.get(dfsc);
      if (namenode == null) {
        LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block" +
            " reordering interceptor. Continuing, but this is unexpected."
        );
        return false;
      }

      ClientProtocol cp1 = createReorderingProxy(namenode, lrb, conf);
      nf.set(dfsc, cp1);
      LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering" +
        " using class " + lrb.getClass());
    } catch (NoSuchFieldException e) {
      LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e);
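
The renew() half of an HDFS delegation-token renewer: getNNProxy resolves a ClientProtocol proxy for the NameNode identified by the token, renewDelegationToken is called on it, and any RemoteException is unwrapped back to its original type.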


    @SuppressWarnings("unchecked")
    @Override
    public long renew(Token<?> token, Configuration conf) throws IOException {
      Token<DelegationTokenIdentifier> delToken =
        (Token<DelegationTokenIdentifier>) token;
      ClientProtocol nn = getNNProxy(delToken, conf);
      try {
        return nn.renewDelegationToken(delToken);
      } catch (RemoteException re) {
        throw re.unwrapRemoteException(InvalidToken.class,
                                       AccessControlException.class);
      }
    }
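
Its cancel() counterpart follows the same pattern, logging the token before asking the NameNode to cancel it.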

    public void cancel(Token<?> token, Configuration conf) throws IOException {
      Token<DelegationTokenIdentifier> delToken =
          (Token<DelegationTokenIdentifier>) token;
      LOG.info("Cancelling " +
               DelegationTokenIdentifier.stringifyToken(delToken));
      ClientProtocol nn = getNNProxy(delToken, conf);
      try {
        nn.cancelDelegationToken(delToken);
      } catch (RemoteException re) {
        throw re.unwrapRemoteException(InvalidToken.class,
            AccessControlException.class);
      }
    }
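
An alternative renew() builds the proxy inline: DFSUtil.createRPCNamenode connects, as the current user, to the address stored in the token's service field before renewing.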

    public long renew(Token<?> token, Configuration conf) throws IOException {
      Token<DelegationTokenIdentifier> delToken =
          (Token<DelegationTokenIdentifier>) token;
      LOG.info("Renewing " +
               DelegationTokenIdentifier.stringifyToken(delToken));
      ClientProtocol nn = DFSUtil.createRPCNamenode(
          SecurityUtil.getTokenServiceAddr(delToken), conf,
          UserGroupInformation.getCurrentUser());
      try {
        return nn.renewDelegationToken(delToken);
      } catch (RemoteException re) {
        throw re.unwrapRemoteException(InvalidToken.class,
                                       AccessControlException.class);
      }
    }
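
And the matching cancel(), again creating the proxy from the token's service address.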

    public void cancel(Token<?> token, Configuration conf) throws IOException {
      Token<DelegationTokenIdentifier> delToken =
          (Token<DelegationTokenIdentifier>) token;
      LOG.info("Cancelling " +
               DelegationTokenIdentifier.stringifyToken(delToken));
      ClientProtocol nn = DFSUtil.createRPCNamenode(
          SecurityUtil.getTokenServiceAddr(delToken), conf,
          UserGroupInformation.getCurrentUser());
      try {
        nn.cancelDelegationToken(delToken);
      } catch (RemoteException re) {
        throw re.unwrapRemoteException(InvalidToken.class,
            AccessControlException.class);
      }
    }
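
The next excerpt, testDelegationTokenRpc, exercises delegation-token authentication end to end: it mocks ClientProtocol, starts an RPC server backed by a DelegationTokenSecretManager, mints a token for the current user, attaches it to that user's credentials, and then connects back through a ClientProtocol proxy to confirm the token authenticates the call. The first two lines are the tail of the test's logging setup, which raises SASL logging to ALL.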

    ((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
  }

  @Test
  public void testDelegationTokenRpc() throws Exception {
    ClientProtocol mockNN = mock(ClientProtocol.class);
    FSNamesystem mockNameSys = mock(FSNamesystem.class);
    when(mockNN.getProtocolVersion(anyString(), anyLong())).thenReturn(
        ClientProtocol.versionID);
    doReturn(ProtocolSignature.getProtocolSignature(
        mockNN, ClientProtocol.class.getName(),
        ClientProtocol.versionID, 0))
      .when(mockNN).getProtocolSignature(anyString(), anyLong(), anyInt());

    DelegationTokenSecretManager sm = new DelegationTokenSecretManager(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
        3600000, mockNameSys);
    sm.startThreads();
    final Server server = RPC.getServer(ClientProtocol.class, mockNN, ADDRESS,
        0, 5, true, conf, sm);

    server.start();

    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    String user = current.getUserName();
    Text owner = new Text(user);
    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, owner, null);
    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
        dtId, sm);
    Text host = new Text(addr.getAddress().getHostAddress() + ":"
        + addr.getPort());
    token.setService(host);
    LOG.info("Service IP address for token is " + host);
    current.addToken(token);
    current.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        ClientProtocol proxy = null;
        try {
          proxy = (ClientProtocol) RPC.getProxy(ClientProtocol.class,
              ClientProtocol.versionID, addr, conf);
          proxy.getServerDefaults();
        } finally {
          server.stop();
          if (proxy != null) {
            RPC.stopProxy(proxy);
          }
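
The checkFile helper below, taken from a test of rack-aware block placement, talks to the NameNode directly: it waits until the file reaches the expected replication and then fetches its block locations through ClientProtocol.getBlockLocations so they can be compared with what the FileSystem API reports.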

 
  /* check if at least two nodes are on the same rack */
  private void checkFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
    Configuration conf = fileSys.getConf();
    ClientProtocol namenode = DFSUtil.createNamenode(conf);
     
    waitForBlockReplication(name.toString(), namenode,
                            Math.min(numDatanodes, repl), -1);
   
    LocatedBlocks locations = namenode.getBlockLocations(name.toString(),0,
                                                         Long.MAX_VALUE);
    FileStatus stat = fileSys.getFileStatus(name);
    BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat,0L,
                                                         Long.MAX_VALUE);
    // verify that rack locations match
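
ListPathsServlet walks a directory tree through the proxy: getFileInfo fetches the status of the starting path, and getListing is called in a loop, resuming from lastReturnedName, to page through each directory while the entries are emitted as XML.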

     
      getUGI(request, conf).doAs
        (new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws IOException {
          ClientProtocol nn = createNameNodeProxy();
          doc.declaration();
          doc.startTag("listing");
          for (Map.Entry<String,String> m : root.entrySet()) {
            doc.attribute(m.getKey(), m.getValue());
          }

          HdfsFileStatus base = nn.getFileInfo(path);
          if ((base != null) && base.isDir()) {
            writeInfo(base.getFullPath(new Path(path)), base, doc);
          }

          Stack<String> pathstack = new Stack<String>();
          pathstack.push(path);
          while (!pathstack.empty()) {
            String p = pathstack.pop();
            try {
              byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;        
              DirectoryListing thisListing;
              do {
                assert lastReturnedName != null;
                thisListing = nn.getListing(p, lastReturnedName);
                if (thisListing == null) {
                  if (lastReturnedName.length == 0) {
                    LOG.warn("ListPathsServlet - Path " + p + " does not exist");
                  }
                  break;
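
Another servlet excerpt resolves the requested path with getFileInfo and, when the path names a regular file, redirects the client to a URI built from the file status, the user's identity, and an optional delegation token.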

    try {
      ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws IOException {
              ClientProtocol nn = createNameNodeProxy();
              final String path =
                request.getPathInfo() != null ? request.getPathInfo() : "/";
             
              String delegationToken =
                request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME);
             
              HdfsFileStatus info = nn.getFileInfo(path);
              if ((info != null) && !info.isDir()) {
                try {
                  response.sendRedirect(createUri(path, info, ugi, nn,
                        request, delegationToken).toURL().toString());
                } catch (URISyntaxException e) {
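
The final excerpt asks the NameNode for a ContentSummary of the requested path and serializes the result as XML attributes.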

          final PrintWriter out = response.getWriter();
          final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
          xml.declaration();
          try {
            //get content summary
            final ClientProtocol nnproxy = createNameNodeProxy();
            final ContentSummary cs = nnproxy.getContentSummary(path);

            //write xml
            xml.startTag(ContentSummary.class.getName());
            if (cs != null) {
              xml.attribute("length"        , "" + cs.getLength());


