Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.Block


                              + " does not match current recovery id "
                              + recoveryId + " for block " + lastblock);
      }

      if (deleteblock) {
        Block blockToDel = ExtendedBlock.getLocalBlock(lastblock);
        boolean remove = iFile.removeLastBlock(blockToDel);
        if (remove) {
          blockManager.removeBlockFromMap(storedBlock);
        }
      }
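The deleteblock branch above converts the client-visible ExtendedBlock back into the plain Block that the block map is keyed on. A minimal sketch of that round trip, assuming the 0.23+/2.x API where ExtendedBlock pairs a Block with a block pool id (the pool id and numbers below are made up):

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class LocalBlockDemo {
  public static void main(String[] args) {
    // block id, on-disk length, generation stamp -- all made-up values
    Block b = new Block(1000L, 64L * 1024 * 1024, 2001L);
    // "demo-pool" is a placeholder block pool id
    ExtendedBlock eb = new ExtendedBlock("demo-pool", b);
    Block local = ExtendedBlock.getLocalBlock(eb); // strips the pool id again
    System.out.println(local.getBlockId() + " len=" + local.getNumBytes());
  }
}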


      for (int i = 0; i < skip && blkIterator.hasNext(); i++) {
        blkIterator.next();
      }

      while (blkIterator.hasNext()) {
        Block blk = blkIterator.next();
        final INode inode = (INode)blockManager.getBlockCollection(blk);
        skip++;
        if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
          String src = FSDirectory.getFullPathName(inode);
          if (src.startsWith(path)) {
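The skip counter implements cursor-style paging: the caller passes in how many corrupt blocks earlier calls already returned, and the loop fast-forwards past them before collecting more. The same pattern in isolation, with arbitrary stand-in items:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class CursorPagingDemo {
  // Returns the next batch of at most 'limit' items, skipping 'skip' items first.
  static List<String> page(List<String> all, int skip, int limit) {
    List<String> batch = new ArrayList<>();
    Iterator<String> it = all.iterator();
    for (int i = 0; i < skip && it.hasNext(); i++) {
      it.next(); // fast-forward past items a previous call already returned
    }
    while (it.hasNext() && batch.size() < limit) {
      batch.add(it.next());
    }
    return batch;
  }

  public static void main(String[] args) {
    List<String> blocks = List.of("blk_1", "blk_2", "blk_3", "blk_4");
    System.out.println(page(blocks, 2, 2)); // prints [blk_3, blk_4]
  }
}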

    int headIndex;
    int curIndex;

    LOG.info("Building block list...");
    for (int i = 0; i < MAX_BLOCKS; i++) {
      blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
      blockInfoList.add(new BlockInfo(blockList.get(i), 3));
      dd.addBlock(blockInfoList.get(i));

      // index of the datanode should be 0
      assertEquals("Find datanode should be 0", 0, blockInfoList.get(i)
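Each test block above is defined by the triple that identifies a replica: block id, byte length, and generation stamp. A standalone sketch of the same construction; the stamp constant here is a local stand-in for GenerationStamp.LAST_RESERVED_STAMP:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.Block;

public class BlockListDemo {
  static final int MAX_BLOCKS = 10;
  // Placeholder value; the real constant lives on GenerationStamp.
  static final long LAST_RESERVED_STAMP = 1000L;

  public static void main(String[] args) {
    List<Block> blockList = new ArrayList<>();
    for (int i = 0; i < MAX_BLOCKS; i++) {
      blockList.add(new Block(i, 0, LAST_RESERVED_STAMP)); // id, length, genstamp
    }
    Block first = blockList.get(0);
    System.out.println(first.getBlockId() + " len=" + first.getNumBytes()
        + " gs=" + first.getGenerationStamp());
  }
}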

    // from datanode.
    if (newInfo.isUnderConstruction() && newInfo.locatedBlockCount() > 0) {
      LocatedBlock last = newInfo.get(newInfo.locatedBlockCount()-1);
      if (last.getLocations().length > 0) {
        try {
          Block newBlock = getBlockInfo(last);
          // only if the block has data (not null)
          if (newBlock != null) {
            long newBlockSize = newBlock.getNumBytes();
            newInfo.setLastBlockSize(newBlock.getBlockId(), newBlockSize);
          }
        } catch (IOException e) {
          DFSClient.LOG.debug("DFSClient file " + src +
                    " is being concurrently append to" +
                    " but datanodes probably does not have block " +
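For a file under construction, the NameNode's idea of the last block's length can lag behind the datanodes, so the client asks a datanode for the block and patches the cached size. A minimal sketch of the Block mutator involved (all values made up):

import org.apache.hadoop.hdfs.protocol.Block;

public class RefreshLengthDemo {
  public static void main(String[] args) {
    Block cached = new Block(42L, 1024L, 3001L); // stale length from the NameNode
    long freshLength = 4096L;                    // length reported by a datanode (made up)
    cached.setNumBytes(freshLength);
    System.out.println("block " + cached.getBlockId()
        + " now " + cached.getNumBytes() + " bytes");
  }
}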

      try {
        cdp = DFSClient.createClientDNProtocolProxy(datanode,
            dfsClient.conf, dfsClient.socketTimeout);

        final Block newBlock;
        if (cdp.isMethodSupported("getBlockInfo", int.class, Block.class)) {
          newBlock = cdp.getProxy().getBlockInfo(
              namespaceId, locatedblock.getBlock());
        } else {
          newBlock = cdp.getProxy().getBlockInfo(locatedblock.getBlock());
        }
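isMethodSupported lets a newer client probe whether the datanode understands the namespace-aware getBlockInfo signature before calling it, falling back to the legacy overload otherwise. A toy stand-in for that probe-then-dispatch pattern (the string registry and fake RPC methods below are illustrative, not the real proxy API):

import java.util.Set;

public class VersionedCallDemo {
  // Stand-in for cdp.isMethodSupported(...): the remote side advertises which
  // signatures it understands, and the caller picks the newest one available.
  static long getBlockLength(Set<String> supported, int namespaceId, long blockId) {
    if (supported.contains("getBlockInfo(int,long)")) {
      return remoteGetBlockInfo(namespaceId, blockId); // newer, namespace-aware call
    }
    return remoteGetBlockInfo(blockId);                // legacy signature
  }

  static long remoteGetBlockInfo(int nsId, long blockId) { return 4096; } // fake RPC
  static long remoteGetBlockInfo(long blockId) { return 4096; }           // fake RPC

  public static void main(String[] args) {
    System.out.println(getBlockLength(Set.of("getBlockInfo(int,long)"), 1, 42));
  }
}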

      chosenNode = retval.info;
      InetSocketAddress targetAddr = retval.addr;

      // try reading the block locally. if this fails, then go via
      // the datanode
      Block blk = targetBlock.getBlock();
      try {
        if (DFSClient.LOG.isDebugEnabled()) {
          DFSClient.LOG.debug("blockSeekTo shortCircuitLocalReads "
                   + dfsClient.shortCircuitLocalReads +
                   " localhost " + dfsClient.localHost +
                   " targetAddr " + targetAddr);
        }
        if (dfsClient.shortCircuitLocalReads && dfsClient.localHost != null &&
            (targetAddr.equals(dfsClient.localHost) ||
             targetAddr.getHostName().startsWith("localhost"))) {
          blockReader = BlockReaderLocal.newBlockReader(dfsClient.conf, src,
                                                 namespaceId, blk,
                                                 chosenNode,
                                                 offsetIntoBlock,
                                                 blk.getNumBytes() - offsetIntoBlock,
                                                 dfsClient.metrics,
                                                 this.verifyChecksum,
                                                 this.clearOsBuffer);
          blockReader.setReadLocal(true);
          blockReader.setFsStats(dfsClient.stats);
          return chosenNode;
        }
      } catch (IOException ex) {
        DFSClient.LOG.info("Failed to read block " + targetBlock.getBlock() +
                 " on local machine " + dfsClient.localHost +
                 ". Try via the datanode on " + targetAddr + ":"
                  + StringUtils.stringifyException(ex));
      }

      try {
        s = dfsClient.socketFactory.createSocket();
        NetUtils.connect(s, targetAddr, dfsClient.socketTimeout,
            dfsClient.ipTosValue);
        s.setSoTimeout(dfsClient.socketTimeout);

        long minReadSpeedBps = (dfsClient.numNodeLeft(targetBlock.getLocations(),
            deadNodes) > 1) ? dfsClient.minReadSpeedBps : -1;
        blockReader = BlockReader.newBlockReader(
            dfsClient.getDataTransferProtocolVersion(), namespaceId,
            s, src, blk.getBlockId(),
            blk.getGenerationStamp(),
            offsetIntoBlock, blk.getNumBytes() - offsetIntoBlock,
            buffersize, verifyChecksum,
            dfsClient.clientName, minReadSpeedBps);
        boolean isLocalHost = NetUtils.isLocalAddressWithCaching(targetAddr
            .getAddress());
        blockReader.setReadLocal(isLocalHost);
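Short-circuit reads are attempted only when the chosen datanode address belongs to this machine; otherwise the client opens a socket and a remote BlockReader, passing the block id, generation stamp, and remaining byte count taken from the Block. A rough, self-contained stand-in for that locality test (NetUtils.isLocalAddressWithCaching is the real helper; this sketch only approximates it):

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.net.SocketException;

public class LocalityCheckDemo {
  // True when the target address is loopback or bound to a local interface.
  static boolean isLocal(InetSocketAddress target) throws SocketException {
    InetAddress addr = target.getAddress();
    return addr != null
        && (addr.isLoopbackAddress() || NetworkInterface.getByInetAddress(addr) != null);
  }

  public static void main(String[] args) throws Exception {
    System.out.println(isLocal(new InetSocketAddress("127.0.0.1", 50010))); // true
  }
}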

     case DatanodeProtocol.DNA_INVALIDATE:
       //
       // Some local block(s) are obsolete and can be
       // safely garbage-collected.
       //
       Block[] toDelete = bcmd.getBlocks();
       try {
         if (blockScanner != null) {
           blockScanner.deleteBlocks(namespaceId, toDelete);
         }       
         data.invalidate(namespaceId, toDelete);
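A DNA_INVALIDATE command simply carries an array of Blocks the datanode may garbage-collect. A toy sketch of consuming such a batch (the blocks are made up):

import org.apache.hadoop.hdfs.protocol.Block;

public class InvalidateDemo {
  public static void main(String[] args) {
    // A made-up batch of obsolete replicas, like a DNA_INVALIDATE command carries.
    Block[] toDelete = {
        new Block(101L, 0L, 2001L),
        new Block(102L, 0L, 2002L),
    };
    for (Block b : toDelete) {
      System.out.println("would delete blk_" + b.getBlockId()
          + "_" + b.getGenerationStamp());
    }
  }
}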

          throw new BlockAlreadyCommittedException(e);
        } else {
          throw e;
        }
      }
      Block newblock = new Block(block.getBlockId(), block.getNumBytes(), generationstamp);

      for(BlockRecord r : syncList) {
        try {
          throwIfAfterTime(deadline);
          LOG.info("Updating block " + r + " to " + newblock);
          r.datanode.updateBlock(namespaceId, r.info.getBlock(), newblock, closeFile);
          successList.add(r.id);
        } catch (BlockRecoveryTimeoutException e) {
          throw e;
        } catch (IOException e) {
          InterDatanodeProtocol.LOG.warn("Failed to updateBlock (newblock="
              + newblock + ", datanode=" + r.id + ")", e);
        }
      }

      LOG.info("Updated blocks on syncList for block " + block + " to " + newblock);

        stopAllProxies(datanodeProxies);

      if (!successList.isEmpty()) {
        DatanodeID[] nlist = successList.toArray(new DatanodeID[successList.size()]);

        throwIfAfterTime(deadline);
        nsNamenode.commitBlockSynchronization(block,
            newblock.getGenerationStamp(), newblock.getNumBytes(), closeFile, false,
            nlist);
        DatanodeInfo[] info = new DatanodeInfo[nlist.length];
        for (int i = 0; i < nlist.length; i++) {
          info[i] = new DatanodeInfo(nlist[i]);
        }
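Recovery preserves the block id and the agreed-upon length but issues a fresh generation stamp, so replicas that missed the sync can be told apart from the ones that took it. The construction in isolation (all values made up):

import org.apache.hadoop.hdfs.protocol.Block;

public class RecoveryStampDemo {
  public static void main(String[] args) {
    Block block = new Block(77L, 1_048_576L, 3001L); // replica before recovery
    long newGenerationStamp = 3002L;                 // issued by the NameNode (made up)
    Block newblock =
        new Block(block.getBlockId(), block.getNumBytes(), newGenerationStamp);
    System.out.println("blk_" + newblock.getBlockId() + " gs "
        + block.getGenerationStamp() + " -> " + newblock.getGenerationStamp());
  }
}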

  public BlockMetaDataInfo getBlockMetaDataInfo(int namespaceId, Block block
      ) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block);
    }

    Block stored = data.getStoredBlock(namespaceId, block.getBlockId());

    if (stored == null) {
      return null;
    }
    BlockMetaDataInfo info = new BlockMetaDataInfo(stored,
                                 blockScanner.getLastScanTime(namespaceId, stored));
    if (LOG.isDebugEnabled()) {
      LOG.debug("getBlockMetaDataInfo successful block=" + stored +
                " length " + stored.getNumBytes() +
                " genstamp " + stored.getGenerationStamp());
    }

    // paranoia! verify that the contents of the stored block
    // matches the block file on disk.
   
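getBlockMetaDataInfo resolves a block id to the stored replica's length and generation stamp, returning null when the datanode has no such block. A toy stand-in for that lookup using a plain map:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hdfs.protocol.Block;

public class StoredBlockLookupDemo {
  public static void main(String[] args) {
    // Toy stand-in for data.getStoredBlock(namespaceId, blockId): the datanode
    // maps an id to the on-disk length and generation stamp it actually holds.
    Map<Long, Block> stored = new HashMap<>();
    stored.put(42L, new Block(42L, 8192L, 3001L));

    Block hit = stored.get(42L);
    if (hit == null) {
      System.out.println("unknown block");
    } else {
      System.out.println("len " + hit.getNumBytes() + " gs " + hit.getGenerationStamp());
    }
  }
}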

      DatanodeID[] datanodeids, boolean closeFile, long deadline) throws IOException {
    // If the block is already being recovered, then skip recovering it.
    // This can happen if the namenode and client start recovering the same
    // file at the same time.
    synchronized (ongoingRecovery) {
      Block tmp = new Block();
      tmp.set(block.getBlockId(), block.getNumBytes(), GenerationStamp.WILDCARD_STAMP);
      if (ongoingRecovery.get(tmp) != null) {
        String msg = "Block " + block + " is already being recovered, " +
                     " ignoring this request to recover it.";
        LOG.info(msg);
        throw new IOException(msg);
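The lookup key is built with GenerationStamp.WILDCARD_STAMP so an in-flight recovery is found regardless of the stamp it was registered under; this leans on the legacy Block equality treating the wildcard as matching any generation stamp. A simplified version of the same dedup guard, keyed by block id alone to stay version-neutral:

import java.util.HashMap;
import java.util.Map;

public class OngoingRecoveryDemo {
  static final Map<Long, Thread> ongoingRecovery = new HashMap<>();

  // Refuse to start a second recovery of the same block, mirroring the
  // ongoingRecovery guard above (keyed here by id instead of a wildcard-stamp Block).
  static void startRecovery(long blockId) {
    synchronized (ongoingRecovery) {
      if (ongoingRecovery.containsKey(blockId)) {
        throw new IllegalStateException(
            "Block blk_" + blockId + " is already being recovered");
      }
      ongoingRecovery.put(blockId, Thread.currentThread());
    }
  }

  public static void main(String[] args) {
    startRecovery(42L);
    try {
      startRecovery(42L);
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage());
    }
  }
}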
