Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.DatanodeID


  @Test
  public void testAddAndRetrieve() throws Exception {
    PeerCache cache = new PeerCache(3, 100000);
    DatanodeID dnId = new DatanodeID("192.168.0.1",
          "fakehostname", "fake_datanode_id",
          100, 101, 102, 103);
    FakePeer peer = new FakePeer(dnId, false);
    cache.put(dnId, peer);
    assertTrue(!peer.isClosed());
View Full Code Here
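
The excerpt stops right after the peer is inserted. A minimal sketch of how the round trip might be verified (assumed continuation, not the original test body; it presumes PeerCache exposes size() and get(DatanodeID, boolean) as in the Hadoop 2.x test class):

    // Sketch (assumed): a single entry should be cached and retrievable
    // under the same DatanodeID; get() hands the cached peer back out.
    assertEquals(1, cache.size());
    assertEquals(peer, cache.get(dnId, false));
    peer.close();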


  @Test
  public void testExpiry() throws Exception {
    final int CAPACITY = 3;
    final int EXPIRY_PERIOD = 10;
    PeerCache cache = new PeerCache(CAPACITY, EXPIRY_PERIOD);
    DatanodeID dnIds[] = new DatanodeID[CAPACITY];
    FakePeer peers[] = new FakePeer[CAPACITY];
    for (int i = 0; i < CAPACITY; ++i) {
      dnIds[i] = new DatanodeID("192.168.0.1",
          "fakehostname_" + i, "fake_datanode_id",
          100, 101, 102, 103);
      peers[i] = new FakePeer(dnIds[i], false);
    }
    for (int i = 0; i < CAPACITY; ++i) {
View Full Code Here
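
The second loop is truncated; presumably it places each peer into the cache. A sketch of how the expiry check might then proceed (assumed continuation; EXPIRY_PERIOD is in milliseconds and the cache is assumed to run a background expiry thread):

      cache.put(dnIds[i], peers[i]);
    }
    // Sketch (assumed): once well past the expiry period, the cache's
    // cleanup thread should have evicted (and closed) every cached peer.
    Thread.sleep(EXPIRY_PERIOD * 50);
    assertEquals(0, cache.size());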

  @Test
  public void testEviction() throws Exception {
    final int CAPACITY = 3;
    PeerCache cache = new PeerCache(CAPACITY, 100000);
    DatanodeID dnIds[] = new DatanodeID[CAPACITY + 1];
    FakePeer peers[] = new FakePeer[CAPACITY + 1];
    for (int i = 0; i < dnIds.length; ++i) {
      dnIds[i] = new DatanodeID("192.168.0.1",
          "fakehostname_" + i, "fake_datanode_id_" + i,
          100, 101, 102, 103);
      peers[i] = new FakePeer(dnIds[i], false);
    }
    for (int i = 0; i < CAPACITY; ++i) {
View Full Code Here
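
The eviction check itself is cut off. A sketch of the behavior the test name implies (assumed continuation): filling the cache to CAPACITY and then adding one more entry should push the oldest key out.

      cache.put(dnIds[i], peers[i]);
    }
    // Sketch (assumed): the extra entry evicts the least-recently-added key,
    // while the newer entries remain retrievable.
    assertEquals(CAPACITY, cache.size());
    cache.put(dnIds[CAPACITY], peers[CAPACITY]);
    assertEquals(CAPACITY, cache.size());
    assertNull(cache.get(dnIds[0], false));
    assertEquals(peers[1], cache.get(dnIds[1], false));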

  @Test
  public void testMultiplePeersWithSameKey() throws Exception {
    final int CAPACITY = 3;
    PeerCache cache = new PeerCache(CAPACITY, 100000);
    DatanodeID dnId = new DatanodeID("192.168.0.1",
          "fakehostname", "fake_datanode_id",
          100, 101, 102, 103);
    HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
    for (int i = 0; i < CAPACITY; ++i) {
      FakePeer peer = new FakePeer(dnId, false);
View Full Code Here
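
The loop body is truncated; it presumably adds each FakePeer to both the multiset and the cache. A sketch of how the test might then drain the cache (assumed continuation; each get() is assumed to remove and return one cached peer for the key):

      peers.add(peer);
      cache.put(dnId, peer);
    }
    // Sketch (assumed): all peers share one DatanodeID key, so the cache
    // should hold CAPACITY entries and hand each of them back exactly once.
    assertEquals(CAPACITY, cache.size());
    while (!peers.isEmpty()) {
      Peer cached = cache.get(dnId, false);
      assertNotNull(cached);
      peers.remove(cached);
      cached.close();
    }
    assertEquals(0, cache.size());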

  @Test
  public void testDomainSocketPeers() throws Exception {
    final int CAPACITY = 3;
    PeerCache cache = new PeerCache(CAPACITY, 100000);
    DatanodeID dnId = new DatanodeID("192.168.0.1",
          "fakehostname", "fake_datanode_id",
          100, 101, 102, 103);
    HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
    for (int i = 0; i < CAPACITY; ++i) {
      FakePeer peer = new FakePeer(dnId, i == CAPACITY - 1);
View Full Code Here
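
Only the last peer is constructed with the domain-socket flag set (i == CAPACITY - 1). A sketch of the check the test name suggests (assumed continuation; get(dnId, true) is assumed to match only peers backed by a domain socket):

      peers.add(peer);
      cache.put(dnId, peer);
    }
    // Sketch (assumed): requesting a domain-socket peer returns the single
    // peer created with the flag, and a second request finds nothing.
    assertEquals(CAPACITY, cache.size());
    Peer domainPeer = cache.get(dnId, true);
    assertNotNull(domainPeer);
    assertNull(cache.get(dnId, true));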

   
    if (listDeadNodes) {
      Iterator<String> it = mustList.keySet().iterator();
      while (it.hasNext()) {
        DatanodeDescriptor dn =
            new DatanodeDescriptor(new DatanodeID(it.next()));
        dn.setLastUpdate(0);
        nodes.add(dn);
      }
    }
    return nodes;
View Full Code Here
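
In this fragment, hosts that appear in the include/exclude lists (mustList) but never registered are represented by a bare DatanodeID built from the host string alone; setLastUpdate(0) makes each synthetic descriptor report as dead, because liveness is judged from the last heartbeat timestamp. A sketch of that liveness test (method and field names assumed, roughly as in the namenode's datanode management code):

  // Sketch (assumed names): a lastUpdate of 0 is always older than
  // now - heartbeatExpireInterval, so these descriptors show up as dead.
  boolean isDatanodeDead(DatanodeDescriptor node) {
    return node.getLastUpdate() < (System.currentTimeMillis() - heartbeatExpireInterval);
  }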

      return;
    }
    boolean allAlive = false;
    while (!allAlive) {
      // locate the first dead node.
      DatanodeID dead = null;
      synchronized(this) {
        for (DatanodeDescriptor d : datanodes) {
          if (dm.isDatanodeDead(d)) {
            stats.incrExpiredHeartbeats();
            dead = d;
View Full Code Here
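
The scan records the first dead node and leaves the synchronized block before acting on it. A sketch of how the loop plausibly continues (assumed; the exact removal call differs across Hadoop versions):

            break;
          }
        }
      }
      // Sketch (assumed): if every node was alive the scan is finished;
      // otherwise the dead node is removed (under the appropriate lock)
      // and the scan restarts from the beginning.
      allAlive = (dead == null);
      if (dead != null) {
        dm.removeDeadDatanode(dead);  // assumed DatanodeManager helper
      }
    }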

    }

    String hostName = nodeReg.getHost();
     
    // update the datanode's name with ip:port
    DatanodeID dnReg = new DatanodeID(dnAddress + ":" + nodeReg.getPort(),
                                      nodeReg.getStorageID(),
                                      nodeReg.getInfoPort(),
                                      nodeReg.getIpcPort());
    nodeReg.updateRegInfo(dnReg);
    nodeReg.exportedKeys = getBlockKeys();
View Full Code Here
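
This older registration path rewrites the datanode's name as ip:port and then copies the refreshed fields back onto the incoming registration. A sketch of what updateRegInfo amounts to in that era of DatanodeID (field names assumed):

  // Sketch (assumed, pre-0.23 style): refresh the mutable registration
  // fields while preserving storageID, which identifies the datanode
  // across restarts.
  public void updateRegInfo(DatanodeID nodeReg) {
    name = nodeReg.getName();        // the ip:port assigned above
    infoPort = nodeReg.getInfoPort();
    ipcPort = nodeReg.getIpcPort();
  }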

      return;
    }
    boolean allAlive = false;
    while (!allAlive) {
      boolean foundDead = false;
      DatanodeID nodeID = null;

      // locate the first dead node.
      synchronized(heartbeats) {
        for (Iterator<DatanodeDescriptor> it = heartbeats.iterator();
             it.hasNext();) {
View Full Code Here

    }
   
    if (listDeadNodes) {
      for (Iterator<String> it = mustList.keySet().iterator(); it.hasNext();) {
        DatanodeDescriptor dn =
            new DatanodeDescriptor(new DatanodeID(it.next()));
        dn.setLastUpdate(0);
        nodes.add(dn);
      }
    }
   
View Full Code Here
