Package org.apache.hadoop.hdfs.test.system

Examples of org.apache.hadoop.hdfs.test.system.DNClient
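DNClient is the DataNode client handle in Hadoop's Herriot system-test framework: it lets test code start, stop, and inspect a live DataNode daemon on a real cluster. The snippets below are drawn from an HDFS balancer system test built on that interface.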


    @Test
    public void testBalancerBasicScenario() throws IOException {
        Path balancerTempDir = null;
        try {
            List<DNClient> testnodes = reserveDatanodesForTest(2);
            DNClient testnode1 = testnodes.get(0);
            DNClient testnode2 = testnodes.get(1);
            shutdownNonTestNodes(testnodes);

            LOG.info("attempting to kill both test nodes");
            stopDatanode(testnode1);
            stopDatanode(testnode2);

            LOG.info("starting up datanode ["+
            testnode1.getHostName()+
            "] and loading it with data");
            startDatanode(testnode1);

            // mkdir balancer-temp
            balancerTempDir = makeTempDir();
            // write 2 blocks to file system
            LOG.info("generating filesystem load");
            // TODO spec blocks to generate by blockCount, blockSize, # of writers
            generateFileSystemLoad(2); // generate 2 blocks of test data

            LOG.info("measure space used on 1st node");
            long usedSpace0 = getDatanodeUsedSpace(testnode1);
            LOG.info("datanode " + testnode1.getHostName()
                    + " contains " + usedSpace0 + " bytes");

            LOG.info("bring up a 2nd node and run balancer on DFS");
            startDatanode(testnode2);
            runBalancerAndVerify(testnodes);
        } catch (Throwable t) {
            LOG.info("method testBalancer failed", t);
        } finally {
            // finally block to run cleanup
            LOG.info("clean off test data from DFS [rmr ~/balancer-temp]");
            try {
                deleteTempDir(balancerTempDir);
            } catch (Exception e) {
                LOG.warn("problem cleaning up temp dir", e);
            }

            // restart killed nodes
            for (DNClient dn : dfsCluster.getDNClients()) {
                startDatanode(dn);
            }
        }
    }
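The load-generation helper called above is not shown on this page. A minimal sketch, assuming generateFileSystemLoad(numBlocks) writes that many default-block-size files of random data under the balancer temp directory (only the name and argument come from the call above; the body is an assumption, and the two-argument overload used in the last example presumably adds a replication factor):

    // Hypothetical sketch of generateFileSystemLoad(numBlocks): write
    // numBlocks files of one default block each, at replication 1, so the
    // balancer has data to redistribute. Implementation details are assumed.
    private void generateFileSystemLoad(long numBlocks) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        long blockSize = fs.getDefaultBlockSize();
        byte[] chunk = new byte[64 * 1024];
        new Random().nextBytes(chunk);
        for (long b = 0; b < numBlocks; b++) {
            Path file = new Path("balancer-temp/load-file-" + b);
            FSDataOutputStream out = fs.create(file, (short) 1);
            try {
                for (long written = 0; written < blockSize; written += chunk.length) {
                    out.write(chunk);
                }
            } finally {
                out.close();
            }
        }
    }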


    private void shutdownNonTestNodes(List<DNClient> testnodes) {
        Set<DNClient> killSet = new HashSet<DNClient>(getAllDatanodes());
        killSet.removeAll(testnodes);
        LOG.info("attempting to kill/suspend all the nodes not used for this test");
        for (DNClient dn : killSet) {
            // kill may not work with some secure-HDFS configs,
            // so using our stopDatanode() method
            stopDatanode(dn);
        }
    }
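Several snippets filter or iterate over the full node set; a plausible sketch of the getAllDatanodes() helper, based on the dfsCluster.getDNClients() call used in the first example's cleanup block:

    // Plausible sketch: the test's node inventory is just the Herriot
    // cluster handle's datanode client list (dfsCluster is assumed to be
    // the same field used in the first example).
    private List<DNClient> getAllDatanodes() {
        return dfsCluster.getDNClients();
    }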

        dieDNs = new LinkedList<DNClient>(dnList);
        testDNs = new LinkedList<DNClient>();

        final int LEN = dnCount - 1;
        int i = getRandom(LEN);
        DNClient testDN = dieDNs.get(i);
        testDNs.add(testDN);
        dieDNs.remove(testDN);
        // pick a second, distinct random node for the test set
        int j = i;
        do {
            i = getRandom(LEN);
        } while (i == j); // re-draw until the index differs from the first pick
        testDN = dieDNs.get(i);
        testDNs.add(testDN);
        dieDNs.remove(testDN);
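The index juggling above depends on a getRandom helper that is not shown. A minimal sketch, assuming a uniform draw over [0, n); the exclusive upper bound is an assumption, chosen so the second draw stays in bounds after the first node is removed from dieDNs:

    private static final Random RAND = new Random();

    // Minimal sketch, assuming getRandom(n) returns a uniform index in [0, n).
    private int getRandom(int limit) {
        return RAND.nextInt(limit);
    }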

        double totalUsedSpace = 0L;
        double totalCapacity = 0L;
        Map datanodeVolumeMap = new HashMap();
        // accumulate space stored on each node
        for (int i = 0; i < datanodes.length; i++) {
            DNClient datanode = datanodes[i];
            Map volumeInfoMap = getDatanodeVolumeAttributes(datanode);
            long usedSpace = (Long) volumeInfoMap.get(ATTRNAME_USED_SPACE);
            long capacity  = (Long) volumeInfoMap.get(ATTRNAME_CAPACITY);
            utilizationByNode[i] = (((double) usedSpace) / capacity) * 100;
            totalUsedSpace += usedSpace;
            totalCapacity += capacity;
        }
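The loop above yields per-node utilization percentages plus cluster totals, which is exactly what a balance check needs. A hedged sketch of such a verification (the 10% figure is the HDFS balancer's default threshold; the assertion style assumes org.junit.Assert):

        // Hedged sketch: each node's utilization should sit within the
        // balancer's default 10% threshold of the cluster-wide average.
        double avgUtilization = (totalUsedSpace / totalCapacity) * 100;
        final double BALANCER_THRESHOLD = 10.0;
        for (int i = 0; i < datanodes.length; i++) {
            double deviation = Math.abs(utilizationByNode[i] - avgUtilization);
            Assert.assertTrue("datanode " + datanodes[i].getHostName()
                    + " deviates " + deviation + "% from the cluster average",
                    deviation <= BALANCER_THRESHOLD);
        }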

    public void testBalancerTwoNodeSingleRackClusterWithNewNodeAdded()
             throws IOException {

        final short TEST_REPLICATION_FACTOR = 3;
        List<DNClient> testnodes = reserveDatanodesForTest(3);
        DNClient dnA = testnodes.get(0);
        DNClient dnB = testnodes.get(1);

        DNClient dnC = testnodes.get(2);
        stopDatanode(dnC);

        // load the cluster to 30% over its pre-test usage,
        // using the most heavily loaded node as the baseline
        long targetLoad = (long) (
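The assignment above is cut off mid-expression. A hypothetical completion, consistent with the comment's intent (30% over pre-test usage, most heavily loaded node as baseline):

        // Hypothetical completion of the truncated assignment above:
        long baseline = Math.max(getDatanodeUsedSpace(dnA),
                                 getDatanodeUsedSpace(dnB));
        long targetLoad = (long) (baseline * 1.30);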

        try {
            // reserve 2 nodes for test
            List<DNClient> testnodes = reserveDatanodesForTest(2);
            shutdownNonTestNodes(testnodes);

            DNClient testnode1 = testnodes.get(0);
            DNClient testnode2 = testnodes.get(1);

            // write some blocks with replication factor of 1
            balancerTempDir = makeTempDir();
            generateFileSystemLoad(20, replication);
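The temp-directory helpers makeTempDir() and deleteTempDir() recur throughout these examples. Minimal sketches, assuming they wrap plain org.apache.hadoop.fs.FileSystem calls on a fixed balancer-temp working directory (the name follows the "rmr ~/balancer-temp" log message in the first example):

    // Minimal sketch: create the working directory the tests write into.
    private Path makeTempDir() throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path dir = new Path("balancer-temp");
        fs.mkdirs(dir);
        return dir;
    }

    // Minimal sketch: recursively remove the working directory, like "rmr".
    private void deleteTempDir(Path dir) throws IOException {
        if (dir != null) {
            FileSystem.get(new Configuration()).delete(dir, true);
        }
    }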
