Package com.vmware.bdd.apitypes

Examples of com.vmware.bdd.apitypes.ClusterCreate
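The snippets below are drawn from tests and factory code that build ClusterCreate objects: they set node groups and their Hadoop roles, the distro, resource pools, and network configuration, and then hand the spec to a cluster or config manager. As a quick orientation, here is a minimal, self-contained sketch of the construction pattern the tests use; the class name ClusterCreateSketch is invented for illustration, and the imports assume NodeGroupCreate lives in the same com.vmware.bdd.apitypes package.

import java.util.Arrays;

import com.vmware.bdd.apitypes.ClusterCreate;
import com.vmware.bdd.apitypes.NodeGroupCreate;

public class ClusterCreateSketch {
   public static void main(String[] args) {
      // One compute-only group and one data-only group, using the same role
      // strings the tests obtain from the HadoopRole enum.
      NodeGroupCreate compute = new NodeGroupCreate();
      compute.setRoles(Arrays.asList("hadoop_tasktracker"));

      NodeGroupCreate data = new NodeGroupCreate();
      data.setRoles(Arrays.asList("hadoop_datanode"));

      ClusterCreate cluster = new ClusterCreate();
      cluster.setName("my-cluster");
      cluster.setDistro("bigtop");
      cluster.setNodeGroups(new NodeGroupCreate[] { compute, data });

      System.out.println(cluster.getNodeGroups().length + " node groups defined");
   }
}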


      assertEquals(false, defaultSoftwareManager.isComputeOnlyRoles(roles));
   }

   @Test
   public void testContainsComputeOnlyNodeGroups() {
      ClusterCreate cluster = new ClusterCreate();
      NodeGroupCreate compute = new NodeGroupCreate();
      NodeGroupCreate data = new NodeGroupCreate();
      compute.setRoles(Arrays.asList(HadoopRole.HADOOP_TASKTRACKER.toString()));
      data.setRoles(Arrays.asList(HadoopRole.HADOOP_DATANODE.toString()));
      cluster.setNodeGroups(new NodeGroupCreate[] { compute, data });
      assertEquals(true, cluster.containsComputeOnlyNodeGroups(defaultSoftwareManager));
      compute.setRoles(Arrays.asList(HadoopRole.HADOOP_TASKTRACKER.toString(),
            HadoopRole.TEMPFS_CLIENT_ROLE.toString()));
      cluster.setNodeGroups(new NodeGroupCreate[] { compute, data });
      assertEquals(true, cluster.containsComputeOnlyNodeGroups(defaultSoftwareManager));
      NodeGroupCreate worker = new NodeGroupCreate();
      worker.setRoles(Arrays.asList(HadoopRole.HADOOP_TASKTRACKER.toString(),
            HadoopRole.HADOOP_DATANODE.toString()));
      cluster.setNodeGroups(new NodeGroupCreate[] { worker });
      assertEquals(false, cluster.containsComputeOnlyNodeGroups(defaultSoftwareManager));
   }


      cluster.validateSetParamParameters(null, 1, 5);
   }

   @Test
   public void testSortingNodeGroups() {
      ClusterCreate cluster = new ClusterCreate();
      NodeGroupCreate client = new NodeGroupCreate();
      client.setRoles(Arrays.asList("hadoop_client"));
      NodeGroupCreate worker = new NodeGroupCreate();
      worker.setRoles(Arrays.asList("hadoop_tasktracker",
            "hadoop_datanode"));
      NodeGroupCreate master = new NodeGroupCreate();
      master.setRoles(Arrays.asList("hadoop_namenode",
            "hadoop_jobtracker"));
      cluster.setNodeGroups(new NodeGroupCreate[] { client, worker, master });
      assertEquals(3, cluster.getNodeGroups().length);
      ClusterBlueprint blueprint = cluster.toBlueprint();
      defaultSoftwareManager.updateInfrastructure(blueprint);
      assertEquals(master.getName(), blueprint.getNodeGroups().get(0).getName());
      assertEquals(worker.getName(), blueprint.getNodeGroups().get(1).getName());
      assertEquals(client.getName(), blueprint.getNodeGroups().get(2).getName());
   }

      assertTrue(defaultSoftwareManager.hasComputeMasterGroup(blueprint));
   }

   @Test
   public void testValidateGroupConfig() throws Exception {
      ClusterCreate cluster =
            TestFileUtil.getSimpleClusterSpec(TestFileUtil.HDFS_HA_CLUSTER_FILE);
      cluster.setDistro("bigtop");
      List<String> failedMsgList = new ArrayList<String>();
      List<String> warningMsgList = new ArrayList<String>();
      validator.validateGroupConfig(cluster.toBlueprint(), failedMsgList, warningMsgList);
      assertTrue("Should get empty fail message.", failedMsgList.isEmpty());
      assertTrue("Should get empty warning message.", warningMsgList.isEmpty());
   }

   }

   @Test(groups = { "TestClusteringJobs" })
   @Transactional(propagation = Propagation.NEVER)
   public void testCreateCluster() throws Exception {
      ClusterCreate createSpec = new ClusterCreate();
      createSpec.setName(TEST_STATIC_IP_CLUSTER_NAME);
      createSpec.setType(ClusterType.HDFS_MAPRED);
      createSpec.setNetworkConfig(createNetConfig(TEST_STATIC_NETWORK_NAME, staticPortgroup));
      createSpec.setDistro("bigtop");
      createSpec.setDistroVendor(Constants.DEFAULT_VENDOR);
      long jobExecutionId = clusterMgr.createCluster(createSpec);
      ClusterRead cluster =
            clusterMgr.getClusterByName(TEST_STATIC_IP_CLUSTER_NAME, false);
      Assert.assertTrue(
            cluster.getStatus() == ClusterStatus.PROVISIONING,

   }

   @Test(groups = { "TestClusteringJobs" }, dependsOnMethods = { "testCreateCluster" })
   @Transactional(propagation = Propagation.NEVER)
   public void testCreateClusterFailed() throws Exception {
      ClusterCreate createSpec =
            ClusterSpecFactory.createDefaultSpec(ClusterType.HDFS_MAPRED,
                  Constants.DEFAULT_VENDOR, null, null);
      createSpec.setName(TEST_DHCP_CLUSTER_NAME);
      createSpec.setNetworkConfig(createNetConfig(TEST_DHCP_NETWORK_NAME, dhcpPortgroup));
      createSpec.setDistro("bigtop");
      NodeGroupCreate worker = createSpec.getNodeGroup("worker");
      worker.setInstanceNum(1);
      long jobExecutionId = clusterMgr.createCluster(createSpec);
      ClusterRead cluster =
            clusterMgr.getClusterByName(TEST_DHCP_CLUSTER_NAME, false);
      Assert.assertTrue(

   }

   @Test(groups = { "TestClusteringJobs" }, dependsOnMethods = { "testLimitCluster" })
   @Transactional(propagation = Propagation.NEVER)
   public void testDupCreateCluster() throws Exception {
      ClusterCreate createSpec = new ClusterCreate();
      createSpec.setName(TEST_STATIC_IP_CLUSTER_NAME);
      createSpec.setType(ClusterType.HDFS_MAPRED);
      try {
         clusterMgr.createCluster(createSpec);
         Assert.assertTrue(false, "Cluster creation should throw exception.");
      } catch (Exception e) {
         e.printStackTrace();

      return netConfigs;
   }

   @Test(groups = { "TestClusterConfigManager" })
   public void testClusterConfig() throws Exception {
      ClusterCreate spec = new ClusterCreate();
      spec.setName("my-cluster");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp1");
      spec.setRpNames(rps);
      spec.setNetworkConfig(createNetConfigs());
      spec.setDistro("bigtop");
      spec.setDistroVendor(Constants.DEFAULT_VENDOR);
      spec.setType(ClusterType.HDFS_MAPRED);
      spec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      clusterConfigMgr.createClusterConfig(spec);

      ClusterEntity cluster = clusterEntityMgr.findClusterById(1L);
      List<ClusterEntity> cs = clusterEntityMgr.findAllClusters();
      for (ClusterEntity c : cs) {
         System.out.println(c.getId());
      }
      cluster = clusterEntityMgr.findByName("my-cluster");
      Assert.assertTrue(cluster != null);
      ClusterCreate attrs = clusterConfigMgr.getClusterConfig("my-cluster");
      String manifest = gson.toJson(attrs);
      System.out.println(manifest);
      Assert.assertTrue(manifest.indexOf("master") != -1,
            "manifest should contain node groups");
   }
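The test above reads the stored configuration back as a ClusterCreate and serializes it with Gson to check the manifest. A standalone sketch of that serialization step, assuming only Gson on the classpath (ManifestSketch is an illustrative name and the field values are placeholders):

import com.google.gson.Gson;

import com.vmware.bdd.apitypes.ClusterCreate;

public class ManifestSketch {
   public static void main(String[] args) {
      ClusterCreate attrs = new ClusterCreate();
      attrs.setName("my-cluster");
      attrs.setDistro("bigtop");

      // Serialize the spec the same way testClusterConfig does before
      // asserting that the manifest mentions the node groups.
      Gson gson = new Gson();
      String manifest = gson.toJson(attrs);
      System.out.println(manifest);
   }
}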

      Map<String, String> hostToRackMap = new TreeMap<String, String>();
      hostToRackMap.put("host1", "rack1");
      hostToRackMap.put("host2", "rack1");
      hostToRackMap.put("host3", "rack1");

      ClusterCreate cluster = new ClusterCreate();
      cluster.setHostToRackMap(hostToRackMap);

      logger.info("generating placement plan: "
            + getJobParameters(chunkContext).getString("cluster.name"));
      putIntoJobExecutionContext(chunkContext, "some-variable-which-need-be-saved", 10);
      putIntoJobExecutionContext(chunkContext, "clusterCreate", cluster);

      if ((spec.getType() == null)
            || (spec.getType() != null && spec.isSpecFile())) {
         return spec;
      }

      ClusterCreate newSpec =
            createDefaultSpec(spec.getType(), spec.getDistroVendor(), spec.getDistroVersion(), appManagerType);

      // --name
      if (spec.getName() != null) {
         newSpec.setName(spec.getName());
      }

      //--password
      newSpec.setPassword(spec.getPassword());

      // --appManager
      if(!CommonUtil.isBlank(spec.getAppManager())) {
         newSpec.setAppManager(spec.getAppManager());
      }

      // --localRepoURL
      if(!CommonUtil.isBlank(spec.getLocalRepoURL())) {
         newSpec.setLocalRepoURL(spec.getLocalRepoURL());
      }

      // --distro
      if (spec.getDistro() != null) {
         newSpec.setDistro(spec.getDistro());
      }

      //vendor
      if (spec.getDistroVendor() != null) {
         newSpec.setDistroVendor(spec.getDistroVendor());
      }

      //version
      if (spec.getDistroVersion() != null) {
         newSpec.setDistroVersion(spec.getDistroVersion());
      }

      // --dsNames
      if (spec.getDsNames() != null) {
         newSpec.setDsNames(spec.getDsNames());
      }

      // --rpNames
      if (spec.getRpNames() != null) {
         newSpec.setRpNames(spec.getRpNames());
      }

      // --networkConfig
      if (spec.getNetworkConfig() != null) {
         newSpec.setNetworkConfig(spec.getNetworkConfig());
      }

      // --topology
      if (spec.getTopologyPolicy() != null) {
         newSpec.setTopologyPolicy(spec.getTopologyPolicy());
      }

      return newSpec;
   }
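This fragment overlays user-supplied fields onto a default spec created by createDefaultSpec; the tests call it through ClusterSpecFactory.getCustomizedSpec(spec, null). A hedged usage sketch follows; the package of ClusterSpecFactory is not shown in these snippets, so its import is an assumption, and CustomizedSpecSketch is an illustrative name.

import com.vmware.bdd.apitypes.ClusterCreate;
import com.vmware.bdd.apitypes.ClusterType;
// Assumed location of ClusterSpecFactory; adjust to the actual package.
import com.vmware.bdd.specpolicy.ClusterSpecFactory;

public class CustomizedSpecSketch {
   public static void main(String[] args) throws Exception {
      // A sparse user spec: only the name, type, and distro are set, as in
      // testClusterConfig above.
      ClusterCreate spec = new ClusterCreate();
      spec.setName("my-cluster");
      spec.setType(ClusterType.HDFS_MAPRED);
      spec.setDistro("bigtop");

      // Fill in the remaining fields from the default spec; the tests pass
      // null for the appManagerType argument.
      ClusterCreate full = ClusterSpecFactory.getCustomizedSpec(spec, null);
      System.out.println(full.getName());
   }
}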

public class AppConfigValidationFactoryTest {
   private ClusterCreate cluster;

   @BeforeMethod
   public void setup() {
      cluster = new ClusterCreate();
      Map<String, Object> hadoopMap = new HashMap<String, Object>();
      Map<String, Object> hadoopFileMap = new HashMap<String, Object>();
      Map<String, Object> corePopertysMap = new HashMap<String, Object>();
      Map<String, Object> hdfsPopertysMap = new HashMap<String, Object>();
      Map<String, Object> mapredPopertysMap = new HashMap<String, Object>();
