Examples of com.vmware.bdd.apitypes.ClusterCreate


Examples of com.vmware.bdd.apitypes.ClusterCreate

         return;
      }
      try {
         ClusterRead clusterRead = restClient.get(name, false);
         // build ClusterCreate object
         ClusterCreate clusterConfig = new ClusterCreate();
         clusterConfig.setName(clusterRead.getName());
         ClusterCreate clusterSpec =
               CommandsUtils.getObjectByJsonString(ClusterCreate.class,
                     CommandsUtils.dataFromFile(specFilePath));
         clusterConfig.setNodeGroups(clusterSpec.getNodeGroups());
         clusterConfig.setConfiguration(clusterSpec.getConfiguration());
         clusterConfig.setExternalHDFS(clusterSpec.getExternalHDFS());
         List<String> warningMsgList = new ArrayList<String>();
         List<String> failedMsgList = new ArrayList<String>();
         validateConfiguration(clusterConfig, skipConfigValidation,
               warningMsgList, failedMsgList);
         // add a confirm message for running job
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

      NodeGroupCreate nodeGroup = new NodeGroupCreate();
      nodeGroup.setName(NODE_GROUP_NAME);
      nodeGroup.setStorage(new StorageRead());
      NodeGroupCreate[] nodeGroups = new NodeGroupCreate[] { nodeGroup };

      ClusterCreate spec = new ClusterCreate();
      spec.setName(CLUSTER_NAME);
      spec.setNodeGroups(nodeGroups);
      Set<String> patterns = new HashSet<String>();
      patterns.add(LOCAL_STORE_PATTERN);
      spec.setLocalDatastorePattern(patterns);

      Mockito.when(configMgr.getClusterConfig(CLUSTER_NAME)).thenReturn(spec);
      service.setConfigMgr(configMgr);
   }
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

   @Test(groups = { "TestClusteringService" }, dependsOnMethods = { "testReserveResource" })
   public void testCreateDhcpVmFolderFailed() {
      List<NetworkAdd> networkAdds = createNetworkAdd();
      List<BaseNode> vNodes = new ArrayList<BaseNode>();
      BaseNode node = new BaseNode("test-master-0");
      ClusterCreate spec = createClusterSpec();
      node.setCluster(spec);
      VmSchema vmSchema = createVmSchema();
      node.setVmSchema(vmSchema);
      vNodes.add(node);
      MockTmScheduler.setFlag(VmOperation.CREATE_FOLDER, false);
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

   @Test(groups = { "TestClusteringService" }, dependsOnMethods = { "testCreateDhcpVmFolderFailed" })
   public void testCreateDhcpVmNullResult() {
      List<NetworkAdd> networkAdds = createNetworkAdd();
      List<BaseNode> vNodes = new ArrayList<BaseNode>();
      BaseNode node = new BaseNode("test-master-0");
      ClusterCreate spec = createClusterSpec();
      node.setCluster(spec);
      VmSchema vmSchema = createVmSchema();
      node.setVmSchema(vmSchema);
      vNodes.add(node);
      MockTmScheduler.setFlag(VmOperation.CREATE_FOLDER, true);
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

   public void testCreateDhcpVmCreateVmFail() throws Exception {
      List<NetworkAdd> networkAdds = createNetworkAdd();
      List<BaseNode> vNodes = new ArrayList<BaseNode>();
      BaseNode node = new BaseNode("test-master-0");
      // create cluster spec
      ClusterCreate spec = createClusterSpec();
      node.setCluster(spec);
      node.setNodeGroup(spec.getNodeGroup("master"));
      node.setTargetVcCluster("cluster-ws");
      node.setVmMobId("test-vm");
      vNodes.add(node);
      // create vm schema
      VmSchema vmSchema = createVmSchema();
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

            "manifest should contains nodegroups");
   }

   @Test(groups = { "TestClusterConfigManager" })
   public void testCDHMapReduceV2CreateDefaultSpec() throws Exception {
      ClusterCreate spec = new ClusterCreate();
      spec.setName("my-cluster");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp1");
      spec.setRpNames(rps);
      spec.setNetworkConfig(createNetConfigs());
      spec.setDistro("cdh4");
      spec.setDistroVendor(Constants.CDH_VENDOR);
      spec.setDistroVersion("4.4.0");
      spec.setType(ClusterType.HDFS_MAPRED);
      ClusterCreate newSpec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      Assert.assertTrue(newSpec.getNodeGroups().length == 3);
      List<String> masterRoles = newSpec.getNodeGroups()[0].getRoles();
      Assert.assertTrue(
            masterRoles.contains(HadoopRole.HADOOP_JOBTRACKER_ROLE.toString()),
            "expected role " + HadoopRole.HADOOP_JOBTRACKER_ROLE.toString()
            + ", but got " + masterRoles);

      spec.setDistro("cdh5");
      spec.setDistroVersion("5.0.0");
      newSpec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      Assert.assertTrue(newSpec.getNodeGroups().length == 3);
      masterRoles = newSpec.getNodeGroups()[0].getRoles();
      Assert.assertTrue(
            masterRoles.contains(HadoopRole.HADOOP_RESOURCEMANAGER_ROLE.toString()),
            "expected role " + HadoopRole.HADOOP_RESOURCEMANAGER_ROLE.toString()
            + ", but got " + masterRoles);
   }
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

            + ", but got " + masterRoles);
   }

   @Test(groups = { "TestClusterConfigManager" })
   public void testHDPMapReduceV2CreateDefaultSpec() throws Exception {
      ClusterCreate spec = new ClusterCreate();
      spec.setName("my-cluster");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp1");
      spec.setRpNames(rps);
      spec.setNetworkConfig(createNetConfigs());
      spec.setDistro("hdp1");
      spec.setDistroVendor(Constants.HDP_VENDOR);
      spec.setDistroVersion("1.3");
      spec.setType(ClusterType.HDFS_MAPRED);
      ClusterCreate newSpec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      Assert.assertTrue(newSpec.getNodeGroups().length == 3);
      List<String> masterRoles = newSpec.getNodeGroups()[0].getRoles();
      Assert.assertTrue(
            masterRoles.contains(HadoopRole.HADOOP_JOBTRACKER_ROLE.toString()),
            "expected role " + HadoopRole.HADOOP_JOBTRACKER_ROLE.toString()
            + ", but got " + masterRoles);

      spec.setDistro("hdp2");
      spec.setDistroVersion("2.0");
      newSpec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      Assert.assertTrue(newSpec.getNodeGroups().length == 3);
      masterRoles = newSpec.getNodeGroups()[0].getRoles();
      Assert.assertTrue(
            masterRoles.contains(HadoopRole.HADOOP_RESOURCEMANAGER_ROLE.toString()),
            "expected role " + HadoopRole.HADOOP_RESOURCEMANAGER_ROLE.toString()
            + ", but got " + masterRoles);
   }
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

            + ", but got " + masterRoles);
   }

   @Test(groups = { "TestClusterConfigManager" })
   public void testBigTopMapReduceV2CreateDefaultSpec() throws Exception {
      ClusterCreate spec = new ClusterCreate();
      spec.setName("my-cluster");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp1");
      spec.setRpNames(rps);
      spec.setNetworkConfig(createNetConfigs());
      spec.setDistro("bigtop");
      spec.setDistroVendor(Constants.BIGTOP_VENDOR);
      spec.setDistroVersion("0.7");
      spec.setType(ClusterType.HDFS_MAPRED);
      ClusterCreate newSpec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      Assert.assertTrue(newSpec.getNodeGroups().length == 3);
      List<String> masterRoles = newSpec.getNodeGroups()[0].getRoles();
      Assert.assertTrue(
            masterRoles.contains(HadoopRole.HADOOP_RESOURCEMANAGER_ROLE.toString()),
            "expected role " + HadoopRole.HADOOP_RESOURCEMANAGER_ROLE.toString()
            + ", but got " + masterRoles);
   }
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

            + ", but got " + masterRoles);
   }

   @Test(groups = { "TestClusterConfigManager" })
   public void testIntelMapReduceV2CreateDefaultSpec() throws Exception {
      ClusterCreate spec = new ClusterCreate();
      spec.setName("my-cluster");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp1");
      spec.setRpNames(rps);
      spec.setNetworkConfig(createNetConfigs());
      spec.setDistro("intel2");
      spec.setDistroVendor(Constants.INTEL_VENDOR);
      spec.setDistroVersion("2.6");
      spec.setType(ClusterType.HDFS_MAPRED);
      ClusterCreate newSpec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      Assert.assertTrue(newSpec.getNodeGroups().length == 3);
      List<String> masterRoles = newSpec.getNodeGroups()[0].getRoles();
      Assert.assertTrue(
            masterRoles.contains(HadoopRole.HADOOP_JOBTRACKER_ROLE.toString()),
            "expected role " + HadoopRole.HADOOP_JOBTRACKER_ROLE.toString()
            + ", but got " + masterRoles);

      spec.setDistro("intel3");
      spec.setDistroVersion("3.1");
      newSpec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      Assert.assertTrue(newSpec.getNodeGroups().length == 3);
      masterRoles = newSpec.getNodeGroups()[0].getRoles();
      Assert.assertTrue(
            masterRoles.contains(HadoopRole.HADOOP_RESOURCEMANAGER_ROLE.toString()),
            "expected role " + HadoopRole.HADOOP_RESOURCEMANAGER_ROLE.toString()
            + ", but got " + masterRoles);
   }
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

   public void testClusterConfigWithExternalHDFS() throws Exception {
      String[] hdfsArray =
            new String[] { "hdfs://168.192.0.70:8020",
                  "hdfs://168.192.0.71:8020", "hdfs://168.192.0.72:8020",
                  "hdfs://168.192.0.73:8020" };
      ClusterCreate spec = new ClusterCreate();
      spec.setName("my-cluster-external-hdfs");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp1");
      spec.setRpNames(rps);
      spec.setNetworkConfig(createNetConfigs());
      spec.setDistro("bigtop");
      spec.setDistroVendor(Constants.DEFAULT_VENDOR);
      spec.setExternalHDFS(hdfsArray[0]);
      String clusterConfigJson =
            "{\"configuration\":{\"hadoop\":{\"core-site.xml\":{\"fs.default.name\":\""
                  + hdfsArray[1] + "\"}}}}";
      Map clusterConfig = (new Gson()).fromJson(clusterConfigJson, Map.class);
      spec.setConfiguration((Map<String, Object>) (clusterConfig
            .get("configuration")));
      //build a jobtracker group, two compute node groups.
      NodeGroupCreate ng0 = new NodeGroupCreate();
      List<String> computerMasterRoles = new ArrayList<String>();
      computerMasterRoles.add("hadoop_resourcemanager");
      ng0.setRoles(computerMasterRoles);
      ng0.setName("resourcemanager");
      ng0.setInstanceNum(1);
      ng0.setInstanceType(InstanceType.LARGE);
      String ng0ConfigJson =
            "{\"configuration\":{\"hadoop\":{\"core-site.xml\":{\"fs.default.name\":\""
                  + hdfsArray[2] + "\"}}}}";
      Map ng0Config = (new Gson()).fromJson(ng0ConfigJson, Map.class);
      ng0.setConfiguration((Map<String, Object>) (ng0Config
            .get("configuration")));

      NodeGroupCreate ng1 = new NodeGroupCreate();
      List<String> computeWorkerRoles = new ArrayList<String>();
      computeWorkerRoles.add("hadoop_nodemanager");
      ng1.setRoles(computeWorkerRoles);
      ng1.setName("compute1");
      ng1.setInstanceNum(4);
      ng1.setInstanceType(InstanceType.MEDIUM);
      StorageRead storage = new StorageRead();
      storage.setType("LOCAL");
      storage.setSizeGB(10);
      ng1.setStorage(storage);
      String ng1ConfigJson =
            "{\"configuration\":{\"hadoop\":{\"core-site.xml\":{\"fs.default.name\":\""
                  + hdfsArray[3] + "\"}}}}";
      Map ng1Config = (new Gson()).fromJson(ng1ConfigJson, Map.class);
      ng1.setConfiguration((Map<String, Object>) (ng1Config
            .get("configuration")));
      NodeGroupCreate ng2 = new NodeGroupCreate();
      ng2.setRoles(computeWorkerRoles);
      ng2.setName("compute2");
      ng2.setInstanceNum(2);
      ng2.setInstanceType(InstanceType.MEDIUM);
      StorageRead storageCompute = new StorageRead();
      storageCompute.setType("LOCAL");
      storageCompute.setSizeGB(10);
      ng2.setStorage(storageCompute);

      NodeGroupCreate[] ngs = new NodeGroupCreate[] { ng0, ng1, ng2 };
      spec.setNodeGroups(ngs);
      spec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      clusterConfigMgr.createClusterConfig(spec);

      ClusterEntity cluster = clusterEntityMgr.findClusterById(1l);
      List<ClusterEntity> cs = clusterEntityMgr.findAllClusters();
      for (ClusterEntity c : cs) {
         System.out.println(c.getId());
      }
      cluster = clusterEntityMgr.findByName("my-cluster-external-hdfs");
      Assert.assertTrue(cluster != null);
      Assert.assertEquals(cluster.getAdvancedProperties(), "{\"ExternalHDFS\":\"hdfs://168.192.0.70:8020\"}");
      ClusterRead clusterRead = clusterEntityMgr.toClusterRead("my-cluster-external-hdfs");
      Assert.assertEquals(clusterRead.getExternalHDFS(), "hdfs://168.192.0.70:8020");

      ClusterCreate attrs =
            clusterConfigMgr.getClusterConfig("my-cluster-external-hdfs");
      String manifest = gson.toJson(attrs);
      System.out.println(manifest);
      Assert.assertTrue(
            Pattern.compile("([\\s\\S]*" + hdfsArray[0] + "[\\s\\S]*){3}")
 
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.