Examples of ClusterCreate
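
Before the individual snippets, a quick orientation: the pattern they all share is to build a ClusterCreate, give it a name and one or more NodeGroupCreate entries, and hand the spec to whatever component consumes it. The fragment below is a sketch rather than code from the test sources; it only uses class names and setters that appear in the snippets on this page, with hypothetical values.

      ClusterCreate spec = new ClusterCreate();
      spec.setName("demo-cluster");                       // hypothetical cluster name

      NodeGroupCreate master = new NodeGroupCreate();
      master.setName("master");
      master.setInstanceNum(1);
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      master.setRoles(roles);

      spec.setNodeGroups(new NodeGroupCreate[] { master });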


Examples of com.vmware.bdd.apitypes.ClusterCreate

      // ... end of the helper that builds the VmSchema handed to each test node
      vmSchema.resourceSchema = new ResourceSchema();
      return vmSchema;
   }

   // builds a minimal cluster spec: one node group named "master" with its VM folder path set
   private ClusterCreate createClusterSpec() {
      ClusterCreate spec = new ClusterCreate();
      spec.setName("test");
      NodeGroupCreate[] nodeGroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      group.setVmFolderPath("root/test/master");
      group.setName("master");
      nodeGroups[0] = group;
      spec.setNodeGroups(nodeGroups);
      return spec;
   }
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

//   @Test(groups = { "TestClusteringService" }, dependsOnMethods = { "testCreateDhcpVmCreateVmFail" })
   public void testCreateDhcpVmCreateVmPass() throws Exception {
      List<NetworkAdd> networkAdds = createNetworkAdd();
      List<BaseNode> vNodes = new ArrayList<BaseNode>();
      // build a single master node and bind it to the test cluster spec
      BaseNode node = new BaseNode("test-master-0");
      ClusterCreate spec = createClusterSpec();
      node.setCluster(spec);
      node.setNodeGroup(spec.getNodeGroup("master"));
      node.setTargetVcCluster("cluster-ws");
      vNodes.add(node);
      // create vm schema
      VmSchema vmSchema = createVmSchema();
      node.setVmSchema(vmSchema);
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

   @Test
   public void testDataFromFileWithUTF8() throws Exception {
      // read a cluster spec whose node group names are Chinese (UTF-8) and check
      // that they survive deserialization intact
      final String specFilePath = "src/test/resources/hadoop_cluster_cn.json";
      try {
         ClusterCreate clusterSpec =
               CommandsUtils.getObjectByJsonString(ClusterCreate.class,
                     CommandsUtils.dataFromFile(specFilePath));
         NodeGroupCreate[] nodeGroups = clusterSpec.getNodeGroups();
         assertEquals(nodeGroups.length, 3);
         assertEquals(nodeGroups[0].getName(), "主节点");
         assertEquals(nodeGroups[1].getName(), "协作节点");
         assertEquals(nodeGroups[2].getName(), "客户端");
      } catch (Exception ex) {
View Full Code Here
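
The UTF-8 handling can also be exercised without the spec file. The fragment below is a sketch, not part of the test sources: it pushes a Chinese group name through a plain Gson round trip instead of CommandsUtils and the hadoop_cluster_cn.json resource, assuming ClusterCreate serializes cleanly with a default Gson instance.

      ClusterCreate spec = new ClusterCreate();
      spec.setName("hadoop_cluster_cn");
      NodeGroupCreate master = new NodeGroupCreate();
      master.setName("主节点");
      spec.setNodeGroups(new NodeGroupCreate[] { master });

      Gson gson = new Gson();
      String json = gson.toJson(spec);                          // UTF-8 names survive serialization
      ClusterCreate parsed = gson.fromJson(json, ClusterCreate.class);
      assertEquals(parsed.getNodeGroups()[0].getName(), "主节点");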

Examples of com.vmware.bdd.apitypes.ClusterCreate

   @Test
   public void testPrettyJsonOutputWithUTF8() throws Exception {
      // round trip: read the UTF-8 spec, pretty-print it to a new file, read it
      // back, and check the Chinese group names are unchanged
      final String specFilePath = "src/test/resources/hadoop_cluster_cn.json";
      final String exportFilePath =
            "src/test/resources/hadoop_cluster_cn_export.json";
      ClusterCreate clusterSpec =
            CommandsUtils.getObjectByJsonString(ClusterCreate.class,
                  CommandsUtils.dataFromFile(specFilePath));
      CommandsUtils.prettyJsonOutput(clusterSpec, exportFilePath);
      File exportFile = new File(exportFilePath);
      assertTrue(exportFile.exists());
      ClusterCreate exportClusterSpec =
            CommandsUtils.getObjectByJsonString(ClusterCreate.class,
                  CommandsUtils.dataFromFile(exportFilePath));
      NodeGroupCreate[] nodeGroups = exportClusterSpec.getNodeGroups();
      assertEquals(nodeGroups.length, 3);
      assertEquals(nodeGroups[0].getName(), "主节点");
      assertEquals(nodeGroups[1].getName(), "协作节点");
      assertEquals(nodeGroups[2].getName(), "客户端");
      exportFile.delete();
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

            .append(" \"hadoop-env.sh\" : {   ")
            .append(" \"JAVA_HOME\": \"/path/to/javahome\" ").append(" }, ")
            .append("\"log4j.properties\" : {              ")
            .append("\"hadoop.root.logger\": \"DEBUG,console\" ")
            .append("  } ").append("}  ").append("} ").append("}");
      ClusterCreate clusterCreate =
            CommandsUtils.getObjectByJsonString(ClusterCreate.class,
                  jsonBuff.toString());
      assertNotNull(clusterCreate);
      Map<String, Object> hadoopConfig =
            (Map<String, Object>) clusterCreate.getConfiguration()
                  .get("hadoop");
      Map<String, Object> coreSiteConfig =
            (Map<String, Object>) hadoopConfig.get("core-site.xml");
      assertEquals(coreSiteConfig.get("fs.default.name"),
            "hdfs://fqdn_or_ip:8020");
View Full Code Here
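
For comparison, the same nested configuration can be assembled as plain maps and attached directly to a spec; this fragment is a sketch rather than test code, using only the setConfiguration/getConfiguration accessors that appear in the snippets on this page.

      Map<String, Object> coreSite = new HashMap<String, Object>();
      coreSite.put("fs.default.name", "hdfs://fqdn_or_ip:8020");
      Map<String, Object> hadoop = new HashMap<String, Object>();
      hadoop.put("core-site.xml", coreSite);
      Map<String, Object> configuration = new HashMap<String, Object>();
      configuration.put("hadoop", hadoop);

      ClusterCreate spec = new ClusterCreate();
      spec.setConfiguration(configuration);                     // same nesting the assertions above walk through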

Examples of com.vmware.bdd.apitypes.ClusterCreate

      Mockito.when(ngEntity.getName()).thenReturn(GROUP_NAME);

      Mockito.when(nodeEntity.getNodeGroup()).thenReturn(ngEntity);

      // stub the config manager so the scale service gets a fresh, empty
      // ClusterCreate back for CLUSTER_NAME
      ClusterConfigManager clusterConfigMgr = Mockito.mock(ClusterConfigManager.class);
      Mockito.when(clusterConfigMgr.getClusterConfig(CLUSTER_NAME)).thenReturn(new ClusterCreate());
      scaleService.setClusterConfigMgr(clusterConfigMgr);

   }
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

      //      Assert.assertTrue("manifest is inconsistent",
      //            manifest.indexOf("{\"name\":\"my-cluster4\",\"groups\":[{\"name\":\"master\",\"roles\":[\"hadoop_namenode\",\"hadoop_jobtracker\"],\"instance_num\":1,\"storage\":{\"type\":\"shared\",\"size\":50},\"cpu\":2,\"memory\":7500,\"ha\":\"on\",\"vm_folder_path\":\"SERENGETI-null/my-cluster4/master\"},{\"name\":\"worker\",\"roles\":[\"hadoop_datanode\",\"hadoop_tasktracker\"],\"instance_num\":3,\"storage\":{\"type\":\"local\",\"size\":50},\"cpu\":1,\"memory\":3748,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster4/worker\"},{\"name\":\"client\",\"roles\":[\"hadoop_client\",\"pig\",\"hive\",\"hive_server\"],\"instance_num\":1,\"storage\":{\"type\":\"shared\",\"size\":50},\"cpu\":1,\"memory\":3748,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster4/client\"}],\"distro\":\"apache\",\"vc_clusters\":[{\"name\":\"cluster1\",\"vc_rps\":[\"rp1\"]}],\"template_id\":\"vm-001\",\"networking\":[{\"port_group\":\"CFNetwork\",\"type\":\"dhcp\"}]") != -1);
   }

   // create a cluster whose single node group asks for 50 GB of local storage,
   // then check the generated manifest
   public void testClusterConfigWithGroupStorage() {
      ClusterCreate spec = new ClusterCreate();
      spec.setNetworkConfig(createNetConfigs());
      spec.setName("my-cluster5");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp2");
      rps.add("myRp3");
      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      NodeGroupCreate[] nodegroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(1);
      group.setInstanceType(InstanceType.LARGE);
      group.setHaFlag("off");
      group.setName("main_group");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      group.setRoles(roles);
      StorageRead storage = new StorageRead();
      storage.setSizeGB(50);
      storage.setType(DatastoreType.LOCAL.toString());
      group.setStorage(storage);
      spec.setNodeGroups(nodegroups);
      clusterConfigMgr.createClusterConfig(spec);

      ClusterEntity cluster = clusterEntityMgr.findByName("my-cluster5");
      Assert.assertTrue(cluster != null);

      ClusterCreate attrs = clusterConfigMgr.getClusterConfig("my-cluster5");
      String manifest = gson.toJson(attrs);
      System.out.println(manifest);
      Assert.assertTrue(
            manifest.indexOf("main_group") != -1
                  && manifest.indexOf("expanded_master") != -1
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

                  .indexOf("{\"name\":\"my-cluster5\",\"groups\":[{\"name\":\"main_group\",\"roles\":[\"hadoop_namenode\"],\"instance_num\":1,\"storage\":{\"type\":\"local\",\"size\":50},\"cpu\":3,\"memory\":15000,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster5/main_group\"},{\"name\":\"expanded_master\",\"roles\":[\"hadoop_jobtracker\"],\"instance_num\":1,\"storage\":{\"type\":\"shared\",\"size\":50},\"cpu\":2,\"memory\":7500,\"ha\":\"on\",\"vm_folder_path\":\"SERENGETI-null/my-cluster5/expanded_master\"},{\"name\":\"expanded_worker\",\"roles\":[\"hadoop_datanode\",\"hadoop_tasktracker\"],\"instance_num\":3,\"storage\":{\"type\":\"local\",\"size\":50},\"cpu\":1,\"memory\":3748,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster5/expanded_worker\"}],\"distro\":\"apache\",\"vc_clusters\":[{\"name\":\"cluster1\",\"vc_rps\":[\"rp2\"]},{\"name\":\"cluster2\",\"vc_rps\":[\"rp1\",\"rp2\"]},{\"name\":\"cluster4\",\"vc_rps\":[\"rp1\"]}],\"template_id\":\"vm-001\",\"networking\":[{\"port_group\":\"CFNetwork\",\"type\":\"dhcp\"}]") != -1,
            "manifest is inconsistent");
   }

   // same as above, but the node group's storage names candidate datastores
   // (dsNames) instead of specifying a size
   public void testClusterConfigWithGroupStoragePattern() {
      ClusterCreate spec = new ClusterCreate();
      spec.setNetworkConfig(createNetConfigs());
      spec.setName("my-cluster6");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp2");
      rps.add("myRp3");
      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      NodeGroupCreate[] nodegroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(1);
      group.setInstanceType(InstanceType.LARGE);
      group.setHaFlag("off");
      group.setName("main_group");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      group.setRoles(roles);
      StorageRead storage = new StorageRead();
      storage.setType(DatastoreType.LOCAL.toString());
      List<String> dsNames = new ArrayList<String>();
      dsNames.add("testSharedStore");
      dsNames.add("testLocalStore");
      storage.setDsNames(dsNames);
      group.setStorage(storage);
      spec.setNodeGroups(nodegroups);
      clusterConfigMgr.createClusterConfig(spec);

      ClusterEntity cluster = clusterEntityMgr.findByName("my-cluster6");
      Assert.assertTrue(cluster != null);

      ClusterCreate attrs = clusterConfigMgr.getClusterConfig("my-cluster6");
      String manifest = gson.toJson(attrs);
      System.out.println(manifest);
      Assert.assertTrue(
            manifest.indexOf("main_group") != -1
                  && manifest.indexOf("expanded_master") != -1
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

                  .indexOf("{\"name\":\"my-cluster6\",\"groups\":[{\"name\":\"main_group\",\"roles\":[\"hadoop_namenode\"],\"instance_num\":1,\"storage\":{\"type\":\"local\",\"size\":100,\"name_pattern\":[\"vmfs*\",\"local1\"]},\"cpu\":3,\"memory\":15000,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster6/main_group\"},{\"name\":\"expanded_master\",\"roles\":[\"hadoop_jobtracker\"],\"instance_num\":1,\"storage\":{\"type\":\"shared\",\"size\":50},\"cpu\":2,\"memory\":7500,\"ha\":\"on\",\"vm_folder_path\":\"SERENGETI-null/my-cluster6/expanded_master\"},{\"name\":\"expanded_worker\",\"roles\":[\"hadoop_datanode\",\"hadoop_tasktracker\"],\"instance_num\":3,\"storage\":{\"type\":\"local\",\"size\":50},\"cpu\":1,\"memory\":3748,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster6/expanded_worker\"}],\"distro\":\"apache\",\"vc_clusters\":[{\"name\":\"cluster1\",\"vc_rps\":[\"rp2\"]},{\"name\":\"cluster2\",\"vc_rps\":[\"rp1\",\"rp2\"]},{\"name\":\"cluster4\",\"vc_rps\":[\"rp1\"]}],\"template_id\":\"vm-001\",\"networking\":[{\"port_group\":\"CFNetwork\",\"type\":\"dhcp\"}]") != -1,
            "manifest is inconsistent");
   }

   // the spec only defines a master-role group; the expanded worker group in the
   // manifest must be filled in by the config manager itself
   public void testClusterConfigWithNoSlave() {
      ClusterCreate spec = new ClusterCreate();
      spec.setNetworkConfig(createNetConfigs());
      spec.setName("my-cluster7");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp2");
      rps.add("myRp3");
      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      NodeGroupCreate[] nodegroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(1);
      group.setInstanceType(InstanceType.LARGE);
      group.setHaFlag("off");
      group.setName("main_group");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      roles.add("hadoop_jobtracker");
      group.setRoles(roles);

      spec.setNodeGroups(nodegroups);
      clusterConfigMgr.createClusterConfig(spec);

      ClusterEntity cluster = clusterEntityMgr.findByName("my-cluster7");
      Assert.assertTrue(cluster != null);

      ClusterCreate attrs = clusterConfigMgr.getClusterConfig("my-cluster7");
      String manifest = gson.toJson(attrs);
      System.out.println(manifest);
      Assert.assertTrue(
            manifest.indexOf("main_group") != -1
                  && manifest.indexOf("expanded_worker") != -1,
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterCreate

   }

   // attach application-level configuration (core-site.xml overrides) to the spec
   // and check the generated manifest
   @SuppressWarnings({ "unchecked", "rawtypes" })
   @Test(groups = { "TestClusterConfigManager" })
   public void testClusterAppConfig() throws Exception {
      ClusterCreate spec = new ClusterCreate();
      spec.setName("my-cluster8");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp1");
      spec.setRpNames(rps);
      spec.setNetworkConfig(createNetConfigs());
      spec.setType(ClusterType.HDFS_MAPRED);
      spec.setDistro("bigtop");
      spec.setDistroVendor(Constants.DEFAULT_VENDOR);
      spec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      spec.setType(null);
      String configJson =
            "{\"cluster_configuration\":{\"hadoop\":{\"core-site.xml\":{\"hadoop.security.group.mapping\":\"xyz\",\"hadoop.security.authorization\":true}}}}";
      Map config = (new Gson()).fromJson(configJson, Map.class);
      spec.setConfiguration((Map<String, Object>) (config
            .get("cluster_configuration")));
      clusterConfigMgr.createClusterConfig(spec);

      ClusterEntity cluster = clusterEntityMgr.findClusterById(1L);
      List<ClusterEntity> cs = clusterEntityMgr.findAllClusters();
      for (ClusterEntity c : cs) {
         System.out.println(c.getId());
      }
      cluster = clusterEntityMgr.findByName("my-cluster8");
      Assert.assertTrue(cluster != null);

      ClusterCreate attrs = clusterConfigMgr.getClusterConfig("my-cluster8");
      String manifest = gson.toJson(attrs);
      System.out.println(manifest);
      Assert.assertTrue(manifest.indexOf("master") != -1,
            "manifest should contain node groups");
      Assert.assertTrue(
View Full Code Here