Package: com.vmware.bdd.apitypes

Examples of com.vmware.bdd.apitypes.NodeGroupCreate


                  + hdfsArray[1] + "\"}}}}";
      // Parse the cluster-level JSON into a raw Map and attach only its
      // "configuration" section to the spec. Gson returns untyped maps,
      // hence the unchecked cast.
      Map clusterConfig = (new Gson()).fromJson(clusterConfigJson, Map.class);
      spec.setConfiguration((Map<String, Object>) (clusterConfig
            .get("configuration")));
      //build a master group, a compute node group and a datanode.
      // ng0: master group — namenode + resourcemanager on a single LARGE node,
      // with its own group-level fs.default.name override (hdfsArray[2]).
      NodeGroupCreate ng0 = new NodeGroupCreate();
      List<String> masterRole = new ArrayList<String>();
      masterRole.add("hadoop_namenode");
      masterRole.add("hadoop_resourcemanager");
      ng0.setRoles(masterRole);
      ng0.setName("master");
      ng0.setInstanceNum(1);
      ng0.setInstanceType(InstanceType.LARGE);
      String ng0ConfigJson =
            "{\"configuration\":{\"hadoop\":{\"core-site.xml\":{\"fs.default.name\":\""
                  + hdfsArray[2] + "\"}}}}";
      Map ng0Config = (new Gson()).fromJson(ng0ConfigJson, Map.class);
      ng0.setConfiguration((Map<String, Object>) (ng0Config
            .get("configuration")));

      // ng1: compute group — 4 MEDIUM nodemanager nodes with explicit
      // CPU/memory overrides, 10 GB LOCAL storage, and a third
      // fs.default.name override (hdfsArray[3]).
      NodeGroupCreate ng1 = new NodeGroupCreate();
      List<String> computeRoles = new ArrayList<String>();
      computeRoles.add("hadoop_nodemanager");
      ng1.setRoles(computeRoles);
      ng1.setName("compute1");
      ng1.setInstanceNum(4);
      ng1.setCpuNum(2);
      ng1.setMemCapacityMB(7500);
      ng1.setInstanceType(InstanceType.MEDIUM);
      StorageRead storage = new StorageRead();
      storage.setType("LOCAL");
      storage.setSizeGB(10);
      ng1.setStorage(storage);
      String ng1ConfigJson =
            "{\"configuration\":{\"hadoop\":{\"core-site.xml\":{\"fs.default.name\":\""
                  + hdfsArray[3] + "\"}}}}";
      Map ng1Config = (new Gson()).fromJson(ng1ConfigJson, Map.class);
      ng1.setConfiguration((Map<String, Object>) (ng1Config
            .get("configuration")));
      // ng2: data group — 2 MEDIUM datanode nodes with 10 GB LOCAL storage
      // and no group-level configuration override.
      NodeGroupCreate ng2 = new NodeGroupCreate();
      List<String> dataRoles = new ArrayList<String>();
      dataRoles.add("hadoop_datanode");
      ng2.setRoles(dataRoles);
      ng2.setName("data1");
      ng2.setInstanceNum(2);
      ng2.setInstanceType(InstanceType.MEDIUM);
      StorageRead storageCompute = new StorageRead();
      storageCompute.setType("LOCAL");
      storageCompute.setSizeGB(10);
      ng2.setStorage(storageCompute);

      // Expand the hand-built spec with factory defaults, then persist it.
      NodeGroupCreate[] ngs = new NodeGroupCreate[] { ng0, ng1, ng2 };
      spec.setNodeGroups(ngs);
      spec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      clusterConfigMgr.createClusterConfig(spec);
View Full Code Here


      spec.setDistro("apache");
      spec.setDistroVendor(Constants.DEFAULT_VENDOR);

      //build a master group, a datanode group, a compute node group with strict association and tempfs.
      // ngs[0]: master — namenode + jobtracker (MRv1 layout) on one LARGE node.
      NodeGroupCreate[] ngs = new NodeGroupCreate[3];
      NodeGroupCreate ng0 = new NodeGroupCreate();
      ngs[0] = ng0;
      List<String> masterRoles = new ArrayList<String>();
      masterRoles.add("hadoop_namenode");
      masterRoles.add("hadoop_jobtracker");
      ngs[0].setRoles(masterRoles);
      ngs[0].setName("master");
      ngs[0].setInstanceNum(1);
      ngs[0].setInstanceType(InstanceType.LARGE);

      // ngs[1]: data group — 4 MEDIUM datanodes with 50 GB LOCAL storage.
      NodeGroupCreate ng1 = new NodeGroupCreate();
      ngs[1] = ng1;
      List<String> dataNodeRoles = new ArrayList<String>();
      dataNodeRoles.add("hadoop_datanode");
      ngs[1].setRoles(dataNodeRoles);
      ngs[1].setName("data");
      ngs[1].setInstanceNum(4);
      ngs[1].setInstanceType(InstanceType.MEDIUM);
      StorageRead storage = new StorageRead();
      storage.setType("LOCAL");
      storage.setSizeGB(50);
      ngs[1].setStorage(storage);

      // ngs[2]: compute group — tasktracker; per the comment above it is
      // configured with strict association and tempfs (continues past this
      // fragment).
      NodeGroupCreate ng2 = new NodeGroupCreate();
      ngs[2] = ng2;
      List<String> computeNodeRoles = new ArrayList<String>();
      computeNodeRoles.add("hadoop_tasktracker");
      ngs[2].setRoles(computeNodeRoles);
      ngs[2].setName("compute");
View Full Code Here

      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      // Single node group: 10 SMALL datanode instances, 3 vCPUs, HA disabled.
      NodeGroupCreate[] nodegroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(10);
      group.setInstanceType(InstanceType.SMALL);
      group.setHaFlag("off");
      group.setName("slave");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_datanode");
      group.setRoles(roles);

      spec.setNodeGroups(nodegroups);
      clusterConfigMgr.createClusterConfig(spec);

      // Read the persisted cluster back for the assertions that follow.
      ClusterEntity cluster = clusterEntityMgr.findByName("my-cluster1");
View Full Code Here

      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      // Single node group: 10 SMALL tasktracker instances, 3 vCPUs, HA off.
      NodeGroupCreate[] nodegroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(10);
      group.setInstanceType(InstanceType.SMALL);
      group.setHaFlag("off");
      group.setName("slave");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_tasktracker");
      group.setRoles(roles);

      spec.setNodeGroups(nodegroups);
      clusterConfigMgr.createClusterConfig(spec);

      // Read the persisted cluster back for the assertions that follow.
      ClusterEntity cluster = clusterEntityMgr.findByName("my-cluster-slave2");
View Full Code Here

      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      // Single node group: one LARGE namenode instance, 3 vCPUs, HA off.
      NodeGroupCreate[] nodegroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(1);
      group.setInstanceType(InstanceType.LARGE);
      group.setHaFlag("off");
      group.setName("main_group");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      group.setRoles(roles);

      spec.setNodeGroups(nodegroups);
      clusterConfigMgr.createClusterConfig(spec);

      // Read the persisted cluster back for the assertions that follow.
      ClusterEntity cluster = clusterEntityMgr.findByName("my-cluster2");
View Full Code Here

      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      // Single node group with 10 namenode instances — an invalid layout
      // (multiple namenodes in one group). NOTE(review): the try block below
      // suggests createClusterConfig is expected to reject this spec; the
      // catch clause is outside this fragment — confirm against full source.
      NodeGroupCreate[] nodegroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(10);
      group.setInstanceType(InstanceType.LARGE);
      group.setName("main_group");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      group.setRoles(roles);

      spec.setNodeGroups(nodegroups);
      try {
         clusterConfigMgr.createClusterConfig(spec);

      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      // Two node groups that BOTH carry the hadoop_namenode role.
      // NOTE(review): the try block below suggests this duplicate-role spec
      // is expected to be rejected; the catch clause is outside this
      // fragment — confirm against full source.
      NodeGroupCreate[] nodegroups = new NodeGroupCreate[2];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(1);
      group.setInstanceType(InstanceType.LARGE);
      group.setName("main_group");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      group.setRoles(roles);

      // Second group: identical sizing and role, different name.
      group = new NodeGroupCreate();
      nodegroups[1] = group;
      group.setCpuNum(3);
      group.setInstanceNum(1);
      group.setInstanceType(InstanceType.LARGE);
      group.setName("main_group1");
      roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      group.setRoles(roles);

      spec.setNodeGroups(nodegroups);
      try {
         clusterConfigMgr.createClusterConfig(spec);
View Full Code Here

   public GroupRacks getGroupRacks() {
      /*
       * basic rule: if group A is strictly associated with group B, then A should follow
       * B's rack policy
       */
      NodeGroupCreate primary = getPrimaryGroup();
      if (primary.getPlacementPolicies() != null) {
         return primary.getPlacementPolicies().getGroupRacks();
      } else {
         return null;
      }
   }
View Full Code Here

            ClusterSpecFactory.createDefaultSpec(ClusterType.HDFS_MAPRED,
                  Constants.DEFAULT_VENDOR, null, null);
      createSpec.setName(TEST_DHCP_CLUSTER_NAME);
      createSpec.setNetworkConfig(createNetConfig(TEST_DHCP_NETWORK_NAME, dhcpPortgroup));
      createSpec.setDistro("bigtop");
      // Shrink the default worker group to a single instance to keep the
      // test cluster small.
      NodeGroupCreate worker = createSpec.getNodeGroup("worker");
      worker.setInstanceNum(1);
      // Kick off asynchronous creation, then verify the cluster is reported
      // as PROVISIONING while the job runs.
      long jobExecutionId = clusterMgr.createCluster(createSpec);
      ClusterRead cluster =
            clusterMgr.getClusterByName(TEST_DHCP_CLUSTER_NAME, false);
      Assert.assertTrue(
            cluster.getStatus() == ClusterStatus.PROVISIONING,
View Full Code Here

      zookeeperFileMap.put("java.env", zookeeperEnvMap);
      hadoopMap.put("hadoop", hadoopFileMap);
      hadoopMap.put("hbase", hbaseFileMap);
      hadoopMap.put("zookeeper", zookeeperFileMap);
      // Cluster-level configuration: hadoop + hbase + zookeeper sections.
      cluster.setConfiguration(hadoopMap);
      NodeGroupCreate nodeGroup1=new NodeGroupCreate();
      NodeGroupCreate nodeGroup2=new NodeGroupCreate();
      NodeGroupCreate nodeGroup3=new NodeGroupCreate();
      // Rebuild fresh maps for the group-level configurations.
      // NOTE(review): "Popertys" is a pre-existing misspelling of
      // "Properties"; the variables are declared outside this fragment.
      hadoopMap=new HashMap<String,Object>();
      Map<String, Object> zookeeperMap = new HashMap<String,Object>();
      Map<String, Object> noExistingFileZookeeperMap = new HashMap<String,Object>();
      hadoopFileMap=new HashMap<String,Object>();
      corePopertysMap=new HashMap<String,Object>();
      hdfsPopertysMap=new HashMap<String,Object>();
      mapredPopertysMap=new HashMap<String,Object>();
      corePopertysMap.put("hadoop.tmp.dir", "/tmp");
      hdfsPopertysMap.put("dfs.namenode.test.level", 4);
      hdfsPopertysMap.put("dfs.namenode.logger.level", 5);
      mapredPopertysMap.put("mapred.cluster.map.memory.mb",200);
      hadoopFileMap.put("core-site.xml", corePopertysMap);
      hadoopFileMap.put("hdfs-site.xml", hdfsPopertysMap);
      hadoopFileMap.put("mapred-site.xml", mapredPopertysMap);
      hadoopMap.put("hadoop", hadoopFileMap);
      zookeeperMap.put("zookeeper", zookeeperFileMap);
      // Deliberately wrong shape: hadoop file entries filed under the
      // "zookeeper" section (files that don't exist for zookeeper).
      noExistingFileZookeeperMap.put("zookeeper", hadoopFileMap);
      // Three groups: valid hadoop config, valid zookeeper config, and the
      // mismatched zookeeper config.
      nodeGroup1.setConfiguration(hadoopMap);
      nodeGroup2.setConfiguration(zookeeperMap);
      nodeGroup3.setConfiguration(noExistingFileZookeeperMap);
      cluster.setNodeGroups(new NodeGroupCreate[]{nodeGroup1, nodeGroup2, nodeGroup3});
   }
View Full Code Here

TOP

Related Classes of com.vmware.bdd.apitypes.NodeGroupCreate

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.