Package com.vmware.bdd.apitypes

Examples of com.vmware.bdd.apitypes.NodeGroupRead


      return toNodeGroupRead(false);
   }
  
   // this method should be called inside a transaction
   public NodeGroupRead toNodeGroupRead(boolean ignoreObsoleteNode) {
      NodeGroupRead nodeGroupRead = new NodeGroupRead();
      nodeGroupRead.setName(this.name);
      nodeGroupRead.setCpuNum(this.cpuNum);
      nodeGroupRead.setMemCapacityMB(this.memorySize);
      nodeGroupRead.setSwapRatio(this.swapRatio);
      nodeGroupRead.setInstanceNum(this.getRealInstanceNum(ignoreObsoleteNode));

      Gson gson = new Gson();
      @SuppressWarnings("unchecked")
      List<String> groupRoles = gson.fromJson(roles, List.class);
      nodeGroupRead.setRoles(groupRoles);

      StorageRead storage = new StorageRead();
      storage.setType(this.storageType.toString());
      storage.setSizeGB(this.storageSize);

      // set dsNames/dsNames4Data/dsNames4System
      List<String> datastoreNameList = getVcDatastoreNameList();
      if (datastoreNameList != null && !datastoreNameList.isEmpty())
         storage.setDsNames(datastoreNameList);
      if (getSdDatastoreNameList() != null
            && !getSdDatastoreNameList().isEmpty())
         storage.setDsNames4System(getSdDatastoreNameList());
      if (getDdDatastoreNameList() != null
            && !getDdDatastoreNameList().isEmpty())
         storage.setDsNames4Data(getDdDatastoreNameList());

      nodeGroupRead.setStorage(storage);

      List<NodeRead> nodeList = new ArrayList<NodeRead>();
      for (NodeEntity node : this.nodes) {
         if (ignoreObsoleteNode && (node.isObsoleteNode()
               || node.isDisconnected())) {
            continue;
         }
         nodeList.add(node.toNodeRead(true));
      }
      nodeGroupRead.setInstances(nodeList);

      List<GroupAssociation> associations = new ArrayList<GroupAssociation>();
      for (NodeGroupAssociation relation : groupAssociations) {
         GroupAssociation association = new GroupAssociation();
         association.setReference(relation.getReferencedGroup());
         association.setType(relation.getAssociationType());
         associations.add(association);
      }

      PlacementPolicy policy = new PlacementPolicy();
      policy.setInstancePerHost(instancePerHost);
      policy.setGroupAssociations(associations);
      policy.setGroupRacks(new Gson().fromJson(groupRacks, GroupRacks.class));

      nodeGroupRead.setPlacementPolicies(policy);

      return nodeGroupRead;
   }
View Full Code Here


         // do not throw exception for exporting cluster info
      }

      List<NodeGroupRead> groupList = new ArrayList<NodeGroupRead>();
      for (NodeGroupEntity group : cluster.getNodeGroups()) {
         NodeGroupRead groupRead = group.toNodeGroupRead(ignoreObsoleteNode);
         groupRead.setComputeOnly(false);
         try {
            groupRead.setComputeOnly(softMgr.isComputeOnlyRoles(groupRead
                  .getRoles()));
         } catch (Exception e) {
         }
         groupList.add(groupRead);
      }
View Full Code Here

   @SuppressWarnings("unchecked")
   @Test
   public void testValidateSetManualElasticity() {
      ClusterRead cluster = new ClusterRead();
      cluster.setDistroVendor(Constants.MAPR_VENDOR);
      NodeGroupRead compute = new NodeGroupRead();
      compute.setName("compute");
      compute.setRoles(Arrays.asList("mapr_tasktracker"));
      compute.setComputeOnly(defaultSoftwareManager.isComputeOnlyRoles(compute.getRoles()));
      cluster.setNodeGroups(Arrays.asList(compute));
      assertEquals(true, cluster.validateSetManualElasticity());
      compute.setRoles(Arrays.asList(
            "mapr_tasktracker",
      "mapr_nfs"));
      compute.setComputeOnly(defaultSoftwareManager.isComputeOnlyRoles(compute.getRoles()));
      cluster.setNodeGroups(Arrays.asList(compute));
      assertEquals(false,
            cluster.validateSetManualElasticity(Arrays.asList("compute")));
      cluster.setDistroVendor(Constants.APACHE_VENDOR);
      compute.setRoles(Arrays.asList("hadoop_tasktracker"));
      compute.setComputeOnly(defaultSoftwareManager.isComputeOnlyRoles(compute.getRoles()));
      cluster.setNodeGroups(Arrays.asList(compute));
      assertEquals(true, cluster.validateSetManualElasticity());
      compute.setRoles(Arrays.asList("hadoop_tasktracker",
      "tempfs_client"));
      compute.setComputeOnly(defaultSoftwareManager.isComputeOnlyRoles(compute.getRoles()));
      cluster.setNodeGroups(Arrays.asList(compute));
      assertEquals(true, cluster.validateSetManualElasticity());
   }
View Full Code Here

   @Test
   public void testValidateSetParamParameters() {
      List<String> roles1 = new LinkedList<String>();
      roles1.add("hadoop_tasktracker");
      NodeGroupRead ngr1 = new NodeGroupRead();
      ngr1.setInstanceNum(6);
      ngr1.setRoles(roles1);
      ngr1.setComputeOnly(defaultSoftwareManager.isComputeOnlyRoles(ngr1.getRoles()));
      List<NodeGroupRead> nodeGroupRead = new LinkedList<NodeGroupRead>();
      nodeGroupRead.add(ngr1);
      ClusterRead cluster = new ClusterRead();
      cluster.setNodeGroups(nodeGroupRead);
      cluster.setVhmMinNum(-1);
View Full Code Here

        buildReqRespWithoutReqBody("https://127.0.0.1:8443/serengeti/api/task/12", HttpMethod.GET, HttpStatus.OK,
                mapper.writeValueAsString(task));

        ClusterRead cluster = new ClusterRead();
        List<NodeGroupRead> nodeGroups = new ArrayList<NodeGroupRead>();
        NodeGroupRead workerGroup = new NodeGroupRead();
        workerGroup.setName("worker");
        workerGroup.setInstanceNum(1);
        List<NodeRead> instances = new ArrayList<NodeRead>();
        NodeRead instance1 = new NodeRead();
        instance1.setName("worker1");
        instance1.setStatus("PoweredOn");
        instance1.setAction("Getting IP...");
        instances.add(instance1);
        workerGroup.setInstances(instances);
        nodeGroups.add(workerGroup);
        cluster.setNodeGroups(nodeGroups);
        buildReqRespWithoutReqBody("https://127.0.0.1:8443/serengeti/api/cluster/cluster1", HttpMethod.GET,
                HttpStatus.OK, mapper.writeValueAsString(cluster));
View Full Code Here

        roles1.add(Constants.ROLE_HADOOP_NAME_NODE);
        roles1.add(Constants.ROLE_HADOOP_JOB_TRACKER);
        List<String> roles2 = new LinkedList<String>();
        roles2.add(Constants.ROLE_HADOOP_DATANODE);
        roles2.add(Constants.ROLE_HADOOP_TASKTRACKER);
        NodeGroupRead ngr1 = new NodeGroupRead();
        ngr1.setName("NodeGroup1");
        ngr1.setCpuNum(6);
        ngr1.setMemCapacityMB(2048);
        ngr1.setStorage(sr1);
        ngr1.setInstanceNum(1);
        ngr1.setInstances(instances1);
        ngr1.setRoles(roles1);
        NodeGroupRead ngr2 = new NodeGroupRead();
        ngr2.setName("NodeGroup2");
        ngr2.setCpuNum(12);
        ngr2.setMemCapacityMB(2048);
        ngr2.setStorage(sr2);
        ngr2.setInstanceNum(20);
        ngr2.setInstances(instances2);
        ngr2.setRoles(roles2);
        ClusterRead cr1 = new ClusterRead();
        cr1.setName("cluster1");
        cr1.setDistro("distro1");
        cr1.setInstanceNum(10);
        cr1.setStatus(ClusterStatus.RUNNING);
View Full Code Here

      List<NodeRead> instances1 = new LinkedList<NodeRead>();
      instances1.add(nr1);
      List<String> roles1 = new LinkedList<String>();
      roles1.add(Constants.ROLE_HADOOP_NAME_NODE);
      roles1.add(Constants.ROLE_HADOOP_JOB_TRACKER);
      NodeGroupRead ngr1 = new NodeGroupRead();
      ngr1.setName("NodeGroup1");
      ngr1.setCpuNum(6);
      ngr1.setMemCapacityMB(2048);
      ngr1.setStorage(sr1);
      ngr1.setInstanceNum(1);
      ngr1.setInstances(instances1);
      ngr1.setRoles(roles1);
      ClusterRead cr1 = new ClusterRead();
      cr1.setName("cluster1");
      cr1.setDistro("distro1");
      cr1.setInstanceNum(10);
      cr1.setStatus(ClusterStatus.RUNNING);
View Full Code Here

      nr1.setStatus("running");
      List<NodeRead> instances1 = new LinkedList<NodeRead>();
      instances1.add(nr1);
      List<String> roles1 = new LinkedList<String>();
      roles1.add(Constants.ROLE_HADOOP_TASKTRACKER);
      NodeGroupRead ngr1 = new NodeGroupRead();
      ngr1.setName("NodeGroup1");
      ngr1.setCpuNum(6);
      ngr1.setMemCapacityMB(2048);
      ngr1.setStorage(sr1);
      ngr1.setInstanceNum(2);
      ngr1.setInstances(instances1);
      ngr1.setRoles(roles1);
      ClusterRead cr1 = new ClusterRead();
      cr1.setName("cluster1");
      cr1.setDistroVendor("BIGTOP");
      cr1.setDistro("distro1");
      cr1.setInstanceNum(10);
      cr1.setVhmMinNum(-1);
      cr1.setVhmMaxNum(-1);
      cr1.setStatus(ClusterStatus.RUNNING);
      List<NodeGroupRead> nodeGroupRead = new LinkedList<NodeGroupRead>();
      nodeGroupRead.add(ngr1);
      List<String> roles2 = new LinkedList<String>();
      roles2.add(Constants.ROLE_HADOOP_CLIENT);
      NodeGroupRead ngr2 = new NodeGroupRead();
      ngr2.setName("NodeGroup2");
      ngr2.setCpuNum(6);
      ngr2.setMemCapacityMB(2048);
      ngr2.setStorage(sr1);
      ngr2.setInstanceNum(1);
      ngr2.setInstances(instances1);
      ngr2.setRoles(roles2);
      nodeGroupRead.add(ngr2);
      cr1.setNodeGroups(nodeGroupRead);
      cr1.setAutomationEnable(false);

View Full Code Here

       List<NodeRead> instances1 = new LinkedList<NodeRead>();
       instances1.add(nr1);
       List<String> roles1 = new LinkedList<String>();
       roles1.add(Constants.ROLE_HADOOP_NAME_NODE);
       roles1.add(Constants.ROLE_HADOOP_JOB_TRACKER);
       NodeGroupRead ngr1 = new NodeGroupRead();
       ngr1.setName("NodeGroup1");
       ngr1.setCpuNum(6);
       ngr1.setMemCapacityMB(2048);
       ngr1.setStorage(sr1);
       ngr1.setInstanceNum(1);
       ngr1.setInstances(instances1);
       ngr1.setRoles(roles1);
       ClusterRead cr1 = new ClusterRead();
       cr1.setName("cluster1");
       cr1.setDistro("distro1");
       cr1.setInstanceNum(10);
       cr1.setStatus(ClusterStatus.RUNNING);
View Full Code Here

       List<NodeRead> instances1 = new LinkedList<NodeRead>();
       instances1.add(nr1);
       List<String> roles1 = new LinkedList<String>();
       roles1.add(Constants.ROLE_HADOOP_NAME_NODE);
       roles1.add(Constants.ROLE_HADOOP_JOB_TRACKER);
       NodeGroupRead ngr1 = new NodeGroupRead();
       ngr1.setName("NodeGroup1");
       ngr1.setCpuNum(6);
       ngr1.setMemCapacityMB(2048);
       ngr1.setStorage(sr1);
       ngr1.setInstanceNum(1);
       ngr1.setInstances(instances1);
       ngr1.setRoles(roles1);
       ClusterRead cr1 = new ClusterRead();
       cr1.setName("cluster1");
       cr1.setDistro("distro1");
       cr1.setInstanceNum(10);
       cr1.setStatus(ClusterStatus.RUNNING);
View Full Code Here

TOP

Related Classes of com.vmware.bdd.apitypes.NodeGroupRead

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by ORACLE Inc. Contact coftware@gmail.com.