Package com.vmware.bdd.software.mgmt.plugin.model

Examples of com.vmware.bdd.software.mgmt.plugin.model.NodeGroupInfo
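
NodeGroupInfo describes one node group in a cluster blueprint: its name, size, service roles, per-role configuration, and storage. The sketch below is inferred from the setters used in the examples that follow; the exact field types and the "SHARED" storage-type value are assumptions.

   // Minimal construction sketch (assumes java.util.Arrays/HashMap are imported).
   NodeGroupInfo master = new NodeGroupInfo();
   master.setName("master");
   master.setInstanceNum(1);
   master.setRoles(Arrays.asList("hadoop_namenode", "hadoop_jobtracker"));
   master.setConfiguration(new HashMap<String, Object>()); // role name -> key/value map
   master.setStorageSize(50);       // in GB, per the setSizeGB() usage below
   master.setStorageType("SHARED"); // hypothetical value; stored as a string name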


   private void sortNodeGroups(ClusterCreate cluster, ClusterBlueprint blueprint) {
      NodeGroupCreate[] sortedGroups =
            new NodeGroupCreate[cluster.getNodeGroups().length];
      for (int i = 0; i < blueprint.getNodeGroups().size(); i++) {
         NodeGroupInfo groupInfo = blueprint.getNodeGroups().get(i);
         if (cluster.getNodeGroups()[i].getName().equals(groupInfo.getName())) {
            // already in the right slot; skip the lookup to save query time
            sortedGroups[i] = cluster.getNodeGroups()[i];
            continue;
         }
         sortedGroups[i] = cluster.getNodeGroup(groupInfo.getName());
      }
      cluster.setNodeGroups(sortedGroups);
   }
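
After sortNodeGroups returns, the cluster's node group array is aligned index-for-index with the blueprint's node group list. A quick post-condition check (a sketch, assuming group names match one-to-one) would be:

   // Sketch: verify the ordering invariant that later code relies on.
   for (int i = 0; i < blueprint.getNodeGroups().size(); i++) {
      assert cluster.getNodeGroups()[i].getName()
            .equals(blueprint.getNodeGroups().get(i).getName());
   }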


         ClusterBlueprint blueprint) {
      cluster.setConfiguration(blueprint.getConfiguration());
      sortNodeGroups(cluster, blueprint);
      // node groups were sorted above, so blueprint and cluster groups share the same array index
      for (int i = 0; i < blueprint.getNodeGroups().size(); i++) {
         NodeGroupInfo group = blueprint.getNodeGroups().get(i);
         NodeGroupCreate groupCreate = cluster.getNodeGroups()[i];
         groupCreate.setConfiguration(group.getConfiguration());
         groupCreate.setRoles(group.getRoles());
         groupCreate.setInstanceType(group.getInstanceType());
         groupCreate.setPlacementPolicies(group.getPlacement());
         if (groupCreate.getStorage() == null) {
            groupCreate.setStorage(new StorageRead());
         }
         groupCreate.getStorage().setSizeGB(group.getStorageSize());
      }
      cluster.setExternalHDFS(blueprint.getExternalHDFS());
      cluster.setExternalMapReduce(blueprint.getExternalMapReduce());
   }

      blueprint.setHadoopStack(hadoopStack);

      // set nodes/nodegroups
      List<NodeGroupInfo> nodeGroupInfos = new ArrayList<NodeGroupInfo>();
      for (NodeGroupEntity group : clusterEntity.getNodeGroups()) {
         NodeGroupInfo nodeGroupInfo = toNodeGroupInfo(group);
         nodeGroupInfos.add(nodeGroupInfo);
      }
      blueprint.setNodeGroups(nodeGroupInfos);
      return blueprint;
   }

      return blueprint;
   }

   private NodeGroupInfo toNodeGroupInfo(NodeGroupEntity group) {
      Gson gson = new Gson();
      NodeGroupInfo nodeGroupInfo = new NodeGroupInfo();
      nodeGroupInfo.setName(group.getName());
      nodeGroupInfo.setInstanceNum(group.getRealInstanceNum(true));
      nodeGroupInfo.setRoles(gson.fromJson(group.getRoles(), List.class));
      if (group.getHadoopConfig() != null) {
         Map<String, Object> groupConfigs =
               gson.fromJson(group.getHadoopConfig(), Map.class);
         nodeGroupInfo.setConfiguration(groupConfigs);
      }
      if (group.getHaFlag() != null
            && (group.getHaFlag().equalsIgnoreCase(Constants.HA_FLAG_FT)
                  || group.getHaFlag().equalsIgnoreCase(Constants.HA_FLAG_ON))) {
         nodeGroupInfo.setHaEnabled(true);
      }
      nodeGroupInfo.setInstanceType(group.getNodeType());
      nodeGroupInfo.setStorageSize(group.getStorageSize());
      nodeGroupInfo.setStorageType(group.getStorageType().name());

      // set nodes
      List<NodeInfo> nodeInfos = new ArrayList<NodeInfo>();
      for (NodeEntity node : group.getNodes()) {
         NodeInfo nodeInfo = new NodeInfo();
         nodeInfo.setName(node.getVmName());
         nodeInfo.setHostname(node.getGuestHostName());
         nodeInfo.setIpConfigs(node.convertToIpConfigInfo());
         nodeInfo.setRack(node.getRack());
         nodeInfo.setVolumes(node.getDataVolumnsMountPoint());
         nodeInfos.add(nodeInfo);
      }

      nodeGroupInfo.setNodes(nodeInfos);
      return nodeGroupInfo;

   }
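
The entity persists roles and Hadoop configuration as JSON strings, so the conversion above is plain Gson deserialization. A standalone sketch of those two calls, with hypothetical JSON values:

   Gson gson = new Gson();
   // Roles are stored as a JSON array of role names.
   List<String> roles =
         gson.fromJson("[\"hadoop_datanode\",\"hadoop_tasktracker\"]", List.class);
   // Hadoop config is stored as a nested JSON object.
   Map<String, Object> conf = gson.fromJson(
         "{\"core-site.xml\":{\"fs.trash.interval\":\"60\"}}", Map.class);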

   public void testValidateRoleDependency() {
      ClusterBlueprint blueprint = new ClusterBlueprint();
      List<String> failedMsgList = new ArrayList<String>();
      assertEquals(false, validator.validateRoleDependency(failedMsgList, blueprint));

      NodeGroupInfo compute = new NodeGroupInfo();
      NodeGroupInfo data = new NodeGroupInfo();
      List<NodeGroupInfo> nodeGroupInfos = new ArrayList<NodeGroupInfo>();
      nodeGroupInfos.add(compute);
      nodeGroupInfos.add(data);
      blueprint.setNodeGroups(nodeGroupInfos);
      assertEquals(false, validator.validateRoleDependency(failedMsgList, blueprint));
      assertEquals(2, failedMsgList.size());
      failedMsgList.clear();
      blueprint.setExternalHDFS("hdfs://192.168.0.2:9000");
      compute.setRoles(Arrays.asList(HadoopRole.HADOOP_TASKTRACKER.toString()));
      data.setRoles(Arrays.asList(HadoopRole.HADOOP_DATANODE.toString()));
      assertEquals(false, validator.validateRoleDependency(failedMsgList, blueprint));
      assertEquals(2, failedMsgList.size());
      assertEquals("Duplicate NameNode or DataNode role.", failedMsgList.get(0));
      assertEquals("Missing JobTracker or ResourceManager role.",
            failedMsgList.get(1));
      failedMsgList.clear();
      blueprint.setExternalHDFS("");
      nodeGroupInfos = new ArrayList<NodeGroupInfo>();
      nodeGroupInfos.add(compute);
      blueprint.setNodeGroups(nodeGroupInfos);
      assertEquals(false, validator.validateRoleDependency(failedMsgList, blueprint));
      assertEquals(1, failedMsgList.size());
      assertEquals("Missing role(s): hadoop_jobtracker for service: MAPRED.", failedMsgList.get(0));
      failedMsgList.clear();
      NodeGroupInfo master = new NodeGroupInfo();
      master.setRoles(Arrays.asList(HadoopRole.HADOOP_JOBTRACKER_ROLE
            .toString()));
      nodeGroupInfos = new ArrayList<NodeGroupInfo>();
      nodeGroupInfos.add(master);
      nodeGroupInfos.add(compute);
      blueprint.setNodeGroups(nodeGroupInfos);

      List<String> hadpnets = new ArrayList<String>();
      hadpnets.add("nw2");
      networkConfig.put(NetTrafficType.HDFS_NETWORK, hadpnets);

      NodeGroupInfo master = new NodeGroupInfo();
      master.setName("master");
      master.setInstanceNum(1);
      master.setRoles(Arrays.asList(HadoopRole.HADOOP_NAMENODE_ROLE.toString(),
            HadoopRole.HADOOP_RESOURCEMANAGER_ROLE.toString()));
      NodeGroupInfo worker = new NodeGroupInfo();
      worker.setName("worker");
      worker.setRoles(Arrays.asList(HadoopRole.HADOOP_DATANODE.toString(),
            HadoopRole.HADOOP_NODEMANAGER_ROLE.toString()));
      worker.setInstanceNum(0);
      NodeGroupInfo client = new NodeGroupInfo();
      client.setName("client");
      client.setInstanceNum(0);
      client.setRoles(Arrays.asList(HadoopRole.HADOOP_CLIENT_ROLE.toString(),
            HadoopRole.HIVE_SERVER_ROLE.toString(),
            HadoopRole.HIVE_ROLE.toString()));
      List<String> failedMsgList = new ArrayList<String>();
      List<String> warningMsgList = new ArrayList<String>();
      List<NodeGroupInfo> groups = new ArrayList<NodeGroupInfo>();

   public void testHasComputeMasterGroup() {
      ClusterBlueprint blueprint = new ClusterBlueprint();
      HadoopStack hadoopStack = new HadoopStack();
      hadoopStack.setVendor(Constants.DEFAULT_VENDOR);
      blueprint.setHadoopStack(hadoopStack);
      NodeGroupInfo compute = new NodeGroupInfo();
      compute.setRoles(Arrays.asList(HadoopRole.HADOOP_NODEMANAGER_ROLE.toString()));
      List<NodeGroupInfo> nodeGroupInfos = new ArrayList<NodeGroupInfo>();
      nodeGroupInfos.add(compute);
      blueprint.setNodeGroups(nodeGroupInfos);
      assertFalse(defaultSoftwareManager.hasComputeMasterGroup(blueprint));
      NodeGroupInfo master = new NodeGroupInfo();
      master.setRoles(Arrays.asList(HadoopRole.HADOOP_RESOURCEMANAGER_ROLE.toString()));
      nodeGroupInfos.add(master);
      blueprint.setNodeGroups(nodeGroupInfos);
      assertTrue(defaultSoftwareManager.hasComputeMasterGroup(blueprint));
   }
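
The test implies that hasComputeMasterGroup returns true only when some node group carries a compute-master role such as HADOOP_RESOURCEMANAGER_ROLE (or HADOOP_JOBTRACKER_ROLE for MapReduce v1). A plausible sketch of that check, not the actual implementation:

   // Sketch: true if any node group carries a compute-master role.
   public boolean hasComputeMasterGroup(ClusterBlueprint blueprint) {
      for (NodeGroupInfo group : blueprint.getNodeGroups()) {
         if (group.getRoles() != null && group.getRoles().contains(
               HadoopRole.HADOOP_RESOURCEMANAGER_ROLE.toString())) {
            return true;
         }
      }
      return false;
   }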

      hadoopStack.setDistro("CDH-5.0.2");
      blueprint.setHadoopStack(hadoopStack);

      List<NodeGroupInfo> groups = new ArrayList<NodeGroupInfo>();

      NodeGroupInfo group01 = new NodeGroupInfo();
      group01.setName("master");
      List<String> roles01 = new ArrayList<String>();
      roles01.add("HDFS_NAMENODE");
      roles01.add("HDFS_SECONDARY_NAMENODE");
      roles01.add("YARN_RESOURCE_MANAGER");
      roles01.add("YARN_JOB_HISTORY");
      group01.setRoles(roles01);
      group01.setInstanceNum(1);
      Map<String, Object> configs = new HashMap<String, Object>();

      Map<String, String> nnConfig = new HashMap<String, String>();
      nnConfig.put("namenode_java_heapsize", "1024");
      configs.put("HDFS_NAMENODE", nnConfig);

      Map<String, String> snnConfig = new HashMap<String, String>();
      snnConfig.put("secondary_namenode_java_heapsize", "1024");
      configs.put("HDFS_SECONDARY_NAMENODE", snnConfig);
      group01.setConfiguration(configs);

      NodeInfo node01 = new NodeInfo();
      node01.setRack("/rack01");
      List<NodeInfo> nodes01 = new ArrayList<>();
      nodes01.add(node01);
      group01.setNodes(nodes01);

      NodeGroupInfo group02 = new NodeGroupInfo();
      group02.setName("worker");
      List<String> roles02 = new ArrayList<>();
      roles02.add("HDFS_DATANODE");
      roles02.add("YARN_NODE_MANAGER");
      group02.setRoles(roles02);

      Map<String, Object> configs02 = new HashMap<String, Object>();
      Map<String, String> dnConfig = new HashMap<String, String>();
      dnConfig.put("dfs_datanode_failed_volumes_tolerated", "2");
      configs02.put("HDFS_DATANODE", dnConfig);
      group02.setConfiguration(configs02);

      NodeInfo node02 = new NodeInfo();
      node02.setRack("/rack02");
      List<NodeInfo> nodes02 = new ArrayList<>();
      nodes02.add(node02);
      group02.setNodes(nodes02);
      group02.setInstanceNum(3);

      groups.add(group01);
      groups.add(group02);
      blueprint.setNodeGroups(groups);
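
The per-group configuration nests role names to key/value maps. Serialized with Gson (a sketch; key order may vary since HashMap is unordered), group01's configuration would come out roughly as:

   String json = new Gson().toJson(group01.getConfiguration());
   // {"HDFS_NAMENODE":{"namenode_java_heapsize":"1024"},
   //  "HDFS_SECONDARY_NAMENODE":{"secondary_namenode_java_heapsize":"1024"}}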

      this.configuration = group.configuration;
      this.vmFolderPath = group.vmFolderPath;
   }

   public NodeGroupInfo toNodeGroupInfo() {
      NodeGroupInfo nodeGroupInfo = new NodeGroupInfo();
      nodeGroupInfo.setName(name);
      nodeGroupInfo.setInstanceNum(instanceNum);
      nodeGroupInfo.setRoles(roles);
      nodeGroupInfo.setConfiguration(configuration);
      if (haFlag != null
            && (haFlag.equalsIgnoreCase(Constants.HA_FLAG_FT) || haFlag
                  .equalsIgnoreCase(Constants.HA_FLAG_ON))) {
         nodeGroupInfo.setHaEnabled(true);
      }
      nodeGroupInfo.setInstanceType(instanceType);
      nodeGroupInfo.setPlacement(placementPolicies);
      if (storage != null) {
         nodeGroupInfo.setStorageSize(storage.getSizeGB());
         nodeGroupInfo.setStorageType(storage.getType());
      }
      nodeGroupInfo.setNodes(null);
      return nodeGroupInfo;
   }
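
Note that the conversion deliberately sets nodes to null: a creation-time spec has no deployed VMs yet. A minimal usage sketch, assuming NodeGroupCreate has a no-arg constructor and a setName() like the other beans here:

   NodeGroupCreate workerSpec = new NodeGroupCreate();
   workerSpec.setName("worker");
   NodeGroupInfo info = workerSpec.toNodeGroupInfo();
   assert "worker".equals(info.getName());
   assert info.getNodes() == null; // nodes are populated only for deployed clusters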

      // set nodes/nodegroups
      List<NodeGroupInfo> nodeGroupInfos = new ArrayList<NodeGroupInfo>();
      if (nodeGroups != null) {
         for (NodeGroupCreate group : nodeGroups) {
            NodeGroupInfo nodeGroupInfo = group.toNodeGroupInfo();
            nodeGroupInfos.add(nodeGroupInfo);
         }
      }
      blueprint.setNodeGroups(nodeGroupInfos);
      return blueprint;
