Package com.vmware.bdd.apitypes

Examples of com.vmware.bdd.apitypes.ClusterCreate
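
A quick orientation before the snippets: ClusterCreate is the request object that describes a cluster to be provisioned. Below is a minimal sketch of assembling one by hand; every setter and constant used here appears somewhere in the snippets on this page, while the concrete values are hypothetical.

   ClusterCreate clusterCreate = new ClusterCreate();
   clusterCreate.setName("myCluster");                  // hypothetical cluster name
   clusterCreate.setAppManager(Constants.IRONFAN);      // the default app manager
   clusterCreate.setType(ClusterType.HDFS_MAPRED);      // the default cluster type
   clusterCreate.setTopologyPolicy(TopologyType.NONE);  // no rack topology
   clusterCreate.setDistro("apache");                   // hypothetical distro name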



   @Override
   public List<DiskSpec> getReplacementDisks(String clusterName,
         String groupName, String nodeName, List<DiskSpec> badDisks) {
      // load the cluster spec and locate the node whose disks went bad
      ClusterCreate spec = configMgr.getClusterConfig(clusterName);

      NodeEntity nodeEntity =
            clusterEntityMgr.findByName(clusterName, groupName, nodeName);
      VcHost targetHost = VcResourceUtils.findHost(nodeEntity.getHostName());
… (snippet truncated)


   @Override
   @SuppressWarnings("unchecked")
   public VcVirtualMachine createReplacementVm(String clusterName,
         String groupName, String nodeName, List<DiskSpec> replacementDisks) {
      ClusterCreate spec = configMgr.getClusterConfig(clusterName);
      NodeEntity node =
            clusterEntityMgr.findByName(spec.getName(), groupName, nodeName);

      // replace the bad disks with replacement disks, combining them into a new disk set
      List<DiskSpec> fullDiskSet = new ArrayList<DiskSpec>();
      for (DiskEntity disk : clusterEntityMgr.getDisks(nodeName)) {
         fullDiskSet.add(disk.toDiskSpec());
… (snippet truncated)
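
Taken together with getReplacementDisks above, these two methods suggest a two-step disk-fix flow: compute the replacement disks first, then create the replacement VM with them. A sketch under that assumption; the diskFixService variable is hypothetical, the signatures are the ones shown above.

   // Hypothetical driver for the disk-fix flow (clusterName, groupName,
   // nodeName and badDisks are assumed to be in scope).
   List<DiskSpec> replacements =
         diskFixService.getReplacementDisks(clusterName, groupName, nodeName, badDisks);
   VcVirtualMachine newVm =
         diskFixService.createReplacementVm(clusterName, groupName, nodeName, replacements);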

      // report progress back to the job execution as VMs are created
      StatusUpdater statusUpdater = new DefaultStatusUpdater(jobExecutionStatusHolder,
            getJobExecutionId(chunkContext));
      List<BaseNode> nodes = getFromJobExecutionContext(chunkContext, JobConstants.CLUSTER_ADDED_NODES_JOB_PARAM,
            new TypeToken<List<BaseNode>>() {}.getType());
      ClusterCreate clusterSpec = getFromJobExecutionContext(chunkContext,
            JobConstants.CLUSTER_SPEC_JOB_PARAM, ClusterCreate.class);
      Map<String, Set<String>> usedIpSets = getFromJobExecutionContext(chunkContext,
            JobConstants.CLUSTER_USED_IP_JOB_PARAM,
            new TypeToken<Map<String, Set<String>>>() {}.getType());
      if (usedIpSets == null) {
         usedIpSets = new HashMap<String, Set<String>>();
      }
      // MapR clusters use raw disks, so reserve them rather than formatting them
      boolean reserveRawDisks = clusterSpec.getDistroVendor().equalsIgnoreCase(Constants.MAPR_VENDOR);
      boolean success = clusteringService.createVcVms(clusterSpec.getNetworkings(), nodes,
            usedIpSets, reserveRawDisks, statusUpdater);
      putIntoJobExecutionContext(chunkContext, JobConstants.CLUSTER_CREATE_VM_OPERATION_SUCCESS, success);
      putIntoJobExecutionContext(chunkContext, JobConstants.CLUSTER_ADDED_NODES_JOB_PARAM, nodes);
      UUID reservationId = getFromJobExecutionContext(chunkContext, JobConstants.CLUSTER_RESOURCE_RESERVATION_ID_JOB_PARAM, UUID.class);
      if (reservationId != null) {
         // release the resource reservation now that the VMs are created
… (snippet truncated)
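
The generic reads above go through the TypeToken idiom because List&lt;BaseNode&gt; and Map&lt;String, Set&lt;String&gt;&gt; have no class literals. A self-contained illustration of the same idiom with plain Gson, which matches the TypeToken usage in the snippet:

   import com.google.gson.Gson;
   import com.google.gson.reflect.TypeToken;
   import java.lang.reflect.Type;
   import java.util.Map;
   import java.util.Set;

   public class TypeTokenDemo {
      public static void main(String[] args) {
         // Map.class alone would erase the element types, so the generic
         // shape is captured in an anonymous TypeToken subclass.
         Type ipSetsType = new TypeToken<Map<String, Set<String>>>() {}.getType();
         Map<String, Set<String>> usedIpSets =
               new Gson().fromJson("{\"net0\":[\"10.0.0.1\"]}", ipSetsType);
         System.out.println(usedIpSets); // prints {net0=[10.0.0.1]}
      }
   }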

   @Override
   public RepeatStatus executeStep(ChunkContext chunkContext, JobExecutionStatusHolder jobExecutionStatusHolder) {
      logger.info("set password for disk fix");

      String clusterName = getJobParameters(chunkContext).getString(JobConstants.CLUSTER_NAME_JOB_PARAM);
      ClusterCreate clusterSpec = configMgr.getClusterConfig(clusterName);

      String newPassword = clusterSpec.getPassword();

      String targetNode = getJobParameters(chunkContext).getString(JobConstants.SUB_JOB_NODE_NAME);
      NodeEntity nodeEntity = clusterEntityMgr.findNodeByName(targetNode);
      if (nodeEntity == null) {
         throw TaskException.EXECUTION_FAILED("No fixed node to set password for.");
… (snippet truncated)

                     clusterName);
         Set<String> hostnames = new HashSet<String>();
         for (NodeEntity node : nodes) {
            hostnames.add(node.getHostName());
         }
         ClusterCreate clusterSpec = clusterManager.getClusterSpec(clusterName);
         SyncHostsUtils.SyncHosts(clusterSpec, hostnames, softwareMgr);
      }

      StatusUpdater statusUpdater =
            new DefaultStatusUpdater(jobExecutionStatusHolder,
… (snippet truncated)

   public ClusterCreate getClusterConfig(String clusterName, boolean needAllocIp) {
      ClusterEntity clusterEntity = clusterEntityMgr.findByName(clusterName);
      if (clusterEntity == null) {
         throw ClusterConfigException.CLUSTER_CONFIG_NOT_FOUND(clusterName);
      }
      ClusterCreate clusterConfig = new ClusterCreate();
      clusterConfig.setName(clusterEntity.getName());
      clusterConfig.setAppManager(clusterEntity.getAppManager());
      clusterConfig.setDistro(clusterEntity.getDistro());
      convertClusterConfig(clusterEntity, clusterConfig, needAllocIp);

      // serialize only fields annotated with @Expose into the cluster manifest
      Gson gson =
            new GsonBuilder().excludeFieldsWithoutExposeAnnotation().create();
      String manifest = gson.toJson(clusterConfig);
… (snippet truncated)
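
The manifest serialization above emits only fields annotated with @Expose. A self-contained illustration of that Gson behavior (the Demo type is made up for the example):

   import com.google.gson.Gson;
   import com.google.gson.GsonBuilder;
   import com.google.gson.annotations.Expose;

   public class ExposeDemo {
      static class Demo {
         @Expose String name = "myCluster"; // serialized
         String internal = "skipped";       // no @Expose, so excluded
      }

      public static void main(String[] args) {
         Gson gson = new GsonBuilder().excludeFieldsWithoutExposeAnnotation().create();
         System.out.println(gson.toJson(new Demo())); // prints {"name":"myCluster"}
      }
   }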


   public VcDatastore getTargetDsForSwapDisk(NodeEntity node,
         DiskEntity swapDisk, long newSwapSizeInMB) {
      ClusterCreate clusterSpec =
            clusterConfigMgr.getClusterConfig(node.getNodeGroup().getCluster()
                  .getName());
      NodeGroupCreate ngSpec =
            clusterSpec.getNodeGroup(node.getNodeGroup().getName());

      // use current DS if it has enough space
      VcDatastore currentDs =
            VcResourceUtils.findDSInVcByName(swapDisk.getDatastoreName());
      if (!currentDs.isAccessible()) {
… (snippet truncated)
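
A hedged sketch of the fall-through this method appears to implement: only findDSInVcByName and isAccessible() are taken from the snippet, and the free-space comparison is left abstract because its API is not shown here.

   // Keep the swap disk's current datastore when it is still usable;
   // otherwise fall back to a datastore permitted by the node group spec.
   VcDatastore currentDs =
         VcResourceUtils.findDSInVcByName(swapDisk.getDatastoreName());
   if (currentDs != null && currentDs.isAccessible()
         /* && enough free space for newSwapSizeInMB -- API not shown */) {
      return currentDs;
   }
   // otherwise choose among the datastores ngSpec allows (elided)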

   @Test(groups = { "TestCommonClusterExpandPolicy" })
   public void testExpandDistro() {
      final String hadoopUrl = "apache/1.2.1/hadoop-1.2.1.tar.gz";
      final String zookeeperUrl = "apache/1.2.1/zookeeper-3.4.5.tar.gz";
      final String bigTopRepoUrl = "https://192.168.0.1/yum/bigtop.repo";
      ClusterCreate clusterConfig = new ClusterCreate();
      IronfanStack stack = new IronfanStack();
      stack.setPackagesExistStatus("TARBALL");
      Map<String, String> hadoopDistroMap = new HashMap<String, String>();
      hadoopDistroMap.put("HadoopUrl", hadoopUrl);
      hadoopDistroMap.put("ZookeeperUrl", zookeeperUrl);
      stack.setHadoopDistroMap(hadoopDistroMap);
      CommonClusterExpandPolicy.expandDistro(clusterConfig, stack);
      assertEquals(clusterConfig.getDistroMap().getHadoopUrl(), hadoopUrl);
      assertEquals(clusterConfig.getDistroMap().getZookeeperUrl(), zookeeperUrl);
      stack.setPackagesExistStatus("REPO");
      List<String> repos = new ArrayList<String>();
      repos.add(bigTopRepoUrl);
      stack.setPackageRepos(repos);
      CommonClusterExpandPolicy.expandDistro(clusterConfig, stack);
      assertEquals(clusterConfig.getPackageRepos().get(0), bigTopRepoUrl);
   }
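The test exercises both packagesExistStatus branches of expandDistro: with "TARBALL" the tarball URLs from the stack's hadoopDistroMap end up in the cluster's distro map, and with "REPO" the stack's package repository list is copied to the cluster's packageRepos instead.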

         resumeCreateCluster(name);
         return;
      }

      // build ClusterCreate object
      ClusterCreate clusterCreate = new ClusterCreate();
      clusterCreate.setName(name);

      if (!CommandsUtils.isBlank(appManager)
            && !Constants.IRONFAN.equalsIgnoreCase(appManager)) {
         AppManagerRead appManagerRead = appManagerRestClient.get(appManager);
         if (appManagerRead == null) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, name,
                  Constants.OUTPUT_OP_CREATE, Constants.OUTPUT_OP_RESULT_FAIL,
                  appManager + " cannot be found in the list of application managers.");
            return;
         }
      }

      if (CommandsUtils.isBlank(appManager)) {
         clusterCreate.setAppManager(Constants.IRONFAN);
      } else {
         clusterCreate.setAppManager(appManager);

         // local yum repo URL for third-party app managers such as Cloudera Manager and Ambari
         if (!CommandsUtils.isBlank(localRepoURL)) {
            clusterCreate.setLocalRepoURL(localRepoURL);
         }
      }

      if (setClusterPassword) {
         String password = getPassword();
         // the user asked to set a password but did not enter a valid one;
         // abort the cluster create
         if (password == null) {
            return;
         } else {
            clusterCreate.setPassword(password);
         }
      }

      if (type != null) {
         ClusterType clusterType = ClusterType.getByDescription(type);
         if (clusterType == null) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER,
                  name, Constants.OUTPUT_OP_CREATE,
                  Constants.OUTPUT_OP_RESULT_FAIL,
                  Constants.INVALID_VALUE + " type=" + type);
            return;
         }
         clusterCreate.setType(clusterType);
      } else if (specFilePath == null) {
         // create Hadoop (HDFS + MapReduce) cluster as default
         clusterCreate.setType(ClusterType.HDFS_MAPRED);
      }

      TopologyType policy = null;
      if (topology != null) {
         policy = validateTopologyValue(name, topology);
         if (policy == null) {
            return;
         }
      } else {
         policy = TopologyType.NONE;
      }
      clusterCreate.setTopologyPolicy(policy);

      DistroRead distroRead4Create;
      try {
         if (distro != null) {
            DistroRead[] distroReads =
                  appManagerRestClient
                        .getDistros(clusterCreate.getAppManager());
            distroRead4Create = getDistroByName(distroReads, distro);

            if (distroRead4Create == null) {
               CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER,
                     name, Constants.OUTPUT_OP_CREATE,
                     Constants.OUTPUT_OP_RESULT_FAIL, Constants.PARAM_DISTRO
                           + Constants.PARAM_NOT_SUPPORTED
                           + getDistroNames(distroReads));
               return;
            }
         } else {
            distroRead4Create =
                  appManagerRestClient.getDefaultDistro(clusterCreate
                        .getAppManager());
            if (distroRead4Create == null
                  || CommandsUtils.isBlank(distroRead4Create.getName())) {
               CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER,
                     name, Constants.OUTPUT_OP_CREATE,
                     Constants.OUTPUT_OP_RESULT_FAIL,
                     Constants.PARAM_NO_DEFAULT_DISTRO);
               return;
            }
         }
      } catch (CliRestException e) {
         CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, name,
               Constants.OUTPUT_OP_CREATE, Constants.OUTPUT_OP_RESULT_FAIL,
               e.getMessage());
         return;
      }
      clusterCreate.setDistro(distroRead4Create.getName());
      clusterCreate.setDistroVendor(distroRead4Create.getVendor());
      clusterCreate.setDistroVersion(distroRead4Create.getVersion());

      if (rpNames != null) {
         List<String> rpNamesList = CommandsUtils.inputsConvert(rpNames);
         if (rpNamesList.isEmpty()) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER,
                  name, Constants.OUTPUT_OP_CREATE,
                  Constants.OUTPUT_OP_RESULT_FAIL,
                  Constants.INPUT_RPNAMES_PARAM + Constants.MULTI_INPUTS_CHECK);
            return;
         } else {
            clusterCreate.setRpNames(rpNamesList);
         }
      }
      if (dsNames != null) {
         List<String> dsNamesList = CommandsUtils.inputsConvert(dsNames);
         if (dsNamesList.isEmpty()) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER,
                  name, Constants.OUTPUT_OP_CREATE,
                  Constants.OUTPUT_OP_RESULT_FAIL,
                  Constants.INPUT_DSNAMES_PARAM + Constants.MULTI_INPUTS_CHECK);
            return;
         } else {
            clusterCreate.setDsNames(dsNamesList);
         }
      }
      List<String> failedMsgList = new ArrayList<String>();
      List<String> warningMsgList = new ArrayList<String>();
      Set<String> allNetworkNames = new HashSet<String>();
      try {
         if (specFilePath != null) {
            ClusterCreate clusterSpec =
                  CommandsUtils.getObjectByJsonString(ClusterCreate.class,
                        CommandsUtils.dataFromFile(specFilePath));
            clusterCreate.setSpecFile(true);
            clusterCreate.setExternalHDFS(clusterSpec.getExternalHDFS());
            clusterCreate.setExternalMapReduce(clusterSpec
                  .getExternalMapReduce());
            clusterCreate.setNodeGroups(clusterSpec.getNodeGroups());
            clusterCreate.setConfiguration(clusterSpec.getConfiguration());
            // TODO: we'd better merge validateConfiguration with validateClusterSpec to avoid repeated validation.
            if (CommandsUtils.isBlank(appManager)
                  || Constants.IRONFAN.equalsIgnoreCase(appManager)) {
               validateConfiguration(clusterCreate, skipConfigValidation,
                     warningMsgList, failedMsgList);
… (snippet truncated)
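
For spec-file driven creation, the code above parses the JSON file into a second ClusterCreate and copies selected fields onto the instance being built. A condensed sketch of just that step, using the same two helpers; specFilePath is assumed to name a readable spec file.

   // Parse the spec file, then mark the ClusterCreate as spec-file based
   // before copying node groups and configuration, as the snippet does.
   ClusterCreate clusterSpec = CommandsUtils.getObjectByJsonString(
         ClusterCreate.class, CommandsUtils.dataFromFile(specFilePath));
   clusterCreate.setSpecFile(true);
   clusterCreate.setNodeGroups(clusterSpec.getNodeGroups());
   clusterCreate.setConfiguration(clusterSpec.getConfiguration());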

      try {
         // export the cluster spec by default, or when a spec file name or
         // type=SPEC is given explicitly
         if ((CommandsUtils.isBlank(specFileName) && CommandsUtils.isBlank(type))
               || !CommandsUtils.isBlank(specFileName)
               || Constants.EXPORT_TYPE_SPEC.equalsIgnoreCase(type)) {
            ClusterCreate cluster = restClient.getSpec(name);
            CommandsUtils.prettyJsonOutput(cluster, path);
         } else if (Constants.EXPORT_TYPE_RACK.equalsIgnoreCase(type)) {
            Map<String, String> rackTopology =
                  restClient.getRackTopology(name, topology);
            CommandsUtils.gracefulRackTopologyOutput(rackTopology, path,
… (snippet truncated)
