Package org.voltdb.catalog

Examples of org.voltdb.catalog.Host
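
Most of the examples below follow the same pattern: obtain the Host that a Site runs on (either from the Site itself or from the execution context), then walk back to every Site on that Host. A minimal sketch of that pattern, assuming only the org.voltdb.catalog classes and accessors that appear in the snippets on this page:

    import org.voltdb.catalog.Cluster;
    import org.voltdb.catalog.Host;
    import org.voltdb.catalog.Site;

    public class HostLookupSketch {
        // Counts how many Sites in the Cluster run on the given Host.
        public static int sitesOnHost(Cluster cluster, Host host) {
            int count = 0;
            for (Site site : cluster.getSites()) {
                if (host.equals(site.getHost())) {
                    count++;
                }
            }
            return count;
        }
    }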


    public void testGetSitesForHost() throws Exception {
        Map<Host, List<Site>> host_sites = new HashMap<Host, List<Site>>();
        Cluster catalog_clus = catalogContext.cluster;

        for (Site catalog_site : catalog_clus.getSites()) {
            Host catalog_host = catalog_site.getHost();
            assertNotNull(catalog_host);
            if (!host_sites.containsKey(catalog_host)) {
                host_sites.put(catalog_host, new ArrayList<Site>());
            }
            host_sites.get(catalog_host).add(catalog_site);
View Full Code Here
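
The test above groups Sites by their Host with an explicit containsKey check. A hedged, equivalent formulation using Java 8's Map.computeIfAbsent (catalog_clus is the Cluster from the snippet above):

        Map<Host, List<Site>> host_sites = new HashMap<>();
        for (Site catalog_site : catalog_clus.getSites()) {
            // One list of Sites per Host; the list is created on first use.
            host_sites.computeIfAbsent(catalog_site.getHost(), h -> new ArrayList<>())
                      .add(catalog_site);
        }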


        Arrays.fill(this.defaultAffinity, true);
        for (int cpu : this.utilityBlacklist) {
            this.defaultAffinity[cpu] = false;
        } // FOR
       
        Host host = hstore_site.getHost();
        Collection<Partition> host_partitions = CatalogUtil.getPartitionsForHost(host);
       
        if (hstore_conf.site.cpu_affinity == false) {
            this.disable = true;
        }
        else if (this.num_cores <= host_partitions.size()) {
            LOG.warn(String.format("Unable to set CPU affinity on %s because there are %d partitions " +
                     "but only %d available CPU cores",
                     host.getIpaddr(), host_partitions.size(), this.num_cores));
            this.disable = true;
        }
       
        // Calculate what cores the partitions + utility threads are allowed
        // to execute on at this HStoreSite. We have to be careful about considering
View Full Code Here
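
The affinity setup above starts from an all-true mask and clears the cores reserved for utility threads. A standalone sketch of that mask construction; the method and parameter names here are hypothetical, only the logic mirrors the snippet:

    // Build the default affinity mask: every core is allowed, except the
    // cores listed in the utility blacklist.
    static boolean[] buildDefaultAffinity(int numCores, int[] utilityBlacklist) {
        boolean[] affinity = new boolean[numCores];
        java.util.Arrays.fill(affinity, true);
        for (int cpu : utilityBlacklist) {
            affinity[cpu] = false;
        }
        return affinity;
    }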

        Cluster catalog_cluster = CatalogUtil.getCluster(catalog_db);
        assertNotNull(catalog_cluster);
        Cluster clone_cluster = CatalogUtil.getCluster(clone_db);
        assertNotNull(clone_cluster);
        for (Host catalog_host : catalog_cluster.getHosts()) {
            Host clone_host = clone_cluster.getHosts().get(catalog_host.getName());
            assertNotNull(clone_host);
            checkFields(Host.class, catalog_host, clone_host);
        } // FOR

        for (Site catalog_site : catalog_cluster.getSites()) {
View Full Code Here

        VoltTable result = constructFragmentResultsTable();
        if (fragmentId == SysProcFragmentId.PF_snapshotDelete)
        {
            // Choose the lowest site ID on this host to do the deletion.
            // All other sites should just return empty results tables.
            Host catalog_host = context.getHost();
            Site catalog_site = CollectionUtil.first(CatalogUtil.getSitesForHost(catalog_host));
            Integer lowest_site_id = catalog_site.getId();
           
            LOG.trace("Site id :"+context.getPartitionExecutor().getSiteId());                      
            int partition_id = context.getPartitionExecutor().getPartitionId();
View Full Code Here
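
The snapshot-delete fragment above takes the first Site returned by CatalogUtil.getSitesForHost and treats its id as the lowest on the host, which relies on the ordering of that collection. A hedged sketch that computes the minimum explicitly, using only accessors seen in these snippets:

    // Scan all Sites on the host and return the smallest site id.
    static int lowestSiteId(java.util.Collection<Site> sitesOnHost) {
        int lowest = Integer.MAX_VALUE;
        for (Site site : sitesOnHost) {
            lowest = Math.min(lowest, site.getId());
        }
        return lowest;
    }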

    @Override
    public SynthesizedPlanFragment[] generateRestorePlan(Table catalogTable) {
        SystemProcedureExecutionContext context = this.getSystemProcedureExecutionContext();
        assert (context != null);
        Host catalog_host = context.getHost();
        Collection<Site> catalog_sites = CatalogUtil.getSitesForHost(catalog_host);

        LOG.info("Replicated :: Table: " + getTableName());

        Set<Integer> execution_site_ids = new TreeSet<Integer>();
View Full Code Here

    private SynthesizedPlanFragment[] generateReplicatedToReplicatedPlan() {
        SynthesizedPlanFragment[] restore_plan = null;

        SystemProcedureExecutionContext context = this.getSystemProcedureExecutionContext();
        assert (context != null);
        Host catalog_host = context.getHost();
        Collection<Site> catalog_sites = CatalogUtil.getSitesForHost(catalog_host);

        Set<Integer> execution_site_ids = new TreeSet<Integer>();
        Set<Integer> execution_partition_ids = new TreeSet<Integer>();
        for (Site catalog_site : catalog_sites) {           
View Full Code Here
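
The loop above is cut off before the sets are filled. A hedged completion, following the same pattern the later snippets use (site ids from Site.getId(), partition ids from the Site's Partitions):

        for (Site catalog_site : catalog_sites) {
            execution_site_ids.add(catalog_site.getId());
            for (Partition catalog_part : catalog_site.getPartitions()) {
                execution_partition_ids.add(catalog_part.getId());
            }
        }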

                }
            }

            SystemProcedureExecutionContext context = this.getSystemProcedureExecutionContext();
            assert (context != null);
            Host catalog_host = context.getHost();
            Collection<Site> catalog_sites = CatalogUtil.getSitesForHost(catalog_host);

            List<Integer> sitesAtHost = new ArrayList<Integer>();
            List<Integer> partitionsAtHost = new ArrayList<Integer>();
View Full Code Here

        int partitionId = context.getPartitionExecutor().getPartitionId();
       
        for (int originalHostId : originalHostIds) {
            final File f = getSaveFileForPartitionedTable(filePath, fileNonce, tableName, originalHostId, siteId, partitionId);

            Host catalog_host = context.getHost();
            Collection<Site> catalog_sites = CatalogUtil.getSitesForHost(catalog_host);
           
            m_saveFiles.offer(getTableSaveFile(f, catalog_sites.size() * 4, relevantPartitionIds));
            assert (m_saveFiles.peekLast().getCompleted());
        }
View Full Code Here

            assert (params.toArray()[1] != null);
            VoltTable result = ClusterSaveFileState.constructEmptySaveFileStateVoltTable();
        
            // Choose the lowest site ID on this host to do the file scan
            // All other sites should just return empty results tables.           
            Host catalog_host = context.getHost();           
            Site catalog_site = CollectionUtil.first(CatalogUtil.getSitesForHost(catalog_host));
           
            CatalogMap<Partition> partition_map = catalog_site.getPartitions();
            Integer lowest_partition_id = Integer.MAX_VALUE, p_id;       
            for (Partition pt : partition_map) {
View Full Code Here
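
The scan above looks for the lowest partition id on the chosen Site but is truncated before the comparison. A hedged sketch of that minimum computation:

            int lowest_partition_id = Integer.MAX_VALUE;
            for (Partition pt : catalog_site.getPartitions()) {
                lowest_partition_id = Math.min(lowest_partition_id, pt.getId());
            }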

        // XXX This is all very similar to the splitting code in
        // LoadMultipartitionTable. Consider ways to consolidate later
        Map<Integer, Integer> sites_to_partitions = new HashMap<Integer, Integer>();

        // CHANGE : Up Sites
        Host catalog_host = context.getHost();
        Collection<Site> catalog_sites = CatalogUtil.getSitesForHost(catalog_host);
        Site catalog_site = context.getSite();
        Partition catalog_partition = context.getPartitionExecutor().getPartition();           

        LOG.trace("Table :" + tableName);

        for (Site site : catalog_sites) {
            for (Partition partition : site.getPartitions()) {
                sites_to_partitions.put(site.getId(), partition.getId());
            }
        }

        try {
            initializeTableSaveFiles(m_filePath, m_fileNonce, tableName, originalHostIds, relevantPartitionIds, context);
        } catch (IOException e) {
            VoltTable result = constructResultsTable();
            // e.printStackTrace();
            result.addRow(m_hostId, hostname, m_siteId, tableName, relevantPartitionIds[0], "FAILURE", "Unable to load table: " + tableName + " error: " + e.getMessage());
            return result;
        }

        int partition_id = context.getPartitionExecutor().getPartitionId();
        LOG.trace("Starting performLoadPartitionedTable " + tableName + " at partition - " + partition_id);

        String result_str = "SUCCESS";
        String error_msg = "";
        TableSaveFile savefile = null;

        /**
         * For partitioned tables
         */
        try {
            savefile = getTableSaveFile(getSaveFileForPartitionedTable(m_filePath, m_fileNonce, tableName,
                    catalog_host.getId(),
                    catalog_site.getId(),
                    catalog_partition.getId()),                            
                    3, null);
            assert (savefile.getCompleted());
        } catch (IOException e) {
View Full Code Here
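
One thing to note about the sites_to_partitions map built near the top of this snippet: a Site can host several Partitions (the nested loop iterates all of them), so a Map<Integer, Integer> keeps only the last partition seen for each site. A hedged alternative that preserves every partition id per site:

        Map<Integer, List<Integer>> sitesToPartitions = new HashMap<>();
        for (Site site : catalog_sites) {
            for (Partition partition : site.getPartitions()) {
                sitesToPartitions.computeIfAbsent(site.getId(), id -> new ArrayList<>())
                                 .add(partition.getId());
            }
        }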
