Package org.infinispan.util

Examples of org.infinispan.util.ReadOnlyDataContainerBackedKeySet
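
In the excerpts below, ReadOnlyDataContainerBackedKeySet wraps the cache's in-memory DataContainer as an unmodifiable java.util.Set of keys. That set is then handed to the cache-store loading APIs, either directly as the exclusion set of the legacy CacheStore.loadAllKeys(...) call or wrapped in a filter such as CollectionKeyFilter, so that iterating the store during state transfer only has to consider keys that are not already in memory. The sketch below is not taken from the Infinispan sources: the class and method names around the key set, the Infinispan 6.x-era package layout and the use of the raw DataContainer type are assumptions made for illustration.

import java.util.Set;

import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.container.DataContainer;
import org.infinispan.util.ReadOnlyDataContainerBackedKeySet;

public class StoreOnlyKeys {

   // Counts how many of the keys loaded from a cache store are not already held in memory.
   // The method and class names here are illustrative only.
   public static int countStoreOnlyKeys(Cache<Object, Object> cache, Set<Object> keysLoadedFromStore) {
      AdvancedCache<Object, Object> advancedCache = cache.getAdvancedCache();
      DataContainer dataContainer = advancedCache.getDataContainer();

      // Read-only Set view over the keys currently held by the in-memory data container.
      Set<Object> inMemoryKeys = new ReadOnlyDataContainerBackedKeySet(dataContainer);

      int storeOnly = 0;
      for (Object key : keysLoadedFromStore) {
         if (!inMemoryKeys.contains(key)) {   // reads are backed by the data container
            storeOnly++;
         }
      }
      return storeOnly;
   }
}

Because the view reads the data container directly, it always reflects the current in-memory contents without copying the key set, which is presumably why the state-transfer code below passes it to the store APIs instead of snapshotting the keys first.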


      }

      // gather all keys from cache store that belong to the segments that are being removed/moved to L1
      //todo [anistor] extend CacheStore interface to be able to specify a filter when loading keys (ie. keys should belong to desired segments)
      try {
         CollectionKeyFilter filter = new CollectionKeyFilter(new ReadOnlyDataContainerBackedKeySet(dataContainer));
         persistenceManager.processOnAllStores(filter, new AdvancedCacheLoader.CacheLoaderTask() {
            @Override
            public void processEntry(MarshalledEntry marshalledEntry, AdvancedCacheLoader.TaskContext taskContext) throws InterruptedException {
               Object key = marshalledEntry.getKey();
               int keySegment = getSegment(key);
View Full Code Here


            AdvancedCacheLoader<Object, Object> stProvider = persistenceManager.getStateTransferProvider();
            if (stProvider != null) {
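               // Build a key filter backed by the in-memory key set and hand it, together with a chunking task, to the state-transfer loader so the persisted entries can be streamed to the backup site.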
               if (debug) {
                  log.debugf("[X-Site State Transfer - %s] start Persistence iteration", xSiteBackup.getSiteName());
               }
               KeyFilter<Object> filter = new CacheLoaderFilter(new ReadOnlyDataContainerBackedKeySet(dataContainer));
               StateTransferCacheLoaderTask task = new StateTransferCacheLoaderTask(xSiteBackup, chunk, chunkSize,
                                                                                    backupResponseQueue, canceled);
               try {
                  stProvider.process(filter, task, EXECUTOR_SERVICE, true, true);
                  if (canceled.get()) {
View Full Code Here

      CacheStore cacheStore = getCacheStore();
      if (cacheStore != null) {
         //todo [anistor] extend CacheStore interface to be able to specify a filter when loading keys (ie. keys should belong to desired segments)
         try {
            Set<Object> storedKeys = cacheStore.loadAllKeys(new ReadOnlyDataContainerBackedKeySet(dataContainer));
            for (Object key : storedKeys) {
               int keySegment = getSegment(key);
               if (segmentsToRemove.contains(keySegment)) {
                  keysToL1.add(key);
               } else if (segmentsToL1.contains(keySegment)) {
View Full Code Here

         // send cache store entries if needed
         CacheStore cacheStore = getCacheStore();
         if (cacheStore != null) {
            try {
               //todo [anistor] need to extend CacheStore interface to be able to specify a filter when loading keys (ie. keys should belong to desired segments)
               Set<Object> storedKeys = cacheStore.loadAllKeys(new ReadOnlyDataContainerBackedKeySet(dataContainer));
               for (Object key : storedKeys) {
                  int segmentId = readCh.getSegment(key);
                  if (segments.contains(segmentId)) {
                     try {
                        InternalCacheEntry ice = cacheStore.load(key);
View Full Code Here

            }

            // Only fetch the data from the cache store if the cache store is not shared
            CacheStore cacheStore = stateTransferManager.getCacheStoreForStateTransfer();
            if (cacheStore != null) {
               for (Object key : cacheStore.loadAllKeys(new ReadOnlyDataContainerBackedKeySet(dataContainer))) {
                  replicate(key, null, chOld, joiners, cacheStore, state);
               }
            } else {
               if (trace) log.trace("No cache store or cache store is shared, not replicating stored keys");
            }
View Full Code Here

         checkIfCancelled();

         // Only fetch the data from the cache store if the cache store is not shared
         CacheStore cacheStore = stateTransferManager.getCacheStoreForStateTransfer();
         if (cacheStore != null) {
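            // Every key persisted in the store is routed to its new owners under the new consistent hash.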
            for (Object key : cacheStore.loadAllKeys(new ReadOnlyDataContainerBackedKeySet(dataContainer))) {
               rebalance(key, null, numOwners, chOld, chNew, cacheStore, states, keysToRemove);
            }
         } else {
            if (trace) log.trace("No cache store or cache store is shared, not rebalancing stored keys");
         }

         checkIfCancelled();

         // Push any remaining state chunks
         for (Map.Entry<Address, Collection<InternalCacheEntry>> entry : states.entrySet()) {
            pushPartialState(Collections.singleton(entry.getKey()), entry.getValue(), null);
         }
        
         // Push locks if the cache is transactional and it is distributed
         if (transactionTable != null) {
            log.debug("Starting lock migration");
            Map<Address, Collection<LockInfo>> locksToMigrate = new HashMap<Address, Collection<LockInfo>>();
            rebalanceLocks(numOwners, locksToMigrate, transactionTable.getRemoteTransactions());
            rebalanceLocks(numOwners, locksToMigrate, transactionTable.getLocalTransactions());
            for (Map.Entry<Address, Collection<LockInfo>> e : locksToMigrate.entrySet()) {
               pushPartialState(Collections.singleton(e.getKey()), null, e.getValue());
            }
         }
        
         // And wait for all the push RPCs to end
         finishPushingState();
      } else if (initialView) {
         // Only remove data from the cache store if the cache store is not shared
         CacheStore cacheStore = stateTransferManager.getCacheStoreForStateTransfer();
         if (cacheStore != null) {
            if (trace) log.trace("Non-shared cache store, cleaning up persisted entries that we don't own after we joined the cache");
            for (Object key : cacheStore.loadAllKeys(new ReadOnlyDataContainerBackedKeySet(dataContainer))) {
               if (!chNew.isKeyLocalToAddress(self, key, numOwners)) {
                  keysToRemove.add(key);
               }
            }
         }
View Full Code Here

      // we also remove keys from the cache store
      CacheStore cacheStore = getCacheStore();
      if (cacheStore != null) {
         //todo [anistor] extend CacheStore interface to be able to specify a filter when loading keys (ie. keys should belong to desired segments)
         try {
            Set<Object> storedKeys = cacheStore.loadAllKeys(new ReadOnlyDataContainerBackedKeySet(dataContainer));
            for (Object key : storedKeys) {
               if (segments.contains(getSegment(key))) {
                  keysToRemove.add(key);
               }
            }
View Full Code Here
