Package org.jboss.ha.framework.interfaces

Examples of org.jboss.ha.framework.interfaces.DistributedReplicantManager
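The DistributedReplicantManager (DRM) keeps, for each service key, the list of replicants published by the cluster nodes, and notifies registered ReplicantListeners whenever that list changes. A minimal lifecycle sketch, assuming a started HAPartition is already in hand (the key name, listener, and replicant value are illustrative, not taken from the excerpts below):

   DistributedReplicantManager drm = partition.getDistributedReplicantManager();
   DistributedReplicantManager.ReplicantListener listener = ...;  // your listener implementation
   String key = "myService";                       // hypothetical service key

   drm.registerListener(key, listener);            // listen first, so no change is missed
   drm.add(key, replicant);                        // publish this node's Serializable replicant

   List replicants = drm.lookupReplicants(key);    // replicants seen across the whole cluster
   boolean master = drm.isMasterReplica(key);      // is this node the elected master for the key?

   // mirror image on shutdown
   drm.remove(key);
   drm.unregisterListener(key, listener);

The first excerpt, from HAServiceImpl.registerDRMListener() (per its @see tag), registers a temporary listener so that the replicant change triggered by drm.add(...) is recorded rather than handled immediately, then swaps in the real listener: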


   /**
    * @see org.jboss.ha.framework.server.HAServiceImpl#registerDRMListener()
    */
   @Override
   protected void registerDRMListener() throws Exception
   {
      DistributedReplicantManager drm = this.getHAPartition().getDistributedReplicantManager();
     
      final String key = this.getHAServiceKey();
     
      // Temporary DRM listener that records changes until the real listener is registered below
      RecordingReplicantListener listener = new RecordingReplicantListener();
     
      // record replicant changes, but don't handle them just yet
      drm.registerListener(key, listener);

      // this ensures that the DRM knows that this node has the singleton deployed
      drm.add(key, this.getReplicant());
     
      // Now register the real listener
      drm.registerListener(key, this);
     
      // ...and unregister our temporary one
      drm.unregisterListener(key, listener);
     
      ReplicantView view = this.viewReference.getAndSet(null);
     
      // Process the recorded replicant change
      // Typically this will be the replicant change from drm.add(...)
      // ...
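The temporary listener only needs to remember the most recent replicant view so that registerDRMListener() can replay it once the real listener is installed. A sketch of such a recorder, assuming the AS 5 ReplicantListener callback signature and a ReplicantView constructor that simply captures the callback arguments (both are assumptions, not verbatim source):

   // Assumed field on the enclosing service; matches viewReference.getAndSet(null) above
   private final AtomicReference<ReplicantView> viewReference = new AtomicReference<ReplicantView>();

   class RecordingReplicantListener implements DistributedReplicantManager.ReplicantListener
   {
      public void replicantsChanged(String key, List<Serializable> newReplicants,
                                    int newReplicantsViewId, boolean merge)
      {
         // Keep only the latest view; earlier recorded views are superseded
         viewReference.set(new ReplicantView(newReplicants, newReplicantsViewId, merge));
      }
   }

The next excerpt ends a fail-over method (asynchronously asking the old master to stop) and shows the isDRMMasterReplica() helper: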


      this.callAsyncMethodOnPartition("stopOldMaster", new Object[0], new Class[0]);
   }

   protected boolean isDRMMasterReplica()
   {
      DistributedReplicantManager drm = this.getHAPartition().getDistributedReplicantManager();

      return drm.isMasterReplica(this.getHAServiceKey());
   }
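isDRMMasterReplica() is the natural input for deciding whether this node should run a clustered singleton after a topology change. A hedged sketch of that decision (every name here other than isDRMMasterReplica() is illustrative):

   void reevaluateMastership()
   {
      boolean elected = this.isDRMMasterReplica();
      if (elected && !this.isMasterNode)
      {
         this.startSingleton();      // this node has just been elected master
      }
      else if (!elected && this.isMasterNode)
      {
         this.stopSingleton();       // this node has just lost mastership
      }
      this.isMasterNode = elected;
   }

The next excerpts show the register/add and unregister/remove pairing used when a service is pointed at a new HAPartition: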

   protected void updateHAPartition(HAPartition partition) throws Exception
   {
      cleanExistenceInCurrentHAPartition();
     
      this.partition = partition;
      DistributedReplicantManager drm = partition.getDistributedReplicantManager();
      drm.registerListener(this.replicantName, this);
      drm.add(this.replicantName, this.target);
   }

   protected void cleanExistenceInCurrentHAPartition()
   {
      if (this.partition != null)
      {
         try
         {
            // Unregister this listener and withdraw this node's replicant
            DistributedReplicantManager drm = this.partition.getDistributedReplicantManager();
            drm.unregisterListener(this.replicantName, this);
            drm.remove(this.replicantName);
         }
         catch (Exception e)
         {
            log.error("Failed to clean existence in current HA partition", e);
         }
      }
   }

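The longest example, from DRMTestCase.testIsMasterReplica() (named in its log messages), builds two ClusterPartition instances on top of a JGroups GossipRouter, then stops and restarts the router to simulate a network split and a subsequent merge, asserting on isMasterReplica() and lookupReplicants() at every stage: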

         partition1.setBindIntoJndi(false);
        
         partition1.create();        
         partition1.start();

         DistributedReplicantManager drm1 = partition1.getDistributedReplicantManager();

         Thread.sleep(10000);
        
         // Use a different stack name with the same config to avoid singleton conflicts
         stackName = "tunnel2";
        
         JChannelFactory factory2 = new JChannelFactory();
         factory2.setMultiplexerConfig(muxFile);
         factory2.setNamingServicePort(1099);
         factory2.setNodeName("node2");
         factory2.setExposeChannels(false);
         factory2.setExposeProtocols(false);
         factory2.create();
         factory2.start();
        
         Configuration cacheConfig2 = new Configuration();
         cacheConfig2.setMultiplexerStack(stackName);
         cacheConfig2.setCacheMode("REPL_SYNC");
        
         DependencyInjectedConfigurationRegistry registry2 = new DependencyInjectedConfigurationRegistry();
         registry2.registerConfiguration("config2", cacheConfig2);        
        
         CacheManager cacheManager2 = new CacheManager(registry2, factory2);
        
         HAPartitionCacheHandlerImpl cacheHandler2 = new HAPartitionCacheHandlerImpl();
         cacheHandler2.setCacheManager(cacheManager2);
         cacheHandler2.setCacheConfigName("config2");
        
         DistributedStateImpl ds2 = new DistributedStateImpl();
         ds2.setCacheHandler(cacheHandler2);
        
         partition2 = new ClusterPartition();
         partition2.setPartitionName(partitionName);
         partition2.setCacheHandler(cacheHandler2);
         partition2.setStateTransferTimeout(30000);
         partition2.setMethodCallTimeout(60000);
         partition2.setDistributedStateImpl(ds2);
         partition2.setBindIntoJndi(false);
        
         partition2.create();        
         partition2.start();

         DistributedReplicantManager drm2 = partition2.getDistributedReplicantManager();
        
         Thread.sleep(10000);
        
         // confirm that each partition contains two nodes  
         assertEquals("Partition1 should contain two nodes; ", 2, partition1.getCurrentView().size());
         assertEquals("Partition2 should contain two nodes; ", 2, partition2.getCurrentView().size());
        
         drm1.add(SERVICEA, "valueA1");
         drm2.add(SERVICEA, "valueA2");
         drm2.add(SERVICEB, "valueB2");
        
         // test that only one node is the master replica for serviceA
         assertTrue("ServiceA must have a master replica",
                 drm1.isMasterReplica(SERVICEA) || drm2.isMasterReplica(SERVICEA));
         assertTrue("ServiceA must have a single master replica",
                 drm1.isMasterReplica(SERVICEA) != drm2.isMasterReplica(SERVICEA));
         // ServiceB should only be a master replica on partition2
         assertFalse("ServiceB should not be a master replica on partition1",
                 drm1.isMasterReplica(SERVICEB));
         assertTrue("ServiceB must have a master replica on partition2",
                 drm2.isMasterReplica(SERVICEB));
        
         // confirm that each partition contains correct DRM replicants for services A and B 
         assertEquals("Partition1 should contain two DRM replicants for serviceA; ",
                 2, drm1.lookupReplicants(SERVICEA).size());
         assertEquals("Partition2 should contain two DRM replicants for serviceA; ",
                 2, drm2.lookupReplicants(SERVICEA).size());
         assertEquals("Partition1 should contain one DRM replicant for serviceB; ",
                 1, drm1.lookupReplicants(SERVICEB).size());
         assertEquals("Partition2 should contain one DRM replicant for serviceB; ",
                 1, drm2.lookupReplicants(SERVICEB).size());

         // simulate a split of the partition
         log.info("DRMTestCase.testIsMasterReplica() - stopping GossipRouter");
         router.stop();
         sleepThread(15000);
        
         // confirm that each partition contains one node  
         assertEquals("Partition1 should contain one node after split; ",
                 1, partition1.getCurrentView().size());
         assertEquals("Partition2 should contain one node after split; ",
                 1, partition2.getCurrentView().size());
       
         // confirm that each node is a master replica for serviceA after the split
         assertTrue("ServiceA should be a master replica on partition1 after split",
                 drm1.isMasterReplica(SERVICEA));
         assertTrue("ServiceA should be a master replica on partition2 after split",
                 drm2.isMasterReplica(SERVICEA));
        
         // ServiceB should still only be a master replica on partition2 after split
         assertFalse("ServiceB should not be a master replica on partition1 after split",
                 drm1.isMasterReplica(SERVICEB));
         assertTrue("ServiceB must have a master replica on partition2 after split",
                 drm2.isMasterReplica(SERVICEB));
        
         // Remove ServiceA replicant from partition1        
         drm1.remove(SERVICEA);
        
         // test that this node is not the master replica        
         assertFalse("partition1 is not master replica after dropping ServiceA replicant",
                 drm1.isMasterReplica(SERVICEA));
        
         // Restore the local replicant
         drm1.add(SERVICEA, "valueA1a");
        
         // simulate a merge
         log.info("DRMTestCase.testIsMasterReplica() - restarting GossipRouter");
         router.start();
         // the merge can take more than 15 seconds to take effect
         sleepThread(30000);
        
         assertTrue(router.isStarted());

         // confirm that each partition contains two nodes again
         assertEquals("Partition1 should contain two nodes after merge; ",
               2, partition1.getCurrentView().size());
         assertEquals("Partition2 should contain two nodes after merge; ",
                 2, partition2.getCurrentView().size());
        
         // test that only one node is the master replica for serviceA after merge
         assertTrue("ServiceA must have a master replica after merge",
                 drm1.isMasterReplica(SERVICEA) || drm2.isMasterReplica(SERVICEA));
         assertTrue("ServiceA must have a single master replica after merge",
                 drm1.isMasterReplica(SERVICEA) != drm2.isMasterReplica(SERVICEA));
         // ServiceB should only be a master replica on partition2 after merge
         assertFalse("ServiceB should not be a master replica on partition1 after merge",
                 drm1.isMasterReplica(SERVICEB));
         assertTrue("ServiceB must have a master replica on partition2 after merge",
                 drm2.isMasterReplica(SERVICEB));
        
         // confirm that each partition contains correct DRM replicants for services A and B after merge
         assertEquals("Partition1 should contain two DRM replicants for serviceA after merge; ",
                 2, drm1.lookupReplicants(SERVICEA).size());
         assertEquals("Partition2 should contain two DRM replicants for serviceA after merge; ",
                 2, drm2.lookupReplicants(SERVICEA).size());
         assertEquals("Partition1 should contain one DRM replicant for serviceB after merge; ",
                 1, drm1.lookupReplicants(SERVICEB).size());
         assertEquals("Partition2 should contain one DRM replicant for serviceB after merge; ",
                 1, drm2.lookupReplicants(SERVICEB).size());
        
         partition1.stop();
         partition2.stop();
      }
      finally
      {
         // ...
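When tracking DRM state through a split and merge like the one above, a small dump helper can save time. A hedged sketch using only methods already exercised by this test, lookupReplicants() and isMasterReplica() (the helper itself is hypothetical, not part of the DRM API):

   static void dumpReplicants(DistributedReplicantManager drm, String key)
   {
      List replicants = drm.lookupReplicants(key);   // java.util.List of this key's replicants
      System.out.println(key + ": " + replicants.size() + " replicant(s), master="
            + drm.isMasterReplica(key));
      for (Object replicant : replicants)
      {
         System.out.println("  - " + replicant);
      }
   }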

