Examples of MiniQJMHACluster
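
All of the snippets on this page follow the same basic pattern: build a MiniQJMHACluster from a Configuration, obtain the embedded MiniDFSCluster, wait for it to come up, transition one NameNode to active, run file-system operations against it, and shut the cluster down in a finally block. The sketch below distills that pattern from the examples that follow; the class name, the /example path, and the final assertion are illustrative only and are not taken from any one of the listed tests.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
import org.junit.Assert;
import org.junit.Test;

public class MiniQJMHAClusterExample {

  @Test (timeout = 300000)
  public void testBasicUsage() throws Exception {
    final Configuration conf = new Configuration();
    MiniQJMHACluster cluster = null;
    try {
      // Start an HA NameNode pair backed by an in-process quorum journal.
      cluster = new MiniQJMHACluster.Builder(conf).build();
      MiniDFSCluster dfsCluster = cluster.getDfsCluster();
      dfsCluster.waitActive();

      // Both NameNodes start in standby; make NN0 the active one.
      dfsCluster.transitionToActive(0);
      DistributedFileSystem dfs = dfsCluster.getFileSystem(0);

      // Ordinary file-system operations go through the active NameNode.
      Assert.assertTrue(dfs.mkdirs(new Path("/example")));
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}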


Examples of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster


  @Test (timeout = 300000)
  public void testFinalize() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    MiniQJMHACluster cluster = null;
    final Path foo = new Path("/foo");
    final Path bar = new Path("/bar");

    try {
      cluster = new MiniQJMHACluster.Builder(conf).build();
      MiniDFSCluster dfsCluster = cluster.getDfsCluster();
      dfsCluster.waitActive();

      // let NN1 tail editlog every 1s
      dfsCluster.getConfiguration(1).setInt(
          DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
      dfsCluster.restartNameNode(1);

      dfsCluster.transitionToActive(0);
      DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
      dfs.mkdirs(foo);

      FSImage fsimage = dfsCluster.getNamesystem(0).getFSImage();

      // start rolling upgrade
      RollingUpgradeInfo info = dfs
          .rollingUpgrade(RollingUpgradeAction.PREPARE);
      Assert.assertTrue(info.isStarted());
      dfs.mkdirs(bar);

      queryForPreparation(dfs);

      // The NN should have a copy of the fsimage in case of rollbacks.
      Assert.assertTrue(fsimage.hasRollbackFSImage());

      info = dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
      Assert.assertTrue(info.isFinalized());
      Assert.assertTrue(dfs.exists(foo));

      // Once finalized, there should be no more fsimage for rollbacks.
      Assert.assertFalse(fsimage.hasRollbackFSImage());

      // Should have no problem in restart and replaying edits that include
      // the FINALIZE op.
      dfsCluster.restartNameNode(0);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
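
Each of the rolling-upgrade tests on this page calls a queryForPreparation(dfs) helper that is not reproduced here. A plausible implementation, consistent with how the tests use it (poll the QUERY action until the NameNodes report that rollback images have been created), is sketched below; the retry count, sleep interval, and failure message are assumptions, and the sketch relies on the same imports already used by the tests (DistributedFileSystem, RollingUpgradeInfo, RollingUpgradeAction, Assert).

  static void queryForPreparation(DistributedFileSystem dfs)
      throws IOException, InterruptedException {
    RollingUpgradeInfo info;
    int retries = 0;
    while (++retries < 10) {
      // QUERY only reports rolling-upgrade progress; it does not change state.
      info = dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
      if (info.createdRollbackImages()) {
        return;
      }
      Thread.sleep(1000);
    }
    Assert.fail("Rollback image was not created before the timeout");
  }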

Examples of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster


  @Test (timeout = 300000)
  public void testQuery() throws Exception {
    final Configuration conf = new Configuration();
    MiniQJMHACluster cluster = null;
    try {
      cluster = new MiniQJMHACluster.Builder(conf).build();
      MiniDFSCluster dfsCluster = cluster.getDfsCluster();
      dfsCluster.waitActive();

      dfsCluster.transitionToActive(0);
      DistributedFileSystem dfs = dfsCluster.getFileSystem(0);

      dfsCluster.shutdownNameNode(1);

      // start rolling upgrade
      RollingUpgradeInfo info = dfs
          .rollingUpgrade(RollingUpgradeAction.PREPARE);
      Assert.assertTrue(info.isStarted());

      info = dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
      Assert.assertFalse(info.createdRollbackImages());

      dfsCluster.restartNameNode(1);

      queryForPreparation(dfs);

      // The NN should have a copy of the fsimage in case of rollbacks.
      Assert.assertTrue(dfsCluster.getNamesystem(0).getFSImage()
          .hasRollbackFSImage());
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

Examples of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster

  public void testCheckpoint() throws IOException, InterruptedException {
    final Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);

    MiniQJMHACluster cluster = null;
    final Path foo = new Path("/foo");

    try {
      cluster = new MiniQJMHACluster.Builder(conf).build();
      MiniDFSCluster dfsCluster = cluster.getDfsCluster();
      dfsCluster.waitActive();

      dfsCluster.transitionToActive(0);
      DistributedFileSystem dfs = dfsCluster.getFileSystem(0);

      // start rolling upgrade
      RollingUpgradeInfo info = dfs
          .rollingUpgrade(RollingUpgradeAction.PREPARE);
      Assert.assertTrue(info.isStarted());

      queryForPreparation(dfs);

      dfs.mkdirs(foo);
      long txid = dfs.rollEdits();
      Assert.assertTrue(txid > 0);

      int retries = 0;
      while (++retries < 5) {
        NNStorage storage = dfsCluster.getNamesystem(1).getFSImage()
            .getStorage();
        if (storage.getFsImageName(txid - 1) != null) {
          return;
        }
        Thread.sleep(1000);
      }
      Assert.fail("new checkpoint does not exist");

    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

Examples of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster


  @Test (timeout = 300000)
  public void testFinalize() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    MiniQJMHACluster cluster = null;
    final Path foo = new Path("/foo");
    final Path bar = new Path("/bar");

    try {
      cluster = new MiniQJMHACluster.Builder(conf).build();
      MiniDFSCluster dfsCluster = cluster.getDfsCluster();
      dfsCluster.waitActive();

      // let NN1 tail editlog every 1s
      dfsCluster.getConfiguration(1).setInt(
          DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
      dfsCluster.restartNameNode(1);

      dfsCluster.transitionToActive(0);
      DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
      dfs.mkdirs(foo);

      FSImage fsimage = dfsCluster.getNamesystem(0).getFSImage();

      // start rolling upgrade
      RollingUpgradeInfo info = dfs
          .rollingUpgrade(RollingUpgradeAction.PREPARE);
      Assert.assertTrue(info.isStarted());
      dfs.mkdirs(bar);

      queryForPreparation(dfs);

      // The NN should have a copy of the fsimage in case of rollbacks.
      Assert.assertTrue(fsimage.hasRollbackFSImage());

      info = dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
      Assert.assertTrue(info.isFinalized());
      Assert.assertTrue(dfs.exists(foo));

      // Once finalized, there should be no more fsimage for rollbacks.
      Assert.assertFalse(fsimage.hasRollbackFSImage());
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }


Examples of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster

   * rolling upgrade.
   */
  @Test
  public void testRollbackWithHAQJM() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    MiniQJMHACluster cluster = null;
    final Path foo = new Path("/foo");
    final Path bar = new Path("/bar");

    try {
      cluster = new MiniQJMHACluster.Builder(conf).build();
      MiniDFSCluster dfsCluster = cluster.getDfsCluster();
      dfsCluster.waitActive();

      // let NN1 tail editlog every 1s
      dfsCluster.getConfiguration(1).setInt(
          DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
      dfsCluster.restartNameNode(1);

      dfsCluster.transitionToActive(0);
      DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
      dfs.mkdirs(foo);

      // start rolling upgrade
      RollingUpgradeInfo info = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
      Assert.assertTrue(info.isStarted());

      // create new directory
      dfs.mkdirs(bar);
      dfs.close();

      TestRollingUpgrade.queryForPreparation(dfs);

      // If the query returns true, both active and the standby NN should have
      // rollback fsimage ready.
      Assert.assertTrue(dfsCluster.getNameNode(0).getFSImage()
          .hasRollbackFSImage());
      Assert.assertTrue(dfsCluster.getNameNode(1).getFSImage()
          .hasRollbackFSImage());
     
      // rollback NN0
      dfsCluster.restartNameNode(0, true, "-rollingUpgrade",
          "rollback");
      // shutdown NN1
      dfsCluster.shutdownNameNode(1);
      dfsCluster.transitionToActive(0);

      // make sure /foo is still there, but /bar is not
      dfs = dfsCluster.getFileSystem(0);
      Assert.assertTrue(dfs.exists(foo));
      Assert.assertFalse(dfs.exists(bar));

      // check the details of NNStorage
      NNStorage storage = dfsCluster.getNamesystem(0).getFSImage()
          .getStorage();
      // segments: (startSegment, mkdir, start upgrade, endSegment),
      // (startSegment, mkdir, endSegment)
      checkNNStorage(storage, 4, 7);

      // check storage in JNs
      for (int i = 0; i < NUM_JOURNAL_NODES; i++) {
        File dir = cluster.getJournalCluster().getCurrentDir(i,
            MiniQJMHACluster.NAMESERVICE);
        checkJNStorage(dir, 5, 7);
      }

      // restart NN0 again to make sure we can start using the new fsimage and
      // the corresponding md5 checksum
      dfsCluster.restartNameNode(0);
      // start the rolling upgrade again to make sure we do not load upgrade
      // status after the rollback
      dfsCluster.transitionToActive(0);
      dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

Examples of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster

   * JournalNodes.
   */
  @Test
  public void testUpgradeWithJournalNodes() throws IOException,
      URISyntaxException {
    MiniQJMHACluster qjCluster = null;
    FileSystem fs = null;
    try {
      Builder builder = new MiniQJMHACluster.Builder(conf);
      builder.getDfsBuilder()
          .numDataNodes(0);
      qjCluster = builder.build();

      MiniDFSCluster cluster = qjCluster.getDfsCluster();
     
      // No upgrade is in progress at the moment.
      checkJnPreviousDirExistence(qjCluster, false);
      checkClusterPreviousDirExistence(cluster, false);
      assertCTimesEqual(cluster);
     
      // Transition NN0 to active and do some FS ops.
      cluster.transitionToActive(0);
      fs = HATestUtil.configureFailoverFs(cluster, conf);
      assertTrue(fs.mkdirs(new Path("/foo1")));
     
      // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
      // flag.
      cluster.shutdownNameNode(1);
      cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
      cluster.restartNameNode(0, false);
     
      checkNnPreviousDirExistence(cluster, 0, true);
      checkNnPreviousDirExistence(cluster, 1, false);
      checkJnPreviousDirExistence(qjCluster, true);
     
      // NN0 should come up in the active state when given the -upgrade option,
      // so no need to transition it to active.
      assertTrue(fs.mkdirs(new Path("/foo2")));
     
      // Restart NN0 without the -upgrade flag, to make sure that works.
      cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
      cluster.restartNameNode(0, false);
     
      // Make sure we can still do FS ops after upgrading.
      cluster.transitionToActive(0);
      assertTrue(fs.mkdirs(new Path("/foo3")));
     
      // Now bootstrap the standby with the upgraded info.
      int rc = BootstrapStandby.run(
          new String[]{"-force"},
          cluster.getConfiguration(1));
      assertEquals(0, rc);
     
      // Now restart NN1 and make sure that we can do ops against that as well.
      cluster.restartNameNode(1);
      cluster.transitionToStandby(0);
      cluster.transitionToActive(1);
      assertTrue(fs.mkdirs(new Path("/foo4")));
     
      assertCTimesEqual(cluster);
    } finally {
      if (fs != null) {
        fs.close();
      }
      if (qjCluster != null) {
        qjCluster.shutdown();
      }
    }
  }

Examples of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster


  @Test
  public void testFinalizeWithJournalNodes() throws IOException,
      URISyntaxException {
    MiniQJMHACluster qjCluster = null;
    FileSystem fs = null;
    try {
      Builder builder = new MiniQJMHACluster.Builder(conf);
      builder.getDfsBuilder()
          .numDataNodes(0);
      qjCluster = builder.build();

      MiniDFSCluster cluster = qjCluster.getDfsCluster();
     
      // No upgrade is in progress at the moment.
      checkJnPreviousDirExistence(qjCluster, false);
      checkClusterPreviousDirExistence(cluster, false);
      assertCTimesEqual(cluster);
     
      // Transition NN0 to active and do some FS ops.
      cluster.transitionToActive(0);
      fs = HATestUtil.configureFailoverFs(cluster, conf);
      assertTrue(fs.mkdirs(new Path("/foo1")));
     
      // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
      // flag.
      cluster.shutdownNameNode(1);
      cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
      cluster.restartNameNode(0, false);
     
      assertTrue(fs.mkdirs(new Path("/foo2")));
     
      checkNnPreviousDirExistence(cluster, 0, true);
      checkNnPreviousDirExistence(cluster, 1, false);
      checkJnPreviousDirExistence(qjCluster, true);
     
      // Now bootstrap the standby with the upgraded info.
      int rc = BootstrapStandby.run(
          new String[]{"-force"},
          cluster.getConfiguration(1));
      assertEquals(0, rc);
     
      cluster.restartNameNode(1);
     
      runFinalizeCommand(cluster);
     
      checkClusterPreviousDirExistence(cluster, false);
      checkJnPreviousDirExistence(qjCluster, false);
      assertCTimesEqual(cluster);
    } finally {
      if (fs != null) {
        fs.close();
      }
      if (qjCluster != null) {
        qjCluster.shutdown();
      }
    }
  }

Examples of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster

   * state that we're allowed to finalize.
   */
  @Test
  public void testFinalizeFromSecondNameNodeWithJournalNodes()
      throws IOException, URISyntaxException {
    MiniQJMHACluster qjCluster = null;
    FileSystem fs = null;
    try {
      Builder builder = new MiniQJMHACluster.Builder(conf);
      builder.getDfsBuilder()
          .numDataNodes(0);
      qjCluster = builder.build();

      MiniDFSCluster cluster = qjCluster.getDfsCluster();
     
      // No upgrade is in progress at the moment.
      checkJnPreviousDirExistence(qjCluster, false);
      checkClusterPreviousDirExistence(cluster, false);
      assertCTimesEqual(cluster);
     
      // Transition NN0 to active and do some FS ops.
      cluster.transitionToActive(0);
      fs = HATestUtil.configureFailoverFs(cluster, conf);
      assertTrue(fs.mkdirs(new Path("/foo1")));
     
      // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
      // flag.
      cluster.shutdownNameNode(1);
      cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
      cluster.restartNameNode(0, false);
     
      checkNnPreviousDirExistence(cluster, 0, true);
      checkNnPreviousDirExistence(cluster, 1, false);
      checkJnPreviousDirExistence(qjCluster, true);
     
      // Now bootstrap the standby with the upgraded info.
      int rc = BootstrapStandby.run(
          new String[]{"-force"},
          cluster.getConfiguration(1));
      assertEquals(0, rc);
     
      cluster.restartNameNode(1);
     
      // Make the second NN (not the one that initiated the upgrade) active when
      // the finalize command is run.
      cluster.transitionToStandby(0);
      cluster.transitionToActive(1);
     
      runFinalizeCommand(cluster);
     
      checkClusterPreviousDirExistence(cluster, false);
      checkJnPreviousDirExistence(qjCluster, false);
      assertCTimesEqual(cluster);
    } finally {
      if (fs != null) {
        fs.close();
      }
      if (qjCluster != null) {
        qjCluster.shutdown();
      }
    }
  }
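
The three HA-upgrade tests above also depend on helpers that are not shown on this page (runFinalizeCommand, checkJnPreviousDirExistence, checkNnPreviousDirExistence, assertCTimesEqual). As one example, runFinalizeCommand could be implemented by pointing a DFSAdmin at the running HA pair and invoking its finalize-upgrade command. The sketch below is an illustration under that assumption, not necessarily the exact helper these tests use; it assumes the test class's conf field plus the org.apache.hadoop.hdfs.tools.DFSAdmin and HATestUtil imports.

  private void runFinalizeCommand(MiniDFSCluster cluster) throws IOException {
    // Point the client-side failover configuration at the running HA pair,
    // then ask the active NameNode to finalize the upgrade.
    HATestUtil.setFailoverConfigurations(cluster, conf);
    new DFSAdmin(conf).finalizeUpgrade();
  }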