Package org.apache.hadoop.hbase.mapreduce

Examples of org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles
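LoadIncrementalHFiles (also exposed on the command line as the completebulkload tool) takes a directory of HFiles, typically produced by a MapReduce job writing through HFileOutputFormat, and moves them into the regions of an existing table. A minimal usage sketch, with a hypothetical table name and HFile directory:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

    public class BulkLoadExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical table; it must already exist before the load.
        HTable table = new HTable(conf, "my_table");
        try {
          // Hypothetical directory containing one subdirectory per column family.
          new LoadIncrementalHFiles(conf).doBulkLoad(new Path("/tmp/hfiles"), table);
        } finally {
          table.close();
        }
      }
    }

The examples below show the same call in context: coprocessor tests, a wrapper that globs for per-partition HFiles, and MapReduce drivers that bulk-load after an import job.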


      Path familyDir = new Path(dir, Bytes.toString(A));

      createHFile(util.getConfiguration(), fs, new Path(familyDir, Bytes.toString(A)), A, A);

      // Bulk load
      new LoadIncrementalHFiles(conf).doBulkLoad(dir, new HTable(conf, tableName));

      // Verify that both bulk-load coprocessor hooks fired
      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
          tableName,
          new Boolean[] {true, true});
View Full Code Here
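The test above asserts that a bulk load fires the RegionObserver pre/post hooks. A minimal sketch of such an observer, modeled loosely on what SimpleRegionObserver records (the class and field names here are illustrative):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.util.Pair;

    public class RecordingRegionObserver extends BaseRegionObserver {
      private volatile boolean hadPreBulkLoadHFile = false;
      private volatile boolean hadPostBulkLoadHFile = false;

      @Override
      public void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
          List<Pair<byte[], String>> familyPaths) throws IOException {
        // familyPaths holds one (column family, HFile path) pair per file to load.
        hadPreBulkLoadHFile = true;
      }

      @Override
      public boolean postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
          List<Pair<byte[], String>> familyPaths, boolean hasLoaded) throws IOException {
        hadPostBulkLoadHFile = true;
        return hasLoaded; // pass through whether the load succeeded
      }
    }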



   * @param conf Configuration object for the HFile loader.
   * @return the new HFile loader.
   */
  private static LoadIncrementalHFiles createHFileLoader(Configuration conf) {
    try {
      return new LoadIncrementalHFiles(conf); // throws Exception
    } catch (Exception exn) {
      throw new InternalKijiError(exn);
    }
  }
View Full Code Here

   *
   * @param hfilePath Path of the HFiles to load.
   * @throws IOException on I/O error.
   */
  public void bulkLoad(Path hfilePath) throws IOException {
    final LoadIncrementalHFiles loader = createHFileLoader(mConf);
    try {
      // LoadIncrementalHFiles.doBulkLoad() requires an HTable instance, not an HTableInterface:
      final HTable htable = (HTable) mHTableFactory.create(mConf, mHBaseTableName);
      try {
        final List<Path> hfilePaths = Lists.newArrayList();

        // Try to find any HFiles for partitions within the passed-in path
        final FileStatus[] hfiles = FileSystem.get(mConf).globStatus(new Path(hfilePath, "*"));
        for (FileStatus hfile : hfiles) {
          String partName = hfile.getPath().getName();
          if (!partName.startsWith("_") && partName.endsWith(".hfile")) {
            Path partHFile = new Path(hfilePath, partName);
            hfilePaths.add(partHFile);
          }
        }
        if (hfilePaths.isEmpty()) {
          // If we didn't find any parts, fall back to the passed-in path itself
          hfilePaths.add(hfilePath);
        }
        for (Path path : hfilePaths) {
          loader.doBulkLoad(path, htable);
          LOG.info("Successfully loaded: " + path.toString());
        }
      } finally {
        htable.close();
      }
View Full Code Here
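The glob above lets bulkLoad() accept two layouts: a directory of per-partition outputs named *.hfile, each loaded separately, or any other path, which is handed to doBulkLoad() unchanged. Illustrated with hypothetical paths:

    // Layout 1: per-partition outputs matching "*.hfile"; each part is passed
    // to doBulkLoad() on its own:
    //   hdfs:///user/alice/output/part-r-00000.hfile
    //   hdfs:///user/alice/output/part-r-00001.hfile
    //
    // Layout 2: no "*.hfile" entries found, so the passed-in path itself is
    // loaded, e.g. a directory with one subdirectory per column family:
    //   hdfs:///user/alice/output/info/...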


      HTable table = new HTable(conf, tableName);
      try {
        HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
        TEST_UTIL.waitTableEnabled(admin, tableName.getName());
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
        loader.doBulkLoad(loadPath, table);
      } finally {
        table.close();
      }
    }
View Full Code Here
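doBulkLoad() requires the target table to exist; the test above only waits for it to become enabled. A sketch of creating a pre-split table first (the family and split keys are hypothetical and assumed defined elsewhere):

    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      HTableDescriptor desc = new HTableDescriptor(tableName);
      desc.addFamily(new HColumnDescriptor(FAMILY)); // hypothetical byte[] FAMILY
      // Pre-splitting on the expected key ranges keeps doBulkLoad() from
      // having to split HFiles that straddle region boundaries.
      admin.createTable(desc, SPLIT_KEYS); // hypothetical byte[][] SPLIT_KEYS
    } finally {
      admin.close();
    }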

                  LOG.error("Import job failed, check JobTracker for details");
                  return false;
              }
 
              LOG.info("Loading HFiles from {}", outputPath);
              LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
              loader.doBulkLoad(outputPath, htable);
              htable.close();
 
              LOG.info("Incremental load complete for table=" + tableName);
 
              LOG.info("Removing output directory {}", outputPath);
View Full Code Here
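The HFiles loaded here come out of the import job above. A sketch of how such a job is typically configured (not the excerpted project's driver: ImportMapper is a hypothetical mapper emitting ImmutableBytesWritable row keys and Puts, and conf, inputPath, outputPath, and htable come from the surrounding context; imports omitted as in the excerpts):

    Job job = new Job(conf, "hfile-import"); // hypothetical job name
    job.setJarByClass(ImportMapper.class);
    job.setMapperClass(ImportMapper.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(Put.class);
    FileInputFormat.addInputPath(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    // Sets the output format, the sorting reducer, and a total-order
    // partitioner matched to the table's current region boundaries.
    HFileOutputFormat.configureIncrementalLoad(job, htable);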

      setPermission(loadPath, FsPermission.valueOf("-rwxrwxrwx"));

      HTable table = new HTable(conf, tableName);
      try {
        TEST_UTIL.waitTableAvailable(tableName, 30000);
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
        loader.doBulkLoad(loadPath, table);
      } finally {
        table.close();
      }
    }
View Full Code Here
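The bulk load moves HFiles into HBase-owned directories, which is why these tests open up permissions on the staging directory first. A sketch of a recursive helper like the setPermission()/chmod() calls used in these excerpts (the real bodies are not shown):

    private static void setPermissionRecursive(FileSystem fs, Path path, FsPermission perm)
        throws IOException {
      fs.setPermission(path, perm);
      if (fs.getFileStatus(path).isDir()) {
        for (FileStatus child : fs.listStatus(path)) {
          setPermissionRecursive(fs, child.getPath(), perm);
        }
      }
    }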

    // Before we can load the HFiles, we need to set the permissions so that
    // HBase has write access to testDir's contents
    chmod(testDir.toString());

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
                 expectedRows, HBaseTestUtil.countRows(table));
View Full Code Here
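A row count like HBaseTestUtil.countRows() can be implemented with a full table scan. A minimal sketch (not necessarily the excerpted project's implementation; uses the org.apache.hadoop.hbase.client Scan API):

    public static int countRows(HTable table) throws IOException {
      ResultScanner scanner = table.getScanner(new Scan());
      try {
        int rows = 0;
        while (scanner.next() != null) {
          rows++;
        }
        return rows;
      } finally {
        scanner.close();
      }
    }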

            LOG.error("Import job failed, check JobTracker for details");
            return 1;
        }

        LOG.info("Loading HFiles from {}", outputPath);
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(getConf());
        loader.doBulkLoad(outputPath, htable);
        htable.close();

        LOG.info("Incremental load complete");

        LOG.info("Removing output directory {}", outputPath);
View Full Code Here
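The removal announced by the last log line would typically be a recursive delete of the job output; a sketch of that truncated step:

    FileSystem fs = outputPath.getFileSystem(getConf());
    fs.delete(outputPath, true); // true = recursive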
