Examples of HTable


Examples of org.apache.hadoop.hbase.client.HTable

      // Tail of a createTable test helper: each requested column family is
      // capped at numVersions versions, the table is created through the admin
      // client, and an HTable bound to the new table is returned.
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }
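
Many of the fragments on this page are tails of test-utility helpers whose method heads are cut off by the listing. For orientation, here is a minimal, self-contained sketch of the older HTable-based client API they exercise; the table name, column family, qualifier and values are illustrative and not taken from the code above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class HTableBasicUsage {
  public static void main(String[] args) throws Exception {
    // "demo_table", "cf", "qual" and the values below are made-up names.
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "demo_table");
    try {
      // Write one cell.
      Put put = new Put(Bytes.toBytes("row1"));
      put.add(Bytes.toBytes("cf"), Bytes.toBytes("qual"), Bytes.toBytes("v1"));
      table.put(put);

      // Read it back.
      Get get = new Get(Bytes.toBytes("row1"));
      Result result = table.get(get);
      System.out.println(Bytes.toString(
          result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("qual"))));
    } finally {
      table.close();
    }
  }
}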

Examples of org.apache.hadoop.hbase.client.HTable

      // Variant of the helper above; the returned HTable is built on a fresh
      // copy of the test configuration instead of the caller-supplied one.
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

Examples of org.apache.hadoop.hbase.client.HTable

          // Variant that also sets a block size on each column family
          // (the HColumnDescriptor construction is cut off above this fragment).
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

Examples of org.apache.hadoop.hbase.client.HTable

          // Variant taking an array of version counts, one entry per column family.
          .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

Examples of org.apache.hadoop.hbase.client.HTable

   * @param tableName existing table
   * @return HTable for the now-empty table
   * @throws IOException
   */
  public HTable truncateTable(byte [] tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    // "Truncate" by deleting every row; the table and its schema stay in place.
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan.close();
    return table;
  }
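
The truncateTable helper above issues one delete call per row. The same client API also accepts a list of Deletes, so the rows can be removed in batches. Below is a minimal sketch under the same older-API assumptions; the class name, method name and batch size are illustrative.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;

public class BatchedTruncate {
  // Deletes every row of the given table using batched Delete calls.
  public static void deleteAllRows(HTable table, int batchSize) throws IOException {
    ResultScanner scanner = table.getScanner(new Scan());
    try {
      List<Delete> batch = new ArrayList<Delete>(batchSize);
      for (Result res : scanner) {
        batch.add(new Delete(res.getRow()));
        if (batch.size() >= batchSize) {
          // Batched delete: one client call instead of one per row.
          table.delete(batch);
          batch.clear();
        }
      }
      if (!batch.isEmpty()) {
        table.delete(batch);
      }
    } finally {
      scanner.close();
    }
  }
}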

Examples of org.apache.hadoop.hbase.client.HTable

 
  public int createMultiRegions(final Configuration c, final HTable table,
          final byte[] columnFamily, byte [][] startKeys, boolean cleanupFS)
  throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if(!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }
    // remove empty region - this is tricky as the mini cluster during the test
    // setup already has the "<tablename>,,123456789" row with an empty start
    // and end key. Adding the custom regions below adds those blindly,
    // including the new start region from empty to "bbb". lg
    List<byte[]> rows = getMetaTableRows(htd.getName());
    String regionToDeleteInFS = table
        .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
        .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // add custom ones
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getTableName(),
        startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegions: inserted " + hri.toString());
      newRegions.add(hri);
      count++;
    }
    // see comment above, remove "old" (or previous) single region
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    if (cleanupFS) {
      // see HBASE-7417 - this confused TestReplication
      // remove the "old" region from FS
      Path tableDir = new Path(getDefaultRootDirPath().toString()
          + System.getProperty("file.separator") + htd.getNameAsString()
          + System.getProperty("file.separator") + regionToDeleteInFS);
      getDFSCluster().getFileSystem().delete(tableDir, true);
    }
    // flush cache of regions
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    // assign all the new regions IF table is enabled.
    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for(HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }
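
For reference, a sketch of how a test might call the createMultiRegions helper above, assuming the helper lives on HBaseTestingUtility (as the getHBaseAdmin()/getDFSCluster() calls suggest); the table handle, family name and split keys are made up.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiRegionExample {
  // Splits an existing test table into regions at the given start keys and
  // returns the number of regions written to .META.
  static int splitIntoRegions(HBaseTestingUtility util, HTable table)
      throws IOException {
    byte[][] startKeys = new byte[][] {
        HConstants.EMPTY_BYTE_ARRAY,   // first region starts at the empty key
        Bytes.toBytes("bbb"),
        Bytes.toBytes("ccc"),
        Bytes.toBytes("ddd")
    };
    return util.createMultiRegions(util.getConfiguration(), table,
        Bytes.toBytes("cf"), startKeys, true);
  }
}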

Examples of org.apache.hadoop.hbase.client.HTable

   * @throws IOException
   */
  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final HTableDescriptor htd, byte [][] startKeys)
  throws IOException {
    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // add custom ones
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
          startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegionsInMeta: inserted " + hri.toString());
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

Examples of org.apache.hadoop.hbase.client.HTable

   *
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows() throws IOException {
    // TODO: Redo using MetaReader class
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

Examples of org.apache.hadoop.hbase.client.HTable

   *
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
    // TODO: Redo using MetaReader.
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      byte[] val = result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
      if (val == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        // TODO figure out what to do for this new hosed case.
        continue;
      }
      HRegionInfo info = Writables.getHRegionInfo(val);
      if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
        LOG.info("getMetaTableRows: row -> " +
            Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }
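
Both getMetaTableRows variants scan every column of the catalog table although they only need the row key and, in the filtered variant, the info:regioninfo cell. Below is a sketch of the same lookup with the scan narrowed to that single column; same older-API assumptions as above, and the class and method names are illustrative.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;

public class MetaRowsByTable {
  // Returns the catalog rows belonging to the given table, fetching only the
  // info:regioninfo column instead of whole rows.
  public static List<byte[]> metaRowsFor(Configuration conf, byte[] tableName)
      throws IOException {
    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    Scan scan = new Scan();
    scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    ResultScanner scanner = meta.getScanner(scan);
    try {
      for (Result result : scanner) {
        byte[] val = result.getValue(HConstants.CATALOG_FAMILY,
            HConstants.REGIONINFO_QUALIFIER);
        if (val == null) {
          continue; // row without region info, skipped as in the helper above
        }
        HRegionInfo info = Writables.getHRegionInfo(val);
        if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
          rows.add(result.getRow());
        }
      }
    } finally {
      scanner.close();
      meta.close();
    }
    return rows;
  }
}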

Examples of org.apache.hadoop.hbase.client.HTable

    // Now closing & waiting to be sure that the clients get it.
    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), HConstants.META_TABLE_NAME).close();
    }
  }