Package org.apache.hadoop.hbase.io.hfile

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig
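CacheConfig bundles the block-cache settings that HBase reads from a Configuration: whether data, index, and bloom blocks are cached on read and on write, whether a file's blocks are evicted from the cache when the file is closed, and a reference to the block cache itself. The fragments below are excerpts from HBase test and tool code (roughly the 0.92/0.94 era, judging by classes such as HFileReaderV2 and StoreFile.WriterBuilder); most break off mid-method.

A minimal sketch of the common pattern, assuming that era's API (CACHE_BLOCKS_ON_WRITE_KEY, shouldEvictOnClose(), and getBlockCache() all appear in the excerpts below; shouldCacheDataOnRead() and shouldCacheDataOnWrite() are the matching CacheConfig getters from the same period):

    Configuration conf = HBaseConfiguration.create();
    // CacheConfig reads its flags once, at construction time, so set them first.
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
    CacheConfig cacheConf = new CacheConfig(conf);

    System.out.println("cache data on read:  " + cacheConf.shouldCacheDataOnRead());
    System.out.println("cache data on write: " + cacheConf.shouldCacheDataOnWrite());
    System.out.println("evict on close:      " + cacheConf.shouldEvictOnClose());

    // The block cache handed back here is the process-wide LRU cache,
    // the same instance the tests below obtain and inspect.
    BlockCache cache = cacheConf.getBlockCache();

The resulting cacheConf is then handed to HFile.getWriterFactory(...) or HFile.createReader(...), as every example below does.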




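From a half-store-file scanner test: a small HFile (1 KB blocks) is written under the test data directory, with the writer obtained from the HFile writer factory and block caching wired in through a CacheConfig built from the test configuration.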
  public void testHalfScanner() throws IOException {
    String rootDir = TEST_UTIL.getDataTestDir().toString();
    Path p = new Path(rootDir, "test");
    Configuration conf = TEST_UTIL.getConfiguration();
    FileSystem fs = FileSystem.get(conf);
    CacheConfig cacheConf = new CacheConfig(conf);

    HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
        .withPath(fs, p)
        .withBlockSize(1024)
        .create();

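From a tool that inspects a column family directory: each HFile found there is opened with HFile.createReader and a fresh CacheConfig, and its first and last row keys are recovered from the file metadata.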
      FileStatus[] hfiles = fs.listStatus(cf.getPath());
      for (FileStatus hfile : hfiles) {
        byte[] start, end;
        HFile.Reader hf = null;
        try {
          CacheConfig cacheConf = new CacheConfig(getConf());
          hf = HFile.createReader(fs, hfile.getPath(), cacheConf);
          hf.loadFileInfo();
          KeyValue startKv = KeyValue.createKeyValueFromKey(hf.getFirstKey());
          start = startKv.getRow();
          KeyValue endKv = KeyValue.createKeyValueFromKey(hf.getLastKey());

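From an encoded-seeker test: the global LruBlockCache is obtained through CacheConfig.getBlockCache() and cleared, so that subsequent reads exercise the data-block encoding under a known cache state.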
  @Test
  public void testEncodedSeeker() throws IOException {
    System.err.println("Testing encoded seekers for encoding " + encoding);
    LruBlockCache cache =
      (LruBlockCache)new CacheConfig(testUtil.getConfiguration()).getBlockCache();
    cache.clearCache();
    // Need to disable default row bloom filter for this test to pass.
    HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
        setDataBlockEncoding(encoding).
        setEncodeOnDisk(encodeOnDisk).

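From what appears to be a read-performance harness (the constructor takes a row count and the file to read): the setUp hook opens an HFile.Reader over a previously written file, constructing the CacheConfig inline.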
      super(conf, fs, mf, totalRows);
    }

    @Override
    void setUp() throws Exception {
      reader = HFile.createReader(this.fs, this.mf, new CacheConfig(this.conf));
      this.reader.loadFileInfo();
    }

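From a cache-on-write test: after a store file is written, it is read back through the store's own CacheConfig so the test can check which block types landed in the cache during the write.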
    // Verify the block types of interest were cached on write
    readStoreFile(writer.getPath());
  }

  private void readStoreFile(Path path) throws IOException {
    CacheConfig cacheConf = store.getCacheConfig();
    BlockCache cache = cacheConf.getBlockCache();
    StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
        BloomType.ROWCOL, null);
    HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
    try {
      // Open a scanner with (on read) caching disabled

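The most complete example: a single test walks the whole cache lifecycle. It writes one store file with cache-on-write off and one with it on, checks hit/miss/eviction counters against the shared block cache after each read, compares the two files key by key, and finally closes readers with evict-on-close enabled and disabled.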
    // Find a home for our files (regiondir ("7e0102") and familyname).
    Path baseDir = new Path(new Path(this.testDir, "7e0102"),"twoCOWEOC");

    // Grab the block cache and get the initial hit/miss counts
    BlockCache bc = new CacheConfig(conf).getBlockCache();
    assertNotNull(bc);
    CacheStats cs = bc.getStats();
    long startHit = cs.getHitCount();
    long startMiss = cs.getMissCount();
    long startEvicted = cs.getEvictedCount();

    // Let's write a StoreFile with three blocks, with cache on write off
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
    CacheConfig cacheConf = new CacheConfig(conf);
    Path pathCowOff = new Path(baseDir, "123456789");
    StoreFile.Writer writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    LOG.debug(hsf.getPath().toString());

    // Read this file; we should see 3 misses
    StoreFile.Reader reader = hsf.createReader();
    reader.loadFileInfo();
    StoreFileScanner scanner = reader.getStoreFileScanner(true, true);
    scanner.seek(KeyValue.LOWESTKEY);
    while (scanner.next() != null); // pull every block through the cache
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss + 3, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startMiss += 3;
    scanner.close();
    reader.close(cacheConf.shouldEvictOnClose());

    // Now write a StoreFile with three blocks, with cache on write on
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
    cacheConf = new CacheConfig(conf);
    Path pathCowOn = new Path(baseDir, "123456788");
    writer = writeStoreFile(conf, cacheConf, pathCowOn, 3);
    hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);

    // Read this file; we should see 3 hits
    reader = hsf.createReader();
    scanner = reader.getStoreFileScanner(true, true);
    scanner.seek(KeyValue.LOWESTKEY);
    while (scanner.next() != null); // pull every block through the cache
    assertEquals(startHit + 3, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startHit += 3;
    scanner.close();
    reader.close(cacheConf.shouldEvictOnClose());

    // Let's read back the two files to ensure the blocks exactly match
    hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    StoreFile.Reader readerOne = hsf.createReader();
    readerOne.loadFileInfo();
    StoreFileScanner scannerOne = readerOne.getStoreFileScanner(true, true);
    scannerOne.seek(KeyValue.LOWESTKEY);
    hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    StoreFile.Reader readerTwo = hsf.createReader();
    readerTwo.loadFileInfo();
    StoreFileScanner scannerTwo = readerTwo.getStoreFileScanner(true, true);
    scannerTwo.seek(KeyValue.LOWESTKEY);
    KeyValue kv1 = null;
    KeyValue kv2 = null;
    while ((kv1 = scannerOne.next()) != null) {
      kv2 = scannerTwo.next();
      assertTrue(kv1.equals(kv2));
      assertTrue(Bytes.compareTo(
          kv1.getBuffer(), kv1.getKeyOffset(), kv1.getKeyLength(),
          kv2.getBuffer(), kv2.getKeyOffset(), kv2.getKeyLength()) == 0);
      assertTrue(Bytes.compareTo(
          kv1.getBuffer(), kv1.getValueOffset(), kv1.getValueLength(),
          kv2.getBuffer(), kv2.getValueOffset(), kv2.getValueLength()) == 0);
    }
    assertNull(scannerTwo.next());
    assertEquals(startHit + 6, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startHit += 6;
    scannerOne.close();
    readerOne.close(cacheConf.shouldEvictOnClose());
    scannerTwo.close();
    readerTwo.close(cacheConf.shouldEvictOnClose());

    // Let's close the first file with evict on close turned on
    conf.setBoolean("hbase.rs.evictblocksonclose", true); // CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY
    cacheConf = new CacheConfig(conf);
    hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    reader = hsf.createReader();
    reader.close(cacheConf.shouldEvictOnClose());

    // We should have 3 new evictions
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted + 3, cs.getEvictedCount());
    startEvicted += 3;

    // Let's close the second file with evict on close turned off
    conf.setBoolean("hbase.rs.evictblocksonclose", false);
    cacheConf = new CacheConfig(conf);
    hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    reader = hsf.createReader();
    reader.close(cacheConf.shouldEvictOnClose());

    // We expect no changes
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());

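From a data-block-encoding test: a StoreFile.Writer is configured with a FAST_DIFF encoder, passing the same algorithm for both the on-disk and in-cache positions of HFileDataBlockEncoderImpl.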
    DataBlockEncoding dataBlockEncoderAlgo = DataBlockEncoding.FAST_DIFF;
    HFileDataBlockEncoder dataBlockEncoder =
        new HFileDataBlockEncoderImpl(
            dataBlockEncoderAlgo,
            dataBlockEncoderAlgo);
    cacheConf = new CacheConfig(conf);
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
        HConstants.DEFAULT_BLOCKSIZE)
            .withFilePath(path)
            .withDataBlockEncoder(dataBlockEncoder)
            .withMaxKeyCount(2000)

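A static helper shared by several tests: it creates an HFile with a given number of rows, handing a throwaway CacheConfig to the writer factory alongside block size, compression, and comparator settings.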
  /**
   * Create an HFile at the given path with the given number of rows, each
   * carrying the specified value.
   */
  public static void createHFile(FileSystem fs, Path path, byte[] family,
      byte[] qualifier, byte[] value, int numRows) throws IOException {
    HFile.Writer writer = HFile
        .getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withBlockSize(BLOCKSIZE)
        .withCompression(COMPRESSION)
        .withComparator(KeyValue.KEY_COMPARATOR)
        .create();

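A later variant of the readStoreFile helper shown earlier; the only difference is that the store's schema metrics are passed to the StoreFile before its reader is created.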
    // Verify the block types of interest were cached on write
    readStoreFile(writer.getPath());
  }

  private void readStoreFile(Path path) throws IOException {
    CacheConfig cacheConf = store.getCacheConfig();
    BlockCache cache = cacheConf.getBlockCache();
    StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
        BloomType.ROWCOL, null);
    store.passSchemaMetricsTo(sf);
    HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
    try {
