Package org.apache.hadoop.hbase.regionserver

Examples of org.apache.hadoop.hbase.regionserver.StoreFile$Writer


        LOG.debug("Adding snapshot references for " + storeFiles  + " hfiles");
      }

      // 2.2. iterate through all the store's files and create "references".
      for (int i = 0, sz = storeFiles.size(); i < sz; i++) {
        StoreFile storeFile = storeFiles.get(i);
        monitor.rethrowException();

        // create "reference" to this store file.
        LOG.debug("Adding reference for file (" + (i+1) + "/" + sz + "): " + storeFile.getPath());
        visitor.storeFile(regionData, familyData, storeFile.getFileInfo());
      }
      visitor.familyClose(regionData, familyData);
    }
    visitor.regionClose(regionData);
  }
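
The visitor interface itself is not part of this excerpt. A minimal sketch of the callback shape the loop above relies on (the interface name and generic parameters are assumptions, not necessarily the real HBase API):

  // Sketch only: callback shape inferred from the visitor calls above.
  interface RegionVisitor<TRegion, TFamily> {
    void storeFile(TRegion regionData, TFamily familyData, StoreFileInfo fileInfo); // once per store file
    void familyClose(TRegion regionData, TFamily familyData);                       // after a family's files
    void regionClose(TRegion regionData);                                           // after all families
  }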


  @Test
  public void testWithReferences() throws Exception {
    StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create());
    StripeCompactor sc = mock(StripeCompactor.class);
    StoreFile ref = createFile();
    when(ref.isReference()).thenReturn(true);
    StripeInformationProvider si = mock(StripeInformationProvider.class);
    Collection<StoreFile> sfs = al(ref, createFile());
    when(si.getStorefiles()).thenReturn(sfs);

    assertTrue(policy.needsCompactions(si, al()));
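
The al(...) helper is not shown in this excerpt; a minimal sketch consistent with the calls al(ref, createFile()) and al() above (varargs collected into a list):

  // Sketch only: the real helper may differ in name or return type.
  @SafeVarargs
  private static <T> ArrayList<T> al(T... files) {
    return new ArrayList<T>(Arrays.asList(files));
  }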

    ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
    long now = defaultTtl + 2;
    edge.setValue(now);
    EnvironmentEdgeManager.injectEdge(edge);
    try {
      StoreFile expiredFile = createFile(), notExpiredFile = createFile();
      when(expiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl - 1);
      when(notExpiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl + 1);
      List<StoreFile> expired = Lists.newArrayList(expiredFile, expiredFile);
      List<StoreFile> notExpired = Lists.newArrayList(notExpiredFile, notExpiredFile);
      List<StoreFile> mixed = Lists.newArrayList(expiredFile, notExpiredFile);

      StripeCompactionPolicy policy =
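
The excerpt cuts off inside the try block. The inject/reset idiom it opens with normally restores the real clock in a finally block; a sketch assuming the standard EnvironmentEdgeManager API:

  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  edge.setValue(now);                       // freeze "current time" at a chosen value
  EnvironmentEdgeManager.injectEdge(edge);  // all HBase time reads now use the manual edge
  try {
    // ... assertions that depend on the frozen clock ...
  } finally {
    EnvironmentEdgeManager.reset();         // restore the default system-clock edge
  }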

    when(si.getLevel0Files()).thenReturn(l0Files);
    return si;
  }

  private static StoreFile createFile(long size) throws Exception {
    StoreFile sf = mock(StoreFile.class);
    when(sf.getPath()).thenReturn(new Path("moo"));
    StoreFile.Reader r = mock(StoreFile.Reader.class);
    when(r.getEntries()).thenReturn(size);
    when(r.length()).thenReturn(size);
    when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
    when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
    when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong())).thenReturn(
      mock(StoreFileScanner.class));
    when(sf.getReader()).thenReturn(r);
    when(sf.createReader()).thenReturn(r);
    return sf;
  }
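
A typical call site for this factory in the surrounding tests (sizes are illustrative):

  // Illustrative only: three mock files in a descending size run; no real HDFS I/O.
  List<StoreFile> files = Lists.newArrayList(
      createFile(1000L), createFile(500L), createFile(100L));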

      ArrayList<StoreFile> candidateFiles, final List<StoreFile> filesCompacting) {
    // candidates = all storefiles not already in compaction queue
    if (!filesCompacting.isEmpty()) {
      // exclude all files older than the newest file we're currently
      // compacting. This allows us to preserve contiguity (HBASE-2856).
      StoreFile last = filesCompacting.get(filesCompacting.size() - 1);
      int idx = candidateFiles.indexOf(last);
      Preconditions.checkArgument(idx != -1);
      candidateFiles.subList(0, idx + 1).clear();
    }
    return candidateFiles;
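
The subList(...).clear() call above mutates candidateFiles in place. A standalone illustration of the same exclusion, using strings in place of store files:

  // Plain java.util semantics: clearing a subList removes that range from the backing list.
  ArrayList<String> candidates = new ArrayList<>(Arrays.asList("f1", "f2", "f3", "f4"));
  List<String> compacting = Arrays.asList("f1", "f2"); // f2 = newest file already compacting
  String last = compacting.get(compacting.size() - 1);
  int idx = candidates.indexOf(last);                  // 1
  candidates.subList(0, idx + 1).clear();              // drops f1 and f2
  // candidates is now [f3, f4]: only files newer than anything compacting remain.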

    // get store file sizes for the incremental compaction selection below.
    final int countOfFiles = candidates.size();
    long[] fileSizes = new long[countOfFiles];
    long[] sumSize = new long[countOfFiles];
    for (int i = countOfFiles - 1; i >= 0; --i) {
      StoreFile file = candidates.get(i);
      fileSizes[i] = file.getReader().length();
      // maintain sumSize[i] = sum of fileSizes over the half-open window [i, i + maxFilesToCompact - 1)
      int tooFar = i + comConf.getMaxFilesToCompact() - 1;
      sumSize[i] = fileSizes[i]
        + ((i + 1 < countOfFiles) ? sumSize[i + 1] : 0)
        - ((tooFar < countOfFiles) ? fileSizes[tooFar] : 0);
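
The backward pass above maintains a sliding-window sum in a single traversal. A standalone check of the recurrence with concrete numbers (each window spans the half-open range fileSizes[i, i + maxFilesToCompact - 1)):

  long[] fileSizes = {40, 30, 20, 10};
  int maxFilesToCompact = 3;            // stands in for comConf.getMaxFilesToCompact()
  long[] sumSize = new long[fileSizes.length];
  for (int i = fileSizes.length - 1; i >= 0; --i) {
    int tooFar = i + maxFilesToCompact - 1;
    sumSize[i] = fileSizes[i]
        + ((i + 1 < fileSizes.length) ? sumSize[i + 1] : 0)
        - ((tooFar < fileSizes.length) ? fileSizes[tooFar] : 0);
  }
  // sumSize == {70, 50, 30, 10}: e.g. sumSize[0] = 40 + 30, the two sizes in [0, 2).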

    if (lowTimestamp > 0L && lowTimestamp < (now - mcTime)) {
      // Major compaction time has elapsed.
      long cfTtl = this.storeConfigInfo.getStoreFileTtl();
      if (filesToCompact.size() == 1) {
        // Single file
        StoreFile sf = filesToCompact.iterator().next();
        Long minTimestamp = sf.getMinimumTimestamp();
        long oldest = (minTimestamp == null)
            ? Long.MIN_VALUE
            : now - minTimestamp.longValue();
        if (sf.isMajorCompaction() &&
            (cfTtl == HConstants.FOREVER || oldest < cfTtl)) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Skipping major compaction of " + this +
                " because one (major) compacted file only and oldestTime " +
                oldest + "ms is < ttl=" + cfTtl);
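
The age arithmetic in that branch is easy to misread; the same check with concrete numbers (values are illustrative):

  long now = 1_000_000L;
  Long minTimestamp = 400_000L;  // earliest cell timestamp in the single file
  long cfTtl = 700_000L;         // store TTL; HConstants.FOREVER disables the check
  long oldest = (minTimestamp == null) ? Long.MIN_VALUE : now - minTimestamp;
  // oldest == 600_000 ms < cfTtl, so nothing in the file has expired yet and a file
  // that already went through a major compaction can skip another one.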

  }

  private static CompactionRequest createDummyRequest() throws Exception {
    // "Files" are totally unused, it's Scanner class below that gives compactor fake KVs.
    // But compaction depends on everything under the sun, so stub everything with dummies.
    StoreFile sf = mock(StoreFile.class);
    StoreFile.Reader r = mock(StoreFile.Reader.class);
    when(r.length()).thenReturn(1L);
    when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
    when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
    when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong()))
      .thenReturn(mock(StoreFileScanner.class));
    when(sf.getReader()).thenReturn(r);
    when(sf.createReader()).thenReturn(r);
    return new CompactionRequest(Arrays.asList(sf));
  }
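
How such a request is typically consumed in a compactor test (a sketch; the getFiles() accessor is assumed here):

  CompactionRequest request = createDummyRequest();
  // One stubbed StoreFile, no real HFile I/O: the mocked reader and scanner
  // mean the compactor under test gets its KVs from a fake scanner instead.
  assertEquals(1, request.getFiles().size());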
