Package org.htrace

Examples of org.htrace.TraceScope
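
All of the snippets below follow the same core HTrace idiom: open a TraceScope with Trace.startSpan, do the traced work, and close the scope in a finally block so the span is always completed and reported. A minimal, self-contained sketch of that idiom, assuming only the org.htrace API that appears in the excerpts (the class and method names here are illustrative):

import org.htrace.Sampler;
import org.htrace.Trace;
import org.htrace.TraceScope;

public class TraceScopeBasics {

  // Illustrative method; not taken from the excerpts below.
  void doTracedWork() {
    // Sampler.ALWAYS forces the span to be sampled even when no trace
    // is already in progress on this thread.
    TraceScope scope = Trace.startSpan("doTracedWork", Sampler.ALWAYS);
    try {
      Trace.addTimelineAnnotation("work starting");
      // ... the traced work goes here ...
    } finally {
      // Close the scope even on error so the span is completed and reported.
      scope.close();
    }
  }
}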


      }
      if (!closeBarrier.beginOp()) {
        LOG.debug("HLog closing. Skipping rolling of writer");
        return regionsToFlush;
      }
      TraceScope scope = Trace.startSpan("FSHLog.rollWriter");
      try {
        Path oldPath = getOldPath();
        Path newPath = getNewPath();
        // Any exception from here on is catastrophic, non-recoverable so we currently abort.
        FSHLog.Writer nextWriter = this.createWriterInstance(fs, newPath, conf);
        FSDataOutputStream nextHdfsOut = null;
        if (nextWriter instanceof ProtobufLogWriter) {
          nextHdfsOut = ((ProtobufLogWriter)nextWriter).getStream();
          // If a ProtobufLogWriter, go ahead and try and sync to force setup of pipeline.
          // If this fails, we just keep going.... it is an optimization, not the end of the world.
          preemptiveSync((ProtobufLogWriter)nextWriter);
        }
        tellListenersAboutPreLogRoll(oldPath, newPath);
        // NewPath could be equal to oldPath if replaceWriter fails.
        newPath = replaceWriter(oldPath, newPath, nextWriter, nextHdfsOut);
        tellListenersAboutPostLogRoll(oldPath, newPath);
        // Can we delete any of the old log files?
        if (getNumRolledLogFiles() > 0) {
          cleanOldLogs();
          regionsToFlush = findRegionsToForceFlush();
        }
      } finally {
        closeBarrier.endOp();
        assert scope == NullScope.INSTANCE || !scope.isDetached();
        scope.close();
      }
      return regionsToFlush;
    } finally {
      rollWriterLock.unlock();
    }


    // constructor BEFORE the ring buffer is set running so it is null on first time through
    // here; allow for that.
    SyncFuture syncFuture = null;
    SafePointZigZagLatch zigzagLatch = (this.ringBufferEventHandler == null)?
      null: this.ringBufferEventHandler.attainSafePoint();
    TraceScope scope = Trace.startSpan("FSHLog.replaceWriter");
    try {
      // Wait on the safe point to be achieved.  Send in a sync in case nothing has hit the
      // ring buffer between the above notification of writer that we want it to go to
      // 'safe point' and then here where we are waiting on it to attain safe point.  Use
      // 'sendSync' instead of 'sync' because we do not want this thread to block waiting on it
      // to come back.  Cleanup this syncFuture down below after we are ready to run again.
      try {
        if (zigzagLatch != null) {
          Trace.addTimelineAnnotation("awaiting safepoint");
          syncFuture = zigzagLatch.waitSafePoint(publishSyncOnRingBuffer());
        }
      } catch (FailedSyncBeforeLogCloseException e) {
        if (isUnflushedEntries()) throw e;
        // Else, let it pass through to the close.
        LOG.warn("Failed last sync but no outstanding unsync edits so falling through to close; " +
          e.getMessage());
      }

      // It is at the safe point.  Swap out writer from under the blocked writer thread.
      // TODO: This close is inline with the critical section.  Should it happen in the background?
      try {
        if (this.writer != null) {
          Trace.addTimelineAnnotation("closing writer");
          this.writer.close();
          Trace.addTimelineAnnotation("writer closed");
        }
        this.closeErrorCount.set(0);
      } catch (IOException ioe) {
        int errors = closeErrorCount.incrementAndGet();
        if (!isUnflushedEntries() && (errors <= this.closeErrorsTolerated)) {
          LOG.warn("Riding over failed WAL close of " + oldPath + ", cause=\"" +
            ioe.getMessage() + "\", errors=" + errors +
            "; THIS FILE WAS NOT CLOSED BUT ALL EDITS SYNCED SO SHOULD BE OK");
        } else {
          throw ioe;
        }
      }
      this.writer = nextWriter;
      this.hdfs_out = nextHdfsOut;
      int oldNumEntries = this.numEntries.get();
      this.numEntries.set(0);
      if (oldPath != null) {
        this.byWalRegionSequenceIds.put(oldPath, this.highestRegionSequenceIds);
        this.highestRegionSequenceIds = new HashMap<byte[], Long>();
        long oldFileLen = this.fs.getFileStatus(oldPath).getLen();
        this.totalLogSize.addAndGet(oldFileLen);
        LOG.info("Rolled WAL " + FSUtils.getPath(oldPath) + " with entries=" + oldNumEntries +
          ", filesize=" + StringUtils.byteDesc(oldFileLen) + "; new WAL " +
          FSUtils.getPath(newPath));
      } else {
        LOG.info("New WAL " + FSUtils.getPath(newPath));
      }
    } catch (InterruptedException ie) {
      // Perpetuate the interrupt
      Thread.currentThread().interrupt();
    } catch (IOException e) {
      long count = getUnflushedEntriesCount();
      LOG.error("Failed close of HLog writer " + oldPath + ", unflushedEntries=" + count, e);
      throw new FailedLogCloseException(oldPath + ", unflushedEntries=" + count, e);
    } finally {
      try {
        // Let the writer thread go regardless, whether error or not.
        if (zigzagLatch != null) {
          zigzagLatch.releaseSafePoint();
          // It will be null if we failed our wait on safe point above.
          if (syncFuture != null) blockOnSync(syncFuture);
        }
      } finally {
        scope.close();
      }
    }
    return newPath;
  }

  throws IOException {
    if (!this.enabled) return this.highestUnsyncedSequence;
    if (this.closed) throw new IOException("Cannot append; log is closed");
    // Make a trace scope for the append.  It is closed on other side of the ring buffer by the
    // single consuming thread.  Don't have to worry about it.
    TraceScope scope = Trace.startSpan("FSHLog.append");

    // This is crazy how much it takes to make an edit.  Do we need all this stuff!!!!????  We need
    // all this to make a key and then below to append the edit, we need to carry htd, info,
    // etc. all over the ring buffer.
    FSWALEntry entry = null;
    long sequence = this.disruptor.getRingBuffer().next();
    try {
      RingBufferTruck truck = this.disruptor.getRingBuffer().get(sequence);
      // Construction of FSWALEntry sets a latch.  The latch is thrown just after we stamp the
      // edit with its edit/sequence id.  The below entry.getRegionSequenceId will wait on the
      // latch to be thrown.  TODO: reuse FSWALEntry as we do SyncFuture rather than creating one per append.
      entry = new FSWALEntry(sequence, key, edits, sequenceId, inMemstore, htd, hri, memstoreCells);
      truck.loadPayload(entry, scope.detach());
    } finally {
      this.disruptor.getRingBuffer().publish(sequence);
    }
    // doSync is set in tests.  Usually we arrive in here via appendNoSync w/ the sync called after
    // all edits on a handler have been added.
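The append excerpt above hands its trace across threads: scope.detach() returns the underlying Span without closing it, the detached span is loaded onto the ring buffer next to the entry, and, as the comment says, the scope is closed on the other side by the single consuming thread, presumably after re-attaching the span with Trace.continueSpan (the consumer side is not shown in the excerpt). A minimal sketch of that producer/consumer hand-off, assuming a plain BlockingQueue in place of the ring buffer (the payload class and method names are illustrative):

import java.util.concurrent.BlockingQueue;

import org.htrace.Span;
import org.htrace.Trace;
import org.htrace.TraceScope;

public class DetachedSpanHandOff {

  // Illustrative payload: an item plus the detached span that traces it.
  static class TracedItem {
    final String item;
    final Span span;
    TracedItem(String item, Span span) { this.item = item; this.span = span; }
  }

  // Producer: open a span, detach it, and ship it with the payload.
  void produce(BlockingQueue<TracedItem> queue, String item) throws InterruptedException {
    TraceScope scope = Trace.startSpan("produce");
    queue.put(new TracedItem(item, scope.detach()));
    // The scope is detached rather than closed; the consumer now owns the span.
  }

  // Consumer: re-attach the span on this thread, then close it when done.
  void consume(BlockingQueue<TracedItem> queue) throws InterruptedException {
    TracedItem traced = queue.take();
    TraceScope scope = Trace.continueSpan(traced.span);
    try {
      // ... process traced.item ...
    } finally {
      scope.close();
    }
  }
}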

    return this.getNumCurrentReplicas != null;
  }

  @Override
  public void hsync() throws IOException {
    TraceScope scope = Trace.startSpan("FSHLog.hsync");
    try {
      scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach()));
    } finally {
      assert scope == NullScope.INSTANCE || !scope.isDetached();
      scope.close();
    }
  }

    }
  }

  @Override
  public void hflush() throws IOException {
    TraceScope scope = Trace.startSpan("FSHLog.hflush");
    try {
      scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach()));
    } finally {
      assert scope == NullScope.INSTANCE || !scope.isDetached();
      scope.close();
    }
  }
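hsync() and hflush() above use detach in a round-trip form: the scope is detached, the raw Span is handed to publishSyncThenBlockOnCompletion, the Span that comes back is re-attached with Trace.continueSpan, and the finally block closes it. The assert records the invariant that by close time the scope is either the no-op NullScope (tracing was off) or has been re-attached; closing a still-detached scope would be a bug. A minimal sketch of the same round trip, with blockOnWork as an illustrative stand-in for the real blocking call:

import org.htrace.NullScope;
import org.htrace.Span;
import org.htrace.Trace;
import org.htrace.TraceScope;

public class DetachRoundTrip {

  // Illustrative stand-in for a blocking call that carries the span along
  // and hands it back to the caller once the work completes.
  Span blockOnWork(Span span) {
    // ... block until the work is done ...
    return span;
  }

  void tracedSync() {
    TraceScope scope = Trace.startSpan("tracedSync");
    try {
      // Detach so the span survives the hand-off, then re-attach whatever
      // span comes back before closing it below.
      scope = Trace.continueSpan(blockOnWork(scope.detach()));
    } finally {
      // Either tracing was off (NullScope) or the scope has been re-attached.
      assert scope == NullScope.INSTANCE || !scope.isDetached();
      scope.close();
    }
  }
}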

      int numAfterDone = 0;
      int resetCount = 0;
      // Keep trying until the rs is back up and we've gotten a put through
      while (numAfterDone < maxIterations) {
        long start = System.nanoTime();
        TraceScope scope = null;
        try {
          scope = Trace.startSpan(getSpanName(), AlwaysSampler.INSTANCE);
          boolean actionResult = doAction();
          if (actionResult && future.isDone()) {
            numAfterDone++;
          }

        // the following Exceptions derive from DoNotRetryIOException. They are considered
        // fatal for the purpose of this test. If we see one of these, it means something is
        // broken and needs investigation. This is not the case for all children of DNRIOE.
        // Unfortunately, this is an explicit enumeration and will need to be refreshed periodically.
        // See HBASE-9655 for further discussion.
        } catch (AccessDeniedException e) {
          throw e;
        } catch (CoprocessorException e) {
          throw e;
        } catch (FatalConnectionException e) {
          throw e;
        } catch (InvalidFamilyOperationException e) {
          throw e;
        } catch (NamespaceExistException e) {
          throw e;
        } catch (NamespaceNotFoundException e) {
          throw e;
        } catch (NoSuchColumnFamilyException e) {
          throw e;
        } catch (TableExistsException e) {
          throw e;
        } catch (TableNotFoundException e) {
          throw e;
        } catch (RetriesExhaustedException e){
          throw e;

        // Everything else is potentially recoverable on the application side. For instance, a CM
        // action kills the RS that hosted a scanner the client was using. Continued use of that
        // scanner should be terminated, but a new scanner can be created and the read attempted
        // again.
        } catch (Exception e) {
          resetCount++;
          if (resetCount < maxIterations) {
            LOG.info("Non-fatal exception while running " + this.toString()
              + ". Resetting loop counter", e);
            numAfterDone = 0;
          } else {
            LOG.info("Too many unexpected Exceptions. Aborting.", e);
            throw e;
          }
        } finally {
          if (scope != null) {
            scope.close();
          }
        }
        result.addResult(System.nanoTime() - start, scope.getSpan());
      }
      return result;
    }
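The retry loop above times each action with System.nanoTime() and, after the scope is closed, records the elapsed nanoseconds together with scope.getSpan(), so each measured latency can be tied back to its trace span. A minimal sketch of that measure-and-record step; doAction and recordResult are illustrative stand-ins, and Sampler.ALWAYS is used here in place of AlwaysSampler.INSTANCE:

import org.htrace.Sampler;
import org.htrace.Span;
import org.htrace.Trace;
import org.htrace.TraceScope;

public class TimedTracedAction {

  void doAction() {
    // ... the work being measured ...
  }

  void recordResult(long nanos, Span span) {
    // ... store the latency keyed by the span for later correlation ...
  }

  void runOnce() {
    long start = System.nanoTime();
    // Force sampling so every timed action produces a span.
    TraceScope scope = Trace.startSpan("timedAction", Sampler.ALWAYS);
    try {
      doAction();
    } finally {
      scope.close();
    }
    // The scope still exposes its span after close(), so the span can be
    // recorded alongside the wall-clock measurement, as the loop above does.
    recordResult(System.nanoTime() - start, scope.getSpan());
  }
}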

      service.submit(runnable);
    }
  }

  private void createTable() throws IOException {
    TraceScope createScope = null;
    try {
      createScope = Trace.startSpan("createTable", Sampler.ALWAYS);
      util.createTable(tableName, familyName);
    } finally {
      if (createScope != null) createScope.close();
    }
  }


  private void deleteTable() throws IOException {
    TraceScope deleteScope = null;

    try {
      if (admin.tableExists(tableName)) {
        deleteScope = Trace.startSpan("deleteTable", Sampler.ALWAYS);
        util.deleteTable(tableName);
      }
    } finally {
      if (deleteScope != null) deleteScope.close();
    }
  }

  private LinkedBlockingQueue<Long> insertData() throws IOException, InterruptedException {
    LinkedBlockingQueue<Long> rowKeys = new LinkedBlockingQueue<Long>(25000);
    HTable ht = new HTable(util.getConfiguration(), this.tableName);
    byte[] value = new byte[300];
    for (int x = 0; x < 5000; x++) {
      TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS);
      try {
        ht.setAutoFlush(false, true);
        for (int i = 0; i < 5; i++) {
          long rk = random.nextLong();
          rowKeys.add(rk);
          Put p = new Put(Bytes.toBytes(rk));
          for (int y = 0; y < 10; y++) {
            random.nextBytes(value);
            p.add(familyName, Bytes.toBytes(random.nextLong()), value);
          }
          ht.put(p);
        }
        if ((x % 1000) == 0) {
          admin.flush(tableName);
        }
      } finally {
        traceScope.close();
      }
    }
    admin.flush(tableName);
    return rowKeys;
  }

    FSUtils.setFsDefault(getConf(), FSUtils.getRootDir(getConf()));
    FileSystem fs = FileSystem.get(getConf());
    LOG.info("FileSystem: " + fs);

    SpanReceiverHost receiverHost = trace ? SpanReceiverHost.getInstance(getConf()) : null;
    TraceScope scope = Trace.startSpan("HLogPerfEval", trace ? Sampler.ALWAYS : Sampler.NEVER);

    try {
      if (rootRegionDir == null) {
        rootRegionDir = TEST_UTIL.getDataTestDirOnTestFS("HLogPerformanceEvaluation");
      }
      rootRegionDir = rootRegionDir.makeQualified(fs);
      cleanRegionRootDir(fs, rootRegionDir);
      // Initialize Table Descriptor
      HTableDescriptor htd = createHTableDescriptor(numFamilies);
      final long whenToRoll = roll;
      final HLog hlog = new FSHLog(fs, rootRegionDir, "wals", getConf()) {

        @Override
        public void postSync(final long timeInNanos, final int handlerSyncs) {
          super.postSync(timeInNanos, handlerSyncs);
          syncMeter.mark();
          syncHistogram.update(timeInNanos);
          syncCountHistogram.update(handlerSyncs);
        }

        @Override
        public long postAppend(final HLog.Entry entry, final long elapsedTime) {
          long size = super.postAppend(entry, elapsedTime);
          appendMeter.mark(size);
          return size;
        }
      };
      hlog.registerWALActionsListener(new WALActionsListener() {
        private int appends = 0;

        @Override
        public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
            WALEdit logEdit) {
          this.appends++;
          if (this.appends % whenToRoll == 0) {
            LOG.info("Rolling after " + appends + " edits");
            // We used to call rollWriter explicitly but changed it to a request
            // to avoid deadlock (there are fewer threads running in this class than
            // in the regionserver -- the regionserver does not have this issue).
            ((FSHLog)hlog).requestLogRoll();
          }
        }

        @Override
        public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit) {
        }

        @Override
        public void preLogRoll(Path oldPath, Path newPath) throws IOException {
        }

        @Override
        public void preLogArchive(Path oldPath, Path newPath) throws IOException {
        }

        @Override
        public void postLogRoll(Path oldPath, Path newPath) throws IOException {
        }

        @Override
        public void postLogArchive(Path oldPath, Path newPath) throws IOException {
        }

        @Override
        public void logRollRequested() {
        }

        @Override
        public void logCloseRequested() {
        }
      });
      hlog.rollWriter();
      HRegion region = null;

      try {
        region = openRegion(fs, rootRegionDir, htd, hlog);
        ConsoleReporter.enable(this.metrics, 30, TimeUnit.SECONDS);
        long putTime =
          runBenchmark(Trace.wrap(
              new HLogPutBenchmark(region, htd, numIterations, noSync, syncInterval, traceFreq)),
            numThreads);
        logBenchmarkResult("Summary: threads=" + numThreads + ", iterations=" + numIterations +
          ", syncInterval=" + syncInterval, numIterations * numThreads, putTime);
       
        if (region != null) {
          closeRegion(region);
          region = null;
        }
        if (verify) {
          Path dir = ((FSHLog) hlog).getDir();
          long editCount = 0;
          FileStatus [] fsss = fs.listStatus(dir);
          if (fsss.length == 0) throw new IllegalStateException("No WAL found");
          for (FileStatus fss: fsss) {
            Path p = fss.getPath();
            if (!fs.exists(p)) throw new IllegalStateException(p.toString());
            editCount += verify(p, verbose);
          }
          long expected = numIterations * numThreads;
          if (editCount != expected) {
            throw new IllegalStateException("Counted=" + editCount + ", expected=" + expected);
          }
        }
      } finally {
        if (region != null) closeRegion(region);
        // Remove the root dir for this test region
        if (cleanup) cleanRegionRootDir(fs, rootRegionDir);
      }
    } finally {
      // We may be called inside a test that wants to keep on using the fs.
      if (!noclosefs) fs.close();
      scope.close();
      if (receiverHost != null) receiverHost.closeReceivers();
    }

    return(0);
  }
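The benchmark above submits its workload through Trace.wrap(...), which captures the caller's current span at wrap time and, when the Runnable later runs, opens a child scope under that span so the worker's activity lands in the same trace. A minimal sketch of that usage with a plain executor (the pool setup and task body are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.htrace.Sampler;
import org.htrace.Trace;
import org.htrace.TraceScope;

public class TraceWrapExample {

  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    // Open a parent span on the submitting thread.
    TraceScope scope = Trace.startSpan("parentWork", Sampler.ALWAYS);
    try {
      // Trace.wrap captures the current span; the wrapped Runnable's work is
      // traced under "parentWork" even though it runs on a pool thread.
      pool.submit(Trace.wrap(new Runnable() {
        @Override
        public void run() {
          Trace.addTimelineAnnotation("running on pool thread");
          // ... worker task goes here ...
        }
      }));
    } finally {
      scope.close();
    }
    pool.shutdown();
  }
}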
