Class org.apache.hadoop.hbase.regionserver.wal.HLog

Examples of org.apache.hadoop.hbase.regionserver.wal.HLog.Reader
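
For orientation, a minimal read loop over a single WAL file might look like the sketch below. This is an assumption-laden sketch, not verbatim HBase code: the HLog.getReader factory reflects the 0.92/0.94-era API, and the WAL path shown is hypothetical.

      // Minimal sketch: open a WAL file, iterate its entries, close the reader.
      // HLog.getReader and the path below are assumptions (0.92/0.94-era API).
      Configuration conf = HBaseConfiguration.create();
      FileSystem fs = FileSystem.get(conf);
      Path walPath = new Path("/hbase/.logs/host,60020,1300000000000/host%2C60020%2C1300000000000.1300000000001");
      HLog.Reader reader = HLog.getReader(fs, walPath, conf);
      try {
        HLog.Entry entry;
        while ((entry = reader.next()) != null) {
          // Each Entry pairs an HLogKey (region, table, sequence id) with a WALEdit.
          System.out.println(entry.getKey() + " kvs=" + entry.getEdit().size());
        }
      } finally {
        reader.close();
      }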



      /* Produce a mock reader that generates fake entries */
      protected Reader getReader(FileSystem fs, Path curLogFile, Configuration conf)
      throws IOException {
        Reader mockReader = Mockito.mock(Reader.class);
        Mockito.doAnswer(new Answer<HLog.Entry>() {
          int index = 0;

          @Override
          public HLog.Entry answer(InvocationOnMock invocation) throws Throwable {
            // Completion sketch: numFakeEdits and createTestEntry are hypothetical
            // test fixtures. Returning null signals end-of-log.
            if (index >= numFakeEdits) return null;
            return createTestEntry(index++);
          }
        }).when(mockReader).next();
        return mockReader;
      }
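The completed stub above wires Mockito's doAnswer(...).when(mockReader).next() so that each call to next() hands back another fake entry and finally null, letting split code run against an in-memory log; numFakeEdits and createTestEntry are hypothetical test fixtures, not part of the HLog API.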


        Path logPath = log.getPath();
        long logLength = log.getLen();
        splitSize += logLength;
        logAndReport("Splitting hlog " + (i++ + 1) + " of " + logfiles.length
            + ": " + logPath + ", length=" + logLength);
        Reader in;
        try {
          in = getReader(fs, log, conf, skipErrors);
          if (in != null) {
            parseHLog(in, logPath, entryBuffers, fs, conf, skipErrors);
            try {
              in.close();
            } catch (IOException e) {
              LOG.warn("Close log reader threw exception -- continuing",
                  e);
            }
          }
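Note on this excerpt: a failure from the reader's close() is deliberately logged and swallowed ("continuing"), so a broken reader teardown does not abort the split of the remaining log files.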

    int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
    Path logPath = logfile.getPath();
    long logLength = logfile.getLen();
    LOG.info("Splitting hlog: " + logPath + ", length=" + logLength);
    status.setStatus("Opening log file");
    Reader in = null;
    try {
      in = getReader(fs, logfile, conf, skipErrors);
    } catch (CorruptedLogFileException e) {
      LOG.warn("Could not get reader, corrupted log file " + logPath, e);
      ZKSplitLog.markCorrupted(rootDir, tmpname, fs);
      isCorrupted = true;
    }
    if (in == null) {
      status.markComplete("Was nothing to split in log file");
      LOG.warn("Nothing to split in log file " + logPath);
      return true;
    }
    long t = EnvironmentEdgeManager.currentTimeMillis();
    long last_report_at = t;
    if (reporter != null && reporter.progress() == false) {
      status.markComplete("Failed: reporter.progress asked us to terminate");
      return false;
    }
    // Report progress every so many edits and/or files opened (opening a file
    // takes a bit of time).
    int editsCount = 0;
    int numNewlyOpenedFiles = 0;
    Entry entry;
    try {
      while ((entry = getNextLogLine(in, logPath, skipErrors)) != null) {
        byte[] region = entry.getKey().getEncodedRegionName();
        Object o = logWriters.get(region);
        if (o == BAD_WRITER) {
          continue;
        }
        WriterAndPath wap = (WriterAndPath)o;
        if (wap == null) {
          wap = createWAP(region, entry, rootDir, tmpname, fs, conf);
          numNewlyOpenedFiles++;
          if (wap == null) {
            // ignore edits from this region. It doesn't exist anymore.
            // It was probably already split.
            logWriters.put(region, BAD_WRITER);
            continue;
          } else {
            logWriters.put(region, wap);
          }
        }
        wap.w.append(entry);
        editsCount++;
        // If sufficient edits have passed OR we've opened a few files, check if
        // we should report progress.
        if (editsCount % interval == 0 ||
            (numNewlyOpenedFiles > numOpenedFilesBeforeReporting)) {
          // Zero out files counter each time we fall in here.
          numNewlyOpenedFiles = 0;
          String countsStr = "edits=" + editsCount + ", files=" + logWriters.size();
          status.setStatus("Split " + countsStr);
          long t1 = EnvironmentEdgeManager.currentTimeMillis();
          if ((t1 - last_report_at) > period) {
            last_report_at = t1; // advance to the current time, not the loop start
            if (reporter != null && reporter.progress() == false) {
              status.markComplete("Failed: reporter.progress asked us to terminate; " + countsStr);
              progress_failed = true;
              return false;
            }
          }
        }
      }
    } catch (CorruptedLogFileException e) {
      LOG.warn("Could not parse, corrupted log file " + logPath, e);
      ZKSplitLog.markCorrupted(rootDir, tmpname, fs);
      isCorrupted = true;
    } catch (IOException e) {
      e = RemoteExceptionHandler.checkIOException(e);
      throw e;
    } finally {
      boolean allWritersClosed = false;
      try {
        int n = 0;
        for (Object o : logWriters.values()) {
          long t1 = EnvironmentEdgeManager.currentTimeMillis();
          if ((t1 - last_report_at) > period) {
            last_report_at = t1; // advance to the current time, not the loop start
            if ((progress_failed == false) && (reporter != null)
                && (reporter.progress() == false)) {
              progress_failed = true;
            }
          }
          if (o == BAD_WRITER) {
            continue;
          }
          n++;
          WriterAndPath wap = (WriterAndPath) o;
          wap.writerClosed = true;
          wap.w.close();
          LOG.debug("Closed " + wap.p);
          Path dst = getCompletedRecoveredEditsFilePath(wap.p);
          if (!dst.equals(wap.p) && fs.exists(dst)) {
            LOG.warn("Found existing old edits file. It could be the "
                + "result of a previous failed split attempt. Deleting " + dst
                + ", length=" + fs.getFileStatus(dst).getLen());
            if (!fs.delete(dst, false)) {
              LOG.warn("Failed deleting of old " + dst);
              throw new IOException("Failed deleting of old " + dst);
            }
          }
          // Skip the unit tests which create a splitter that reads and writes
          // the
          // data without touching disk. TestHLogSplit#testThreading is an
          // example.
          if (fs.exists(wap.p)) {
            if (!fs.rename(wap.p, dst)) {
              throw new IOException("Failed renaming " + wap.p + " to " + dst);
            }
          }
        }
        allWritersClosed = true;
        String msg = "Processed " + editsCount + " edits across " + n
            + " regions; threw away edits for " + (logWriters.size() - n)
            + " regions; log file=" + logPath + ", is corrupted="
            + isCorrupted + ", progress failed=" + progress_failed;
        LOG.info(msg);
        status.markComplete(msg);
      } finally {
        if (!allWritersClosed) {
          for (Map.Entry<byte[], Object> logWritersEntry : logWriters.entrySet()) {
            Object o = logWritersEntry.getValue();
            if (o != BAD_WRITER) {
              WriterAndPath wap = (WriterAndPath) o;
              try {
                if (!wap.writerClosed) {
                  wap.writerClosed = true;
                  wap.w.close();
                }
              } catch (IOException e) {
                LOG.debug("Exception while closing the writer :", e);
              }
            }
          }
        }
        in.close();
      }
    }
    return !progress_failed;
  }
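The loop above drives a small holder object per region. Below is a plausible reconstruction of WriterAndPath, inferred from the fields used above (wap.p, wap.w, wap.writerClosed); the constructor shape is an assumption:

  // Reconstruction from usage above; not the verbatim HBase class.
  private static final class WriterAndPath {
    final Path p;          // recovered-edits file being written for one region
    final Writer w;        // HLog.Writer appending that region's entries
    boolean writerClosed;  // set once w.close() has been called

    WriterAndPath(final Path p, final Writer w) {
      this.p = p;
      this.w = w;
    }
  }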

  protected Reader getReader(FileSystem fs, FileStatus file, Configuration conf,
      boolean skipErrors)
      throws IOException, CorruptedLogFileException {
    Path path = file.getPath();
    long length = file.getLen();
    Reader in;


    // Check for possibly empty file. With appends, currently Hadoop reports a
    // zero length even if the file has been sync'd. Revisit if HDFS-376 or
    // HDFS-878 is committed.
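The excerpt stops at the empty-file check. Here is a hedged sketch of how the method might continue, under the assumptions that a lease-recovery helper (recoverFileLease) exists and that a zero-length file that still fails with EOFException is treated as having nothing to split:

    // Sketch (assumed continuation): under append semantics a sync'd file can
    // still report length 0, so a zero length alone is not conclusive.
    if (length <= 0) {
      LOG.warn("File " + path + " might be still open, length is 0");
    }
    try {
      recoverFileLease(fs, path, conf); // hypothetical lease-recovery helper
      in = getReader(fs, path, conf);   // open the real HLog.Reader
    } catch (EOFException e) {
      if (length <= 0) {
        LOG.warn("Could not open " + path + " for reading; file is empty", e);
        return null; // nothing to split
      }
      throw e;
    }
    return in;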


        Path logPath = log.getPath();
        long logLength = log.getLen();
        splitSize += logLength;
        logAndReport("Splitting hlog " + (i++ + 1) + " of " + logfiles.length
            + ": " + logPath + ", length=" + logLength);
        Reader in;
        try {
          // Actually, for meta-only hlogs we don't need to go through the
          // process of parsing and segregating by regions, since all the logs
          // are for meta only. However, there is a sequence number that can be
          // obtained only by parsing, so for now we parse all files.
          // TODO: optimize this part somehow
          in = getReader(fs, log, conf, skipErrors);
          if (in != null) {
            parseHLog(in, logPath, entryBuffers, fs, conf, skipErrors);
            try {
              in.close();
            } catch (IOException e) {
              LOG.warn("Close log reader threw exception -- continuing",
                  e);
            }
          }

    int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
    Path logPath = logfile.getPath();
    long logLength = logfile.getLen();
    LOG.info("Splitting hlog: " + logPath + ", length=" + logLength);
    status.setStatus("Opening log file");
    Reader in = null;
    try {
      in = getReader(fs, logfile, conf, skipErrors);
    } catch (CorruptedLogFileException e) {
      LOG.warn("Could not get reader, corrupted log file " + logPath, e);
      ZKSplitLog.markCorrupted(rootDir, logfile.getPath().getName(), fs);
      isCorrupted = true;
    }
    if (in == null) {
      status.markComplete("Was nothing to split in log file");
      LOG.warn("Nothing to split in log file " + logPath);
      return true;
    }
    long t = EnvironmentEdgeManager.currentTimeMillis();
    long last_report_at = t;
    if (reporter != null && reporter.progress() == false) {
      status.markComplete("Failed: reporter.progress asked us to terminate");
      return false;
    }
    // Report progress every so many edits and/or files opened (opening a file
    // takes a bit of time).
    int editsCount = 0;
    int numNewlyOpenedFiles = 0;
    Entry entry;
    try {
      while ((entry = getNextLogLine(in, logPath, skipErrors)) != null) {
        byte[] region = entry.getKey().getEncodedRegionName();
        Object o = logWriters.get(region);
        if (o == BAD_WRITER) {
          continue;
        }
        WriterAndPath wap = (WriterAndPath)o;
        if (wap == null) {
          wap = createWAP(region, entry, rootDir, fs, conf);
          numNewlyOpenedFiles++;
          if (wap == null) {
            // ignore edits from this region. It doesn't exist anymore.
            // It was probably already split.
            logWriters.put(region, BAD_WRITER);
            continue;
          } else {
            logWriters.put(region, wap);
          }
        }
        wap.w.append(entry);
        outputSink.updateRegionMaximumEditLogSeqNum(entry);
        editsCount++;
        // If sufficient edits have passed OR we've opened a few files, check if
        // we should report progress.
        if (editsCount % interval == 0 ||
            (numNewlyOpenedFiles > numOpenedFilesBeforeReporting)) {
          // Zero out files counter each time we fall in here.
          numNewlyOpenedFiles = 0;
          String countsStr = "edits=" + editsCount + ", files=" + logWriters.size();
          status.setStatus("Split " + countsStr);
          long t1 = EnvironmentEdgeManager.currentTimeMillis();
          if ((t1 - last_report_at) > period) {
            last_report_at = t1; // advance to the current time, not the loop start
            if (reporter != null && reporter.progress() == false) {
              status.markComplete("Failed: reporter.progress asked us to terminate; " + countsStr);
              progress_failed = true;
              return false;
            }
          }
        }
      }
    } catch (CorruptedLogFileException e) {
      LOG.warn("Could not parse, corrupted log file " + logPath, e);
      ZKSplitLog.markCorrupted(rootDir, logfile.getPath().getName(), fs);
      isCorrupted = true;
    } catch (IOException e) {
      e = RemoteExceptionHandler.checkIOException(e);
      throw e;
    } finally {
      boolean allWritersClosed = false;
      try {
        int n = 0;
        for (Map.Entry<byte[], Object> logWritersEntry : logWriters.entrySet()) {
          Object o = logWritersEntry.getValue();
          long t1 = EnvironmentEdgeManager.currentTimeMillis();
          if ((t1 - last_report_at) > period) {
            last_report_at = t1; // advance to the current time, not the loop start
            if ((progress_failed == false) && (reporter != null) && (reporter.progress() == false)) {
              progress_failed = true;
            }
          }
          if (o == BAD_WRITER) {
            continue;
          }
          n++;
          WriterAndPath wap = (WriterAndPath) o;
          wap.writerClosed = true;
          wap.w.close();
          LOG.debug("Closed " + wap.p);
          Path dst = getCompletedRecoveredEditsFilePath(wap.p,
              outputSink.getRegionMaximumEditLogSeqNum(logWritersEntry.getKey()));
          if (!dst.equals(wap.p) && fs.exists(dst)) {
            LOG.warn("Found existing old edits file. It could be the "
                + "result of a previous failed split attempt. Deleting " + dst + ", length="
                + fs.getFileStatus(dst).getLen());
            if (!HBaseFileSystem.deleteFileFromFileSystem(fs, dst)) {
              LOG.warn("Failed deleting of old " + dst);
              throw new IOException("Failed deleting of old " + dst);
            }
          }
          // Skip the unit tests which create a splitter that reads and writes the
          // data without touching disk. TestHLogSplit#testThreading is an
          // example.
          if (fs.exists(wap.p)) {
            if (!HBaseFileSystem.renameDirForFileSystem(fs, wap.p, dst)) {
              throw new IOException("Failed renaming " + wap.p + " to " + dst);
            }
            LOG.debug("Rename " + wap.p + " to " + dst);
          }
        }
        allWritersClosed = true;
        String msg = "Processed " + editsCount + " edits across " + n + " regions;"
            + " threw away edits for " + (logWriters.size() - n) + " regions; log file="
            + logPath + ", is corrupted=" + isCorrupted + ", progress failed=" + progress_failed;
        LOG.info(msg);
        status.markComplete(msg);
      } finally {
        if (!allWritersClosed) {
          for (Map.Entry<byte[], Object> logWritersEntry : logWriters.entrySet()) {
            Object o = logWritersEntry.getValue();
            if (o != BAD_WRITER) {
              WriterAndPath wap = (WriterAndPath) o;
              try {
                if (!wap.writerClosed) {
                  wap.writerClosed = true;
                  wap.w.close();
                }
              } catch (IOException e) {
                LOG.debug("Exception while closing the writer :", e);
              }
            }
          }
        }
        in.close();
      }
    }
    return !progress_failed;
  }
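Relative to the earlier version, this one also records the highest edit sequence id seen per region (outputSink.updateRegionMaximumEditLogSeqNum) and uses it when naming the completed recovered-edits file. Below is a minimal sketch of such bookkeeping, with method names mirroring the calls above but an assumed implementation:

  // Assumed shape of the OutputSink bookkeeping; the real class may differ.
  private final Map<byte[], Long> regionMaximumEditLogSeqNum =
      new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);

  void updateRegionMaximumEditLogSeqNum(HLog.Entry entry) {
    byte[] region = entry.getKey().getEncodedRegionName();
    Long current = regionMaximumEditLogSeqNum.get(region);
    long seq = entry.getKey().getLogSeqNum();
    if (current == null || seq > current.longValue()) {
      regionMaximumEditLogSeqNum.put(region, seq);
    }
  }

  Long getRegionMaximumEditLogSeqNum(byte[] region) {
    return regionMaximumEditLogSeqNum.get(region);
  }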

