Class org.apache.hadoop.hbase.regionserver.wal.HLog

Examples of org.apache.hadoop.hbase.regionserver.wal.HLog.Reader
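All of the snippets below share the same basic pattern: obtain a Reader from HLogFactory.createReader, iterate over HLog.Entry objects with next() until it returns null, and close the reader when done. A minimal sketch of that pattern, assuming fs, path, and conf are an already-initialized FileSystem, log-file Path, and Configuration:

  // Sketch only: open an HLog reader, walk its entries, and always close it.
  HLog.Reader reader = HLogFactory.createReader(fs, path, conf);
  try {
    HLog.Entry entry;
    while ((entry = reader.next()) != null) {
      HLogKey key = entry.getKey();   // region, table, sequence id, write time
      WALEdit edit = entry.getEdit(); // the edits appended under that key
      // process key and edit here
    }
  } finally {
    reader.close();
  }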


  protected Reader getReader(FileSystem fs, FileStatus file, Configuration conf,
      boolean skipErrors, CancelableProgressable reporter)
      throws IOException, CorruptedLogFileException {
    Path path = file.getPath();
    long length = file.getLen();
    Reader in;

    // Check for possibly empty file. With appends, currently Hadoop reports a
    // zero length even if the file has been sync'd. Revisit if HDFS-376 or
    // HDFS-878 is committed.
    if (length <= 0) {
View Full Code Here
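The snippet is cut off at the zero-length check. A hedged sketch of one way that branch could be handled, consistent with the splitLogFile code further down that treats a null reader as "nothing to split"; the actual method may instead try to recover the file lease and reopen before giving up:

      // Sketch only: treat a genuinely empty file as having nothing to read,
      // so the caller can skip it (splitLogFile below does exactly that when
      // getReader returns null).
      if (length <= 0) {
        LOG.warn("File " + path + " might still be open; reported length is 0");
        return null;
      }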


      }

      /* Produce a mock reader that generates fake entries */
      protected Reader getReader(FileSystem fs, Path curLogFile,
          Configuration conf, CancelableProgressable reporter) throws IOException {
        Reader mockReader = Mockito.mock(Reader.class);
        Mockito.doAnswer(new Answer<HLog.Entry>() {
          int index = 0;

          @Override
          public HLog.Entry answer(InvocationOnMock invocation) throws Throwable {
View Full Code Here
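The mocked Answer above is also truncated. A hedged sketch of how it might be completed so the fake reader hands back a fixed number of entries and then signals end-of-log with null; the FAKE_ENTRY_COUNT constant and the mocked entries are purely illustrative, and the original test presumably builds real HLog.Entry objects instead:

        // Sketch only: return FAKE_ENTRY_COUNT fake entries, then null.
        final int FAKE_ENTRY_COUNT = 10;
        Mockito.doAnswer(new Answer<HLog.Entry>() {
          int index = 0;

          @Override
          public HLog.Entry answer(InvocationOnMock invocation) throws Throwable {
            if (index >= FAKE_ENTRY_COUNT) {
              return null;                         // end of the fake log
            }
            index++;
            return Mockito.mock(HLog.Entry.class); // one fake entry per call
          }
        }).when(mockReader).next();
        return mockReader;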

  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final TableName tableName =
        TableName.valueOf("tablename");
    final byte [] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
      // Write columns with qualifiers "0", "1", "2", ... and matching
      // single-byte values '0', '1', '2', ...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd, sequenceId);
      log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes());
      log.close();
      Path filename = ((FSHLog) log).computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLogFactory.createReader(fs, filename, conf);
      HLog.Entry entry = reader.next();
      assertEquals(COL_COUNT, entry.getEdit().size());
      int idx = 0;
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(tableName.equals(entry.getKey().getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte)(idx + '0'), val.getValue()[0]);
        System.out.println(entry.getKey() + " " + val);
        idx++;
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }
View Full Code Here

        Path logPath = log.getPath();
        long logLength = log.getLen();
        splitSize += logLength;
        logAndReport("Splitting hlog " + (i++ + 1) + " of " + logfiles.length
            + ": " + logPath + ", length=" + logLength);
        Reader in;
        try {
          in = getReader(fs, log, conf, skipErrors);
          if (in != null) {
            parseHLog(in, logPath, entryBuffers, fs, conf, skipErrors);
            try {
              in.close();
            } catch (IOException e) {
              LOG.warn("Close log reader threw exception -- continuing",
                  e);
            }
          }
View Full Code Here

    int numOpenedFilesBeforeReporting =
        conf.getInt("hbase.splitlog.report.openedfiles", 3);
    Path logPath = logfile.getPath();
    long logLength = logfile.getLen();
    LOG.info("Splitting hlog: " + logPath + ", length=" + logLength);
    status.setStatus("Opening log file");
    Reader in = null;
    try {
      in = getReader(fs, logfile, conf, skipErrors);
    } catch (CorruptedLogFileException e) {
      LOG.warn("Could not get reader, corrupted log file " + logPath, e);
      ZKSplitLog.markCorrupted(rootDir, logfile.getPath().getName(), fs);
      isCorrupted = true;
    }
    if (in == null) {
      status.markComplete("Was nothing to split in log file");
      LOG.warn("Nothing to split in log file " + logPath);
      return true;
    }
    long t = EnvironmentEdgeManager.currentTimeMillis();
    long last_report_at = t;
    if (reporter != null && reporter.progress() == false) {
      status.markComplete("Failed: reporter.progress asked us to terminate");
      return false;
    }
    // Report progress every so many edits and/or files opened (opening a file
    // takes a bit of time).
    int editsCount = 0;
    int numNewlyOpenedFiles = 0;
    Entry entry;
    try {
      while ((entry = getNextLogLine(in, logPath, skipErrors)) != null) {
        byte[] region = entry.getKey().getEncodedRegionName();
        Object o = logWriters.get(region);
        if (o == BAD_WRITER) {
          continue;
        }
        WriterAndPath wap = (WriterAndPath)o;
        if (wap == null) {
          wap = createWAP(region, entry, rootDir, fs, conf);
          numNewlyOpenedFiles++;
          if (wap == null) {
            // ignore edits from this region. It doesn't exist anymore.
            // It was probably already split.
            logWriters.put(region, BAD_WRITER);
            continue;
          } else {
            logWriters.put(region, wap);
          }
        }
        wap.w.append(entry);
        outputSink.updateRegionMaximumEditLogSeqNum(entry);
        editsCount++;
        // If sufficient edits have passed OR we've opened a few files, check if
        // we should report progress.
        if (editsCount % interval == 0 ||
            (numNewlyOpenedFiles > numOpenedFilesBeforeReporting)) {
          // Zero out files counter each time we fall in here.
          numNewlyOpenedFiles = 0;
          String countsStr = "edits=" + editsCount + ", files=" + logWriters.size();
          status.setStatus("Split " + countsStr);
          long t1 = EnvironmentEdgeManager.currentTimeMillis();
          if ((t1 - last_report_at) > period) {
            last_report_at = t1; // advance the last-report time to now
            if (reporter != null && reporter.progress() == false) {
              status.markComplete("Failed: reporter.progress asked us to terminate; " + countsStr);
              progress_failed = true;
              return false;
            }
          }
        }
      }
    } catch (CorruptedLogFileException e) {
      LOG.warn("Could not parse, corrupted log file " + logPath, e);
      ZKSplitLog.markCorrupted(rootDir, logfile.getPath().getName(), fs);
      isCorrupted = true;
    } catch (IOException e) {
      e = RemoteExceptionHandler.checkIOException(e);
      throw e;
    } finally {
      boolean allWritersClosed = false;
      try {
        int n = 0;
        for (Map.Entry<byte[], Object> logWritersEntry : logWriters.entrySet()) {
          Object o = logWritersEntry.getValue();
          long t1 = EnvironmentEdgeManager.currentTimeMillis();
          if ((t1 - last_report_at) > period) {
            last_report_at = t1; // advance the last-report time to now
            if ((progress_failed == false) && (reporter != null) && (reporter.progress() == false)) {
              progress_failed = true;
            }
          }
          if (o == BAD_WRITER) {
            continue;
          }
          n++;
          WriterAndPath wap = (WriterAndPath) o;
          wap.writerClosed = true;
          wap.w.close();
          LOG.debug("Closed " + wap.p);
          Path dst = getCompletedRecoveredEditsFilePath(wap.p,
              outputSink.getRegionMaximumEditLogSeqNum(logWritersEntry.getKey()));
          if (!dst.equals(wap.p) && fs.exists(dst)) {
            LOG.warn("Found existing old edits file. It could be the "
                + "result of a previous failed split attempt. Deleting " + dst + ", length="
                + fs.getFileStatus(dst).getLen());
            if (!fs.delete(dst, false)) {
              LOG.warn("Failed deleting of old " + dst);
              throw new IOException("Failed deleting of old " + dst);
            }
          }
          // Skip the unit tests which create a splitter that reads and writes the
          // data without touching disk. TestHLogSplit#testThreading is an
          // example.
          if (fs.exists(wap.p)) {
            if (!fs.rename(wap.p, dst)) {
              throw new IOException("Failed renaming " + wap.p + " to " + dst);
            }
            LOG.debug("Rename " + wap.p + " to " + dst);
          }
        }
        allWritersClosed = true;
        String msg = "Processed " + editsCount + " edits across " + n + " regions;"
            + " threw away edits for " + (logWriters.size() - n) + " regions; log file="
            + logPath + ", corrupted=" + isCorrupted + ", progress failed=" + progress_failed;
        LOG.info(msg);
        status.markComplete(msg);
      } finally {
        if (!allWritersClosed) {
          for (Map.Entry<byte[], Object> logWritersEntry : logWriters.entrySet()) {
            Object o = logWritersEntry.getValue();
            if (o != BAD_WRITER) {
              WriterAndPath wap = (WriterAndPath) o;
              try {
                if (!wap.writerClosed) {
                  wap.writerClosed = true;
                  wap.w.close();
                }
              } catch (IOException e) {
                LOG.debug("Exception while closing the writer :", e);
              }
            }
          }
        }
        in.close();
      }
    }
    return !progress_failed;
  }
View Full Code Here
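The per-region bookkeeping in splitLogFile above relies on logWriters mapping an encoded region name either to a live WriterAndPath or to the BAD_WRITER sentinel once the region is known to be gone. A stripped-down sketch of those two declarations, reusing the field names from the snippet; the TreeMap with a byte-array comparator is an assumption, made because byte[] keys have no content-based equals/hashCode:

  // Sketch only: a sentinel distinguishes "region seen but unusable"
  // (BAD_WRITER) from "region not seen yet" (null) on lookup.
  private static final Object BAD_WRITER = new Object();

  // byte[] keys need an explicit comparator rather than a plain HashMap.
  private final Map<byte[], Object> logWriters =
      new TreeMap<byte[], Object>(Bytes.BYTES_COMPARATOR);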

  protected Reader getReader(FileSystem fs, FileStatus file, Configuration conf,
      boolean skipErrors)
      throws IOException, CorruptedLogFileException {
    Path path = file.getPath();
    long length = file.getLen();
    Reader in;


    // Check for possibly empty file. With appends, currently Hadoop reports a
    // zero length even if the file has been sync'd. Revisit if HDFS-376 or
    // HDFS-878 is committed.
View Full Code Here

    }
    if (outputJSON && !persistentOutput) {
      out.print("[");
      firstTxn = true;
    }
    Reader log = HLogFactory.createReader(fs, p, conf);
    try {
      FSHLog.Entry entry;
      while ((entry = log.next()) != null) {
        HLogKey key = entry.getKey();
        WALEdit edit = entry.getEdit();
        // begin building a transaction structure
        Map<String, Object> txn = key.toStringMap();
        long writeTime = key.getWriteTime();
        // check output filters
        if (sequence >= 0 && ((Long) txn.get("sequence")) != sequence)
          continue;
        if (region != null && !((String) txn.get("region")).equals(region))
          continue;
        // initialize list into which we will store atomic actions
        List<Map> actions = new ArrayList<Map>();
        for (Cell cell : edit.getCells()) {
          // add atomic operation to txn
          Map<String, Object> op = new HashMap<String, Object>(toStringMap(cell));
          if (outputValues) op.put("value", Bytes.toStringBinary(cell.getValue()));
          // check row output filter
          if (row == null || ((String) op.get("row")).equals(row))
            actions.add(op);
        }
        if (actions.size() == 0)
          continue;
        txn.put("actions", actions);
        if (outputJSON) {
          // JSON output is a straightforward "toString" on the txn object
          if (firstTxn)
            firstTxn = false;
          else
            out.print(",");
          // encode and print JSON
          out.print(MAPPER.writeValueAsString(txn));
        } else {
          // Pretty output, complete with indentation by atomic action
          out.println("Sequence " + txn.get("sequence") + " "
              + "from region " + txn.get("region") + " " + "in table "
              + txn.get("table") + " at write timestamp: " + new Date(writeTime));
          for (int i = 0; i < actions.size(); i++) {
            Map op = actions.get(i);
            out.println("  Action:");
            out.println("    row: " + op.get("row"));
            out.println("    column: " + op.get("family") + ":"
                + op.get("qualifier"));
            out.println("    timestamp: "
                + (new Date((Long) op.get("timestamp"))));
            if (op.get("tag") != null) {
              out.println("    tag: " + op.get("tag"));
            }
            if (outputValues)
              out.println("    value: " + op.get("value"));
          }
        }
      }
    } finally {
      log.close();
    }
    if (outputJSON && !persistentOutput) {
      out.print("]");
    }
  }
View Full Code Here
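MAPPER in the JSON branch above is not declared in the snippet; it is presumably a Jackson ObjectMapper held in a static field, roughly as sketched below (the exact Jackson package depends on the HBase version):

  // Sketch only: the JSON mode serializes each txn map with a Jackson mapper.
  private static final ObjectMapper MAPPER = new ObjectMapper();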

  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final TableName tableName =
        TableName.valueOf("tablename");
    final byte [] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
      // Write columns with qualifiers "0", "1", "2", ... and matching
      // single-byte values '0', '1', '2', ...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd, sequenceId);
      log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes());
      log.close();
      Path filename = ((FSHLog) log).computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLogFactory.createReader(fs, filename, conf);
      HLog.Entry entry = reader.next();
      assertEquals(COL_COUNT, entry.getEdit().size());
      int idx = 0;
      for (Cell val : entry.getEdit().getCells()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(tableName.equals(entry.getKey().getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte)(idx + '0'), val.getValue()[0]);
        System.out.println(entry.getKey() + " " + val);
        idx++;
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }
View Full Code Here

      status.setStatus("Opening log file");
      if (reporter != null && !reporter.progress()) {
        progress_failed = true;
        return false;
      }
      Reader in = null;
      try {
        in = getReader(fs, logfile, conf, skipErrors, reporter);
      } catch (CorruptedLogFileException e) {
        LOG.warn("Could not get reader, corrupted log file " + logPath, e);
        ZKSplitLog.markCorrupted(rootDir, logfile.getPath().getName(), fs);
View Full Code Here
