Package org.apache.accumulo.core.client

Examples of org.apache.accumulo.core.client.BatchWriter
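
The snippets below are excerpts from larger classes, so identifiers such as c, conn, and state come from the surrounding code. For orientation, here is a minimal self-contained sketch of the basic BatchWriter lifecycle; the instance name, ZooKeeper host, credentials, and table name are placeholders.

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.ZooKeeperInstance;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.core.data.Mutation;
   
    // connect to a running instance (placeholder instance name and ZooKeeper host)
    Connector conn = new ZooKeeperInstance("myInstance", "zkhost:2181")
        .getConnector("user", new PasswordToken("secret"));
   
    // buffer one mutation and close; close() performs a final flush
    BatchWriter bw = conn.createBatchWriter("mytable", new BatchWriterConfig());
    try {
      Mutation m = new Mutation("row1");
      m.put("fam", "qual", "value");
      bw.addMutation(m);
    } finally {
      bw.close();
    }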


    deleter.setRanges(Collections.singletonList(new Range("r1")));
    deleter.delete();
    this.checkRemaining(c, "test", 0);
   
    // test deleting just one row
    BatchWriter writer = c.createBatchWriter("test", new BatchWriterConfig());
    Mutation m = new Mutation("r1");
    m.put("fam", "qual", "value");
    writer.addMutation(m);
   
    // make sure the write goes through
    writer.flush();
    writer.close();
   
    deleter.setRanges(Collections.singletonList(new Range("r1")));
    deleter.delete();
    this.checkRemaining(c, "test", 0);
   
    // test deleting one row when the table holds multiple rows
    writer = c.createBatchWriter("test", new BatchWriterConfig());
    m = new Mutation("r1");
    m.put("fam", "qual", "value");
    writer.addMutation(m);
    Mutation m2 = new Mutation("r2");
    m2.put("fam", "qual", "value");
    writer.addMutation(m2);
   
    // make sure the write goes through
    writer.flush();
    writer.close();
   
    deleter.setRanges(Collections.singletonList(new Range("r1")));
    deleter.delete();
    checkRemaining(c, "test", 1);
  }
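The test above uses a default BatchWriterConfig. The config supports fluent setters for the write buffer size, the maximum latency before an automatic background flush, the number of threads that send data to tablet servers, and a retry timeout. The values below are illustrative, not recommendations.

    import java.util.concurrent.TimeUnit;
   
    BatchWriterConfig config = new BatchWriterConfig()
        .setMaxMemory(10 * 1024 * 1024)     // buffer up to ~10 MB of mutations before sending
        .setMaxLatency(2, TimeUnit.MINUTES) // flush anything buffered longer than this
        .setMaxWriteThreads(4)              // background threads writing to tablet servers
        .setTimeout(Long.MAX_VALUE, TimeUnit.MILLISECONDS); // keep retrying failed servers
    BatchWriter writer = c.createBatchWriter("test", config);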


      all.putAll(assigned);
      for (Entry<Key, Value> entry : all.entrySet()) {
        TServerInstance alive = tserverSet.find(entry.getValue().toString());
        if (alive == null) {
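          // the recorded tserver is not in the live set; remove the stale assignment entry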
          Master.log.info("Removing entry " + entry);
          BatchWriter bw = getConnector().createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
          Mutation m = new Mutation(entry.getKey().getRow());
          m.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
          bw.addMutation(m);
          bw.close();
          return;
        }
      }
      Master.log.error("Metadata table is inconsistent at " + row + " and all assigned/future tservers are still online.");
    } catch (Throwable e) {
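The repair code above catches Throwable broadly; the specific failure a BatchWriter reports is MutationsRejectedException, which addMutation, flush, and close can all throw once a background write has failed. A sketch of inspecting it, assuming bw and m as in the snippets above:

    import org.apache.accumulo.core.client.MutationsRejectedException;
    import org.apache.accumulo.core.data.ConstraintViolationSummary;
   
    try {
      bw.addMutation(m);
      bw.close();
    } catch (MutationsRejectedException e) {
      // constraint violations are summarized per constraint class
      for (ConstraintViolationSummary cvs : e.getConstraintViolationSummaries())
        System.err.println("constraint violation: " + cvs);
      // servers that repeatedly failed while the writer was open
      System.err.println("failed servers: " + e.getErrorServers());
    }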

              datafiles.clear();
            }
          }
        }
        MetadataTable.addDeleteEntries(range, datafiles, SecurityConstants.getSystemCredentials());
        BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
        try {
          deleteTablets(deleteRange, bw, conn);
        } finally {
          bw.close();
        }
       
        if (followingTablet != null) {
          log.debug("Updating prevRow of " + followingTablet + " to " + range.getPrevEndRow());
          bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
          try {
            Mutation m = new Mutation(followingTablet.getMetadataEntry());
            Constants.METADATA_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(range.getPrevEndRow()));
            Constants.METADATA_CHOPPED_COLUMN.putDelete(m);
            bw.addMutation(m);
            bw.flush();
          } finally {
            bw.close();
          }
        } else {
          // Recreate the default tablet to hold the end of the table
          log.debug("Recreating the last tablet to point to " + range.getPrevEndRow());
          MetadataTable.addTablet(new KeyExtent(range.getTableId(), null, range.getPrevEndRow()), Constants.DEFAULT_TABLET_LOCATION,

      }
      Range scanRange = new Range(KeyExtent.getMetadataEntry(range.getTableId(), start), false, stopRow, false);
      if (range.isMeta())
        scanRange = scanRange.clip(Constants.METADATA_ROOT_TABLET_KEYSPACE);
     
      BatchWriter bw = null;
      try {
        long fileCount = 0;
        Connector conn = getConnector();
        // Make file entries in highest tablet
        bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
        Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
        scanner.setRange(scanRange);
        Constants.METADATA_PREV_ROW_COLUMN.fetch(scanner);
        Constants.METADATA_TIME_COLUMN.fetch(scanner);
        Constants.METADATA_DIRECTORY_COLUMN.fetch(scanner);
        scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
        Mutation m = new Mutation(stopRow);
        String maxLogicalTime = null;
        for (Entry<Key,Value> entry : scanner) {
          Key key = entry.getKey();
          Value value = entry.getValue();
          if (key.getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
            m.put(key.getColumnFamily(), key.getColumnQualifier(), value);
            fileCount++;
          } else if (Constants.METADATA_PREV_ROW_COLUMN.hasColumns(key) && firstPrevRowValue == null) {
            log.debug("prevRow entry for lowest tablet is " + value);
            firstPrevRowValue = new Value(value);
          } else if (Constants.METADATA_TIME_COLUMN.hasColumns(key)) {
            maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, value.toString());
          } else if (Constants.METADATA_DIRECTORY_COLUMN.hasColumns(key)) {
            if (!range.isMeta())
              bw.addMutation(MetadataTable.createDeleteMutation(range.getTableId().toString(), entry.getValue().toString()));
          }
        }
       
        // read the logical time from the last tablet in the merge range; it is
        // not included in the loop above
        scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
        Range last = new Range(stopRow);
        if (range.isMeta())
          last = last.clip(Constants.METADATA_ROOT_TABLET_KEYSPACE);
        scanner.setRange(last);
        Constants.METADATA_TIME_COLUMN.fetch(scanner);
        for (Entry<Key,Value> entry : scanner) {
          if (Constants.METADATA_TIME_COLUMN.hasColumns(entry.getKey())) {
            maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, entry.getValue().toString());
          }
        }
       
        if (maxLogicalTime != null)
          Constants.METADATA_TIME_COLUMN.put(m, new Value(maxLogicalTime.getBytes(Constants.UTF8)));
       
        if (!m.getUpdates().isEmpty()) {
          bw.addMutation(m);
        }
       
        bw.flush();
       
        log.debug("Moved " + fileCount + " files to " + stop);
       
        if (firstPrevRowValue == null) {
          log.debug("tablet already merged");
          return;
        }
       
        stop.setPrevEndRow(KeyExtent.decodePrevEndRow(firstPrevRowValue));
        Mutation updatePrevRow = stop.getPrevRowUpdateMutation();
        log.debug("Setting the prevRow for last tablet: " + stop);
        bw.addMutation(updatePrevRow);
        bw.flush();
       
        deleteTablets(scanRange, bw, conn);
       
        // Clean-up the last chopped marker
        m = new Mutation(stopRow);
        Constants.METADATA_CHOPPED_COLUMN.putDelete(m);
        bw.addMutation(m);
        bw.flush();
       
      } catch (Exception ex) {
        throw new AccumuloException(ex);
      } finally {
        if (bw != null)
          try {
            bw.close();
          } catch (Exception ex) {
            throw new AccumuloException(ex);
          }
      }
    }
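The merge code above interleaves flush() with further writes because flush() blocks until everything buffered so far is durable; steps that depend on earlier metadata updates can then safely read them. A condensed sketch of the same pattern with hypothetical table and row names:

    BatchWriter bw = conn.createBatchWriter("worktable", new BatchWriterConfig());
    try {
      Mutation first = new Mutation("step1");
      first.put("state", "phase", "done");
      bw.addMutation(first);
      bw.flush(); // blocks until "step1" is written; a scan issued now will see it
     
      // this write may assume the first one is already visible to readers
      Mutation second = new Mutation("step2");
      second.put("state", "phase", "done");
      bw.addMutation(second);
    } finally {
      bw.close();
    }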

    }
   
  }
 
  private void insertData(long ts) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
    BatchWriter bw = getConnector().createBatchWriter("foo", new BatchWriterConfig());
   
    for (int i = 0; i < 10000; i++) {
      String row = String.format("%09d", i);
     
      Mutation m = new Mutation(new Text(row));
      m.put(new Text("cf1"), new Text("cq1"), ts, new Value(Integer.toString(i).getBytes(Constants.UTF8)));
      bw.addMutation(m);
    }
   
    bw.close();
  }
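insertData adds mutations one at a time; BatchWriter also accepts a whole batch through addMutations(Iterable). A sketch using the same connector and table as the snippet:

    import java.util.ArrayList;
    import java.util.List;
   
    List<Mutation> batch = new ArrayList<Mutation>();
    for (int i = 0; i < 1000; i++) {
      Mutation m = new Mutation(String.format("%09d", i));
      m.put("cf1", "cq1", Integer.toString(i));
      batch.add(m);
    }
   
    BatchWriter bw = getConnector().createBatchWriter("foo", new BatchWriterConfig());
    try {
      bw.addMutations(batch); // equivalent to adding each mutation individually
    } finally {
      bw.close();
    }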

    return Collections.singletonList(new TableSetup("de"));
  }
 
  @Override
  public void run() throws Exception {
    BatchWriter bw = getConnector().createBatchWriter("de", new BatchWriterConfig());
    Mutation m = new Mutation(new Text("foo"));
    m.put(new Text("bar"), new Text("1910"), new Value("5".getBytes(Constants.UTF8)));
    bw.addMutation(m);
    bw.flush();
   
    getConnector().tableOperations().flush("de", null, null, true);
   
    checkRFiles("de", 1, 1, 1, 1);
   
    m = new Mutation(new Text("foo"));
    m.putDelete(new Text("bar"), new Text("1910"));
    bw.addMutation(m);
    bw.flush();
   
    Scanner scanner = getConnector().createScanner("de", Constants.NO_AUTHS);
    scanner.setRange(new Range());
   
    int count = 0;
    for (@SuppressWarnings("unused") Entry<Key,Value> entry : scanner) {
      count++;
    }
   
    if (count != 0)
      throw new Exception("count == " + count);
   
    getConnector().tableOperations().flush("de", null, null, true);
   
    getConnector().tableOperations().setProperty("de", Property.TABLE_MAJC_RATIO.getKey(), "1.0");
    UtilWaitThread.sleep(4000);
   
    checkRFiles("de", 1, 1, 0, 0);
   
    bw.close();
   
    count = 0;
    for (@SuppressWarnings("unused") Entry<Key,Value> entry : scanner) {
      count++;
    }

   
    fs.mkdirs(new Path(bulkDir));
    fs.mkdirs(new Path(bulkDir + "_f"));
   
    try {
      BatchWriter bw = new RFileBatchWriter(conf, fs, bulkDir + "/file01.rf");
      try {
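        // collect random row IDs in a TreeSet so keys reach the RFile in sorted order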
        TreeSet<Long> rows = new TreeSet<Long>();
        int numRows = rand.nextInt(100000);
        for (int i = 0; i < numRows; i++) {
          rows.add(rand.nextLong() & 0x7fffffffffffffffL);
        }
       
        for (Long row : rows) {
          Mutation m = new Mutation(String.format("%016x", row));
          long val = rand.nextLong() & 0x7fffffffffffffffL;
          for (int j = 0; j < 10; j++) {
            m.put("cf", "cq" + j, new Value(String.format("%016x", val).getBytes(Constants.UTF8)));
          }
         
          bw.addMutation(m);
        }
      } finally {
        bw.close();
      }
     
      conn.tableOperations().importDirectory(tableName, bulkDir, bulkDir + "_f", rand.nextBoolean());
     
      log.debug("BulkImported to " + tableName);

    String dataTableName = (String) state.get("docTableName");
    int numPartitions = (Integer) state.get("numPartitions");
    Random rand = (Random) state.get("rand");
    long nextDocID = (Long) state.get("nextDocID");
   
    BatchWriter dataWriter = state.getMultiTableBatchWriter().getBatchWriter(dataTableName);
    BatchWriter indexWriter = state.getMultiTableBatchWriter().getBatchWriter(indexTableName);
   
    String docID = insertRandomDocument(nextDocID++, dataWriter, indexWriter, indexTableName, dataTableName, numPartitions, rand);
   
    log.debug("Inserted document " + docID);
   
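The writers above come from a shared MultiTableBatchWriter, which multiplexes several tables behind one buffer and one set of write threads, so related tables (here a document table and its index) are batched together. A sketch of creating one directly, with placeholder table names:

    import org.apache.accumulo.core.client.MultiTableBatchWriter;
   
    MultiTableBatchWriter mtbw = conn.createMultiTableBatchWriter(new BatchWriterConfig());
    try {
      BatchWriter docWriter = mtbw.getBatchWriter("docTable");
      BatchWriter indexWriter = mtbw.getBatchWriter("indexTable");
     
      Mutation doc = new Mutation("doc0001");
      doc.put("content", "text", "hello world");
      docWriter.addMutation(doc);
     
      Mutation index = new Mutation("hello");
      index.put("docs", "doc0001", "");
      indexWriter.addMutation(index);
    } finally {
      mtbw.close(); // flushes and closes the underlying writers for all tables
    }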

   
    if (!conn.tableOperations().exists(opts.getTableName())) {
      throw new TableNotFoundException(null, opts.getTableName(), "Consult the README and create the table before starting ingest.");
    }

    BatchWriter bw = conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
    bw = Trace.wrapAll(bw, new CountSampler(1024));
   
    Random r = new Random();
   
    byte[] ingestInstanceId = UUID.randomUUID().toString().getBytes(Constants.UTF8);
   
    System.out.printf("UUID %d %s%n", System.currentTimeMillis(), new String(ingestInstanceId, Constants.UTF8));
   
    long count = 0;
    final int flushInterval = 1000000;
    final int maxDepth = 25;
   
    // always point back to flushed data so that the previous item already exists
    // in Accumulo when the data is verified. To do this, make insert N point back
    // to the row from insert (N - flushInterval). The arrays below keep track of
    // this.
    long[] prevRows = new long[flushInterval];
    long[] firstRows = new long[flushInterval];
    int[] firstColFams = new int[flushInterval];
    int[] firstColQuals = new int[flushInterval];
   
    long lastFlushTime = System.currentTimeMillis();
   
    out: while (true) {
      // generate first set of nodes
      ColumnVisibility cv = getVisibility(r);

      for (int index = 0; index < flushInterval; index++) {
        long rowLong = genLong(opts.min, opts.max, r);
        prevRows[index] = rowLong;
        firstRows[index] = rowLong;
       
        int cf = r.nextInt(opts.maxColF);
        int cq = r.nextInt(opts.maxColQ);
       
        firstColFams[index] = cf;
        firstColQuals[index] = cq;
       
        Mutation m = genMutation(rowLong, cf, cq, cv, ingestInstanceId, count, null, r, opts.checksum);
        count++;
        bw.addMutation(m);
      }
     
      lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
      if (count >= opts.num)
        break out;
     
      // generate subsequent sets of nodes that link to previous set of nodes
      for (int depth = 1; depth < maxDepth; depth++) {
        for (int index = 0; index < flushInterval; index++) {
          long rowLong = genLong(opts.min, opts.max, r);
          byte[] prevRow = genRow(prevRows[index]);
          prevRows[index] = rowLong;
          Mutation m = genMutation(rowLong, r.nextInt(opts.maxColF), r.nextInt(opts.maxColQ), cv, ingestInstanceId, count, prevRow, r, opts.checksum);
          count++;
          bw.addMutation(m);
        }
       
        lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
        if (count >= opts.num)
          break out;
      }
     
      // create one big linked list, this makes all of the first inserts
      // point to something
      for (int index = 0; index < flushInterval - 1; index++) {
        Mutation m = genMutation(firstRows[index], firstColFams[index], firstColQuals[index], cv, ingestInstanceId, count, genRow(prevRows[index + 1]), r,
            opts.checksum);
        count++;
        bw.addMutation(m);
      }
      lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
      if (count >= opts.num)
        break out;
    }
   
    bw.close();
    opts.stopTracing();
  }
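The flush helper is not part of this excerpt; its signature can be inferred from the call sites. A purely illustrative version would flush the writer and report the elapsed time for the interval:

    // hypothetical reconstruction, not the original helper
    private static long flush(BatchWriter bw, long count, int flushInterval, long lastFlushTime)
        throws MutationsRejectedException {
      long t1 = System.currentTimeMillis();
      bw.flush(); // block until the buffered interval is durable
      long t2 = System.currentTimeMillis();
      System.out.printf("FLUSH %d %d %d %d %d%n", t2, t2 - lastFlushTime, t2 - t1, count, flushInterval);
      return t2; // becomes the next lastFlushTime
    }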

    }
   
    Random rand = new Random();
    String tableName = tables.get(rand.nextInt(tables.size()));
   
    BatchWriter bw = null;
    try {
      bw = state.getMultiTableBatchWriter().getBatchWriter(tableName);
    } catch (TableOfflineException e) {
      log.error("Table " + tableName + " is offline!");
      return;
    } catch (TableNotFoundException e) {
      log.error("Table " + tableName + " not found!");
      return;
    }
   
    Text meta = new Text("meta");
    String uuid = UUID.randomUUID().toString();
   
    Mutation m = new Mutation(new Text(uuid));
   
    // create a fake payload between 4KB and 16KB
    int numBytes = rand.nextInt(12000) + 4000;
    byte[] payloadBytes = new byte[numBytes];
    rand.nextBytes(payloadBytes);
    m.put(meta, new Text("payload"), new Value(payloadBytes));
   
    // store size
    m.put(meta, new Text("size"), new Value(String.format("%d", numBytes).getBytes(Constants.UTF8)));
   
    // store hash
    MessageDigest alg = MessageDigest.getInstance("SHA-1");
    alg.update(payloadBytes);
    m.put(meta, new Text("sha1"), new Value(alg.digest()));
   
    // add mutation
    bw.addMutation(m);
   
    state.set("numWrites", state.getLong("numWrites") + 1);
  }
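A natural counterpart to this write is a read-back check that recomputes the digest. A sketch, assuming the same column layout and a uuid row written as above ("datatable" is a placeholder name):

    import java.security.MessageDigest;
    import java.util.Arrays;
   
    Scanner scanner = conn.createScanner("datatable", Constants.NO_AUTHS);
    scanner.setRange(new Range(uuid));
   
    byte[] payload = null;
    byte[] storedHash = null;
    for (Entry<Key,Value> entry : scanner) {
      String cq = entry.getKey().getColumnQualifier().toString();
      if (cq.equals("payload"))
        payload = entry.getValue().get();
      else if (cq.equals("sha1"))
        storedHash = entry.getValue().get();
    }
   
    MessageDigest alg = MessageDigest.getInstance("SHA-1");
    if (payload == null || storedHash == null || !Arrays.equals(alg.digest(payload), storedHash))
      throw new Exception("payload corrupt or missing for row " + uuid);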
