Package org.apache.hadoop.hbase.client

Examples of org.apache.hadoop.hbase.client.Increment
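
An Increment atomically adds a delta to one or more counter columns in a single row, and the server returns the updated values. A minimal, self-contained sketch of the common pattern (this assumes the HBase 1.x+ Connection/Table API; the table name "counters" and the cf:hits column are made-up examples, while several snippets below use the older HTable-era API):

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Increment;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class IncrementExample {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Table table = conn.getTable(TableName.valueOf("counters"))) { // made-up table name
        Increment inc = new Increment(Bytes.toBytes("row-1"));
        inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);   // add 1 to cf:hits
        Result result = table.increment(inc);                            // server returns the new values
        long hits = Bytes.toLong(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("hits")));
        System.out.println("cf:hits is now " + hits);
      }
    }
  }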


  private static final byte[] CQ1 = "20120816".getBytes();
  private static final byte[] CQ2 = "20120817".getBytes();

  @Test
  public void testAddIncrement() {
    Increment i = new Increment(KEY);
    i.addColumn(CF, CQ1, 1); // set counter to 1
    i.addColumn(CF, CQ1, 1); // overwrites the counter, so it's still 1

    Assert.assertEquals(1L, (long) i.getFamilyMap().get(CF).get(CQ1));

    TupleTableConfig.addIncrement(i, CF, CQ1, 2L); // increment counter by 2
    TupleTableConfig.addIncrement(i, CF, CQ2, 2L); // increment different
                                                   // qualifier by 2

    Assert.assertEquals(3L, (long) i.getFamilyMap().get(CF).get(CQ1));
    Assert.assertEquals(2L, (long) i.getFamilyMap().get(CF).get(CQ2));
  }
View Full Code Here
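
Note that the test above depends on Increment.addColumn replacing any amount already staged for the same family/qualifier rather than adding to it; TupleTableConfig.addIncrement is the helper that does the accumulation. A rough sketch of what such a helper can look like, based only on the behaviour the assertions expect (it assumes the 0.94-era API, where Increment.getFamilyMap() returns a Map<byte[], NavigableMap<byte[], Long>>):

  // Hypothetical accumulating helper, mirroring what the assertions above expect
  // (assumes the 0.94-era Increment.getFamilyMap() that exposes per-qualifier longs).
  public static void addIncrement(Increment inc, byte[] family, byte[] qualifier, long amount) {
    NavigableMap<byte[], Long> qualifiers = inc.getFamilyMap().get(family);
    Long current = (qualifiers == null) ? null : qualifiers.get(qualifier);
    // addColumn overwrites, so fold any previously staged amount into the new one.
    inc.addColumn(family, qualifier, current == null ? amount : current + amount);
  }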

      if (inc != null) {
        Increment increment = new Increment();
        for (Entry<String, Object> e : inc.getMap().entrySet()) {
          increment.addColumn(MdrillRealTimeHbaseImpl.DATA_FAMILY, Bytes.toBytes(e.getKey()),
              Long.parseLong(String.valueOf(e.getValue())));
        }
        table.increment(increment);
      }
     
      this.queue.put(Message.INSTANCE(shard, partion, long2Bytes(higo_uuid), this.hbase.getConfig()));
View Full Code Here

    // Reconstruct list of Increments per unique row/family/qualifier.
    List<Increment> coalesced = Lists.newLinkedList();
    for (Map.Entry<byte[], Map<byte[],NavigableMap<byte[], Long>>> rowEntry : counters.entrySet()) {
      byte[] row = rowEntry.getKey();
      Map<byte[], NavigableMap<byte[], Long>> families = rowEntry.getValue();
      Increment inc = new Increment(row);
      for (Map.Entry<byte[], NavigableMap<byte[], Long>> familyEntry : families.entrySet()) {
        byte[] family = familyEntry.getKey();
        NavigableMap<byte[], Long> qualifiers = familyEntry.getValue();
        for (Map.Entry<byte[], Long> qualifierEntry : qualifiers.entrySet()) {
          byte[] qualifier = qualifierEntry.getKey();
          long count = qualifierEntry.getValue();
          inc.addColumn(family, qualifier, count);
        }
      }
      coalesced.add(inc);
    }
View Full Code Here
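
The fragment above coalesces many pending counts into one Increment per row, so increments against the same row reach the region server as a single call. Below is a sketch of how the nested counters structure it walks might be built up; the CounterBuffer name is made up, and the TreeMaps keyed with Bytes.BYTES_COMPARATOR are an assumption (byte[] keys need an explicit comparator to behave as map keys):

  import java.util.Map;
  import java.util.NavigableMap;
  import java.util.TreeMap;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CounterBuffer {
    // Every level is a TreeMap over Bytes.BYTES_COMPARATOR because byte[] has no
    // value-based equals/hashCode (an assumption here, but a common HBase idiom).
    private final Map<byte[], Map<byte[], NavigableMap<byte[], Long>>> counters =
        new TreeMap<byte[], Map<byte[], NavigableMap<byte[], Long>>>(Bytes.BYTES_COMPARATOR);

    public void bump(byte[] row, byte[] family, byte[] qualifier, long delta) {
      Map<byte[], NavigableMap<byte[], Long>> families = counters.get(row);
      if (families == null) {
        families = new TreeMap<byte[], NavigableMap<byte[], Long>>(Bytes.BYTES_COMPARATOR);
        counters.put(row, families);
      }
      NavigableMap<byte[], Long> qualifiers = families.get(family);
      if (qualifiers == null) {
        qualifiers = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
        families.put(family, qualifiers);
      }
      Long current = qualifiers.get(qualifier);
      qualifiers.put(qualifier, current == null ? delta : current + delta);
    }
  }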

        return;
      }

      try {
        HTable table = getTable(tincrement.getTable());
        Increment inc = ThriftUtilities.incrementFromThrift(tincrement);
        table.increment(inc);
      } catch (IOException e) {
        LOG.warn(e.getMessage(), e);
        throw new IOError(e.getMessage());
      }
View Full Code Here

    }
    requestCount.incrementAndGet();
    try {
      HRegion region = getRegion(regionName);
      Integer lock = getLockFromId(increment.getLockId());
      Increment incVal = increment;
      Result resVal;
      if (region.getCoprocessorHost() != null) {
        resVal = region.getCoprocessorHost().preIncrement(incVal);
        if (resVal != null) {
          return resVal;
View Full Code Here
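
In the region server path above, a coprocessor gets the first look at each Increment: if preIncrement returns a non-null Result, the region skips the increment and that Result goes back to the client. A sketch of a RegionObserver hooked into this path (it assumes the pre-2.0 BaseRegionObserver API; the class name is made up):

  import java.io.IOException;
  import java.util.concurrent.atomic.AtomicLong;
  import org.apache.hadoop.hbase.client.Increment;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

  public class IncrementCountingObserver extends BaseRegionObserver {
    private final AtomicLong seen = new AtomicLong();

    @Override
    public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> ctx,
        Increment increment) throws IOException {
      seen.incrementAndGet();
      // Returning null lets the region apply the Increment normally; returning a
      // non-null Result would short-circuit it, as the server code above shows.
      return null;
    }
  }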

    @Override
    public void run() {
      int count = 0;
      while (count < incCounter) {
        Increment inc = new Increment(incRow);
        inc.addColumn(family, qualifier, ONE);
        count++;
        try {
          region.increment(inc, null, true);
        } catch (IOException e) {
          e.printStackTrace();
View Full Code Here

  public void testIncrWithReadOnlyTable() throws Exception {
    byte[] TABLE = Bytes.toBytes("readOnlyTable");
    this.region = initHRegion(TABLE, getName(), conf, true, Bytes.toBytes("somefamily"));
    boolean exceptionCaught = false;   
    Increment inc = new Increment(Bytes.toBytes("somerow"));
    inc.addColumn(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"), 1L);
    try {
      region.increment(inc, false);
    } catch (IOException e) {
      exceptionCaught = true;
    } finally {
View Full Code Here

      int row1Field2 = 1;
      Put put1 = new Put(row1);
      put1.add(fam1, qual1, Bytes.toBytes(row1Field1));
      put1.add(fam1, qual2, Bytes.toBytes(row1Field2));
      region.put(put1);
      Increment increment = new Increment(row1);
      increment.addColumn(fam1, qual1, 1);

      // this increment should succeed as normal
      region.increment(increment, null, true);
      assertICV(row1, fam1, qual1, row1Field1 + 1);

      // this increment should fail: qual2 was written as a 4-byte int, not an 8-byte long
      increment = new Increment(row1);
      increment.addColumn(fam1, qual2, 1);
      try {
        region.increment(increment, null, true);
        fail("Expected to fail here");
      } catch (Exception exception) {
        // Expected.
View Full Code Here
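
The failure above is about value width: the region can only increment cells stored as exactly 8 bytes, and row1Field2 was written with Bytes.toBytes(int), which produces 4 bytes. Writing the column as a long up front would make it incrementable, e.g.:

  put1.add(fam1, qual2, Bytes.toBytes((long) row1Field2)); // 8-byte value, so Increment can update it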

   * From a {@link TIncrement} create an {@link Increment}.
   * @param tincrement the Thrift version of an increment
   * @return an increment that the {@link TIncrement} represented.
   */
  public static Increment incrementFromThrift(TIncrement tincrement) {
    Increment inc = new Increment(tincrement.getRow());
    byte[][] famAndQf = KeyValue.parseColumn(tincrement.getColumn());
    if (famAndQf.length < 1) return null;
    byte[] qual = famAndQf.length == 1 ? new byte[0] : famAndQf[1];
    inc.addColumn(famAndQf[0], qual, tincrement.getAmmount());
    return inc;
  }
View Full Code Here
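
incrementFromThrift expects the Thrift column as a single family:qualifier byte[], which KeyValue.parseColumn splits at the ':' delimiter (getAmmount() is simply the generated accessor's spelling). A small sketch with a made-up column name:

  // parseColumn splits "family:qualifier" at ':'; with no delimiter it returns
  // a single-element array holding only the family.
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes("cf:visits")); // made-up column
  Increment inc = new Increment(Bytes.toBytes("row-1"));
  inc.addColumn(famAndQf[0], famAndQf.length == 1 ? new byte[0] : famAndQf[1], 5L);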
