Package org.apache.flume

Examples of org.apache.flume.EventDeliveryException

EventDeliveryException is thrown by Flume sources and sinks when one or more events cannot be delivered to their destination; the usual contract is to roll back the channel transaction first so the failed batch can be retried. The fragments below, collected from several Flume sinks, show the common patterns.
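
Before the collected fragments, here is a minimal, self-contained sketch of the pattern they all follow (the class name LoggingSink and its trivial "delivery" step are hypothetical, for illustration only):

    import java.nio.charset.StandardCharsets;
    import org.apache.flume.Channel;
    import org.apache.flume.Event;
    import org.apache.flume.EventDeliveryException;
    import org.apache.flume.Transaction;
    import org.apache.flume.sink.AbstractSink;

    public class LoggingSink extends AbstractSink {
      @Override
      public Status process() throws EventDeliveryException {
        Channel channel = getChannel();
        Transaction txn = channel.getTransaction();
        txn.begin();
        try {
          Event event = channel.take();
          if (event == null) {
            // channel is empty: commit the empty transaction and back off
            txn.commit();
            return Status.BACKOFF;
          }
          // "deliver" the event; a real sink would write to external storage
          System.out.println(new String(event.getBody(), StandardCharsets.UTF_8));
          txn.commit();
          return Status.READY;
        } catch (Throwable t) {
          txn.rollback();
          // wrap the cause so the sink runner knows delivery failed and
          // can retry the batch
          throw new EventDeliveryException("Failed to deliver event", t);
        } finally {
          txn.close();
        }
      }
    }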


      LOG.warn("HDFS IO error", eIO);
      return Status.BACKOFF;
    } catch (Exception e) {
      transaction.rollback();
      LOG.error("process failed", e);
      throw new EventDeliveryException(e.getMessage());
    } finally {
      // clear any leftover writes in the given transaction
      for (Entry<String, BucketWriter> e : batchMap.entrySet()) {
        e.getValue().abort();
      }


        // Fragment of a Thrift-based sink's process() method (the enclosing
        // try and the catch header below are reconstructed). On a transport
        // error the sink attempts to reconnect before giving up; any other
        // Throwable rolls the transaction back. Both failure paths surface
        // as EventDeliveryException.
        } catch (final TTransportException e) {
            try {
                connect();
                LOGGER.info("Reconnect succeeded.");
            } catch (final TTransportException e1) {
                LOGGER.warn("Trying to reconnect failed when recovering from "
                    + e.getMessage(), e1);
                throw new EventDeliveryException(e1);
            }
        } catch (final Throwable e) {
            txn.rollback();
            throw new EventDeliveryException(e);
        } finally {
            txn.close();
        }

        return status;

    /*
     * Fragment of an asynchronous HBase sink's process() method. Each
     * transaction gets its own failure flag and callback counters, so a
     * failed txn cannot leak into the next even if errbacks for the current
     * txn get called while the next one is being processed.
     */
    if (!open) {
      throw new EventDeliveryException("Sink was never opened. " +
          "Please fix the configuration.");
    }
    AtomicBoolean txnFail = new AtomicBoolean(false);
    AtomicInteger callbacksReceived = new AtomicInteger(0);
    AtomicInteger callbacksExpected = new AtomicInteger(0);
    final Lock lock = new ReentrantLock();
    final Condition condition = lock.newCondition();
    if (incrementBuffer != null) {
      incrementBuffer.clear();
    }
    /*
     * Callbacks can be reused per transaction, since they share the same
     * locks and conditions.
     */
    Callback<Object, Object> putSuccessCallback =
            new SuccessCallback<Object, Object>(
            lock, callbacksReceived, condition);
    Callback<Object, Object> putFailureCallback =
            new FailureCallback<Object, Object>(
            lock, callbacksReceived, txnFail, condition);

    Callback<Long, Long> incrementSuccessCallback =
            new SuccessCallback<Long, Long>(
            lock, callbacksReceived, condition);
    Callback<Long, Long> incrementFailureCallback =
            new FailureCallback<Long, Long>(
            lock, callbacksReceived, txnFail, condition);

    Status status = Status.READY;
    Channel channel = getChannel();
    int i = 0;
    try {
      txn = channel.getTransaction();
      txn.begin();
      for (; i < batchSize; i++) {
        Event event = channel.take();
        if (event == null) {
          status = Status.BACKOFF;
          if (i == 0) {
            sinkCounter.incrementBatchEmptyCount();
          } else {
            sinkCounter.incrementBatchUnderflowCount();
          }
          break;
        } else {
          serializer.setEvent(event);
          List<PutRequest> actions = serializer.getActions();
          List<AtomicIncrementRequest> increments = serializer.getIncrements();
          callbacksExpected.addAndGet(actions.size());
          if (!batchIncrements) {
            callbacksExpected.addAndGet(increments.size());
          }

          for (PutRequest action : actions) {
            action.setDurable(enableWal);
            client.put(action).addCallbacks(putSuccessCallback, putFailureCallback);
          }
          for (AtomicIncrementRequest increment : increments) {
            if (batchIncrements) {
              CellIdentifier identifier = new CellIdentifier(increment.key(),
                increment.qualifier());
              AtomicIncrementRequest request
                = incrementBuffer.get(identifier);
              if (request == null) {
                incrementBuffer.put(identifier, increment);
              } else {
                request.setAmount(request.getAmount() + increment.getAmount());
              }
            } else {
              client.atomicIncrement(increment).addCallbacks(
                incrementSuccessCallback, incrementFailureCallback);
            }
          }
        }
      }
      if (batchIncrements) {
        Collection<AtomicIncrementRequest> increments = incrementBuffer.values();
        for (AtomicIncrementRequest increment : increments) {
          client.atomicIncrement(increment).addCallbacks(
            incrementSuccessCallback, incrementFailureCallback);
        }
        callbacksExpected.addAndGet(increments.size());
      }
      client.flush();
    } catch (Throwable e) {
      this.handleTransactionFailure(txn);
      this.checkIfChannelExceptionAndThrow(e);
    }
    if (i == batchSize) {
      sinkCounter.incrementBatchCompleteCount();
    }
    sinkCounter.addToEventDrainAttemptCount(i);

    lock.lock();
    long startTime = System.nanoTime();
    long timeRemaining;
    try {
      while ((callbacksReceived.get() < callbacksExpected.get())
              && !txnFail.get()) {
        timeRemaining = timeout - (System.nanoTime() - startTime);
        timeRemaining = (timeRemaining >= 0) ? timeRemaining : 0;
        try {
          if (!condition.await(timeRemaining, TimeUnit.NANOSECONDS)) {
            txnFail.set(true);
            logger.warn("HBase callbacks timed out. "
                    + "Transaction will be rolled back.");
          }
        } catch (Exception ex) {
          logger.error("Exception while waiting for callbacks from HBase.");
          this.handleTransactionFailure(txn);
          Throwables.propagate(ex);
        }
      }
    } finally {
      lock.unlock();
    }

    if (isCoalesceTest) {
      totalCallbacksReceived += callbacksReceived.get();
    }

    /*
     * At this point either the txn has failed, or all callbacks have been
     * received and the txn is successful.
     *
     * This need not be done under the monitor, since all callbacks for this
     * txn have been received. So txnFail will not be modified any more (and
     * even if it is, it can only go from true to true; it is reset to false
     * only at the start of the next process() call).
     */
    if (txnFail.get()) {
      this.handleTransactionFailure(txn);
      throw new EventDeliveryException("Could not write events to Hbase. " +
          "Transaction failed, and rolled back.");
    } else {
      try {
        txn.commit();
        txn.close();
        // reconstructed tail of this fragment: count drained events; a
        // commit failure would go through the class's failure helpers, as
        // in the next fragment
        sinkCounter.addToEventDrainSuccessCount(i);
      } catch (Throwable e) {
        this.handleTransactionFailure(txn);
        this.checkIfChannelExceptionAndThrow(e);
      }
    }
    return status;

            "Transaction rolled back.", e);
        Throwables.propagate(e);
      } else {
        logger.error("Failed to commit transaction." +
            "Transaction rolled back.", e);
        throw new EventDeliveryException("Failed to commit transaction." +
            "Transaction rolled back.", e);
      }
    } finally {
      txn.close();
    }

  }

  // Wrap channel errors in EventDeliveryException; Errors and
  // RuntimeExceptions are propagated unchanged.
  private void checkIfChannelExceptionAndThrow(Throwable e)
          throws EventDeliveryException {
    if (e instanceof ChannelException) {
      throw new EventDeliveryException("Error in processing transaction.", e);
    } else if (e instanceof Error || e instanceof RuntimeException) {
      Throwables.propagate(e);
    }
    throw new EventDeliveryException("Error in processing transaction.", e);
  }

      // Fragment: same commit-failure pattern as above (the catch header and
      // instanceof check are reconstructed).
      if (ex instanceof Error || ex instanceof RuntimeException) {
        logger.error("Failed to commit transaction. Transaction rolled back.",
            ex);
        Throwables.propagate(ex);
      } else {
        logger.error("Failed to commit transaction. Transaction rolled back.",
            ex);
        throw new EventDeliveryException(
            "Failed to commit transaction. Transaction rolled back.", ex);
      }
    } finally {
      txn.close();
    }

  }

  @Override
  public Status process() throws EventDeliveryException {
    if (writer == null) {
      throw new EventDeliveryException(
          "Cannot recover after previous failure");
    }

    // handle file rolling
    if ((System.currentTimeMillis() - lastRolledMs) / 1000 > rollIntervalS) {
      // close the current writer and get a new one
      writer.close();
      this.writer = openWriter(targetDataset);
      this.lastRolledMs = System.currentTimeMillis();
      LOG.info("Rolled writer for dataset: " + datasetName);
    }

    Channel channel = getChannel();
    Transaction transaction = null;
    try {
      long processedEvents = 0;

      transaction = channel.getTransaction();
      transaction.begin();
      for (; processedEvents < batchSize; processedEvents += 1) {
        Event event = channel.take();
        if (event == null) {
          // no events available in the channel
          break;
        }

        this.datum = deserialize(event, datum);

        // writeEncoded would be an optimization in some cases, but HBase
        // will not support it and partitioned Datasets need to get partition
        // info from the entity Object. We may be able to avoid the
        // serialization round-trip otherwise.
        writer.write(datum);
      }

      // TODO: Add option to sync, depends on CDK-203
      writer.flush();

      // commit after data has been written and flushed
      transaction.commit();

      if (processedEvents == 0) {
        counter.incrementBatchEmptyCount();
        return Status.BACKOFF;
      } else if (processedEvents < batchSize) {
        counter.incrementBatchUnderflowCount();
      } else {
        counter.incrementBatchCompleteCount();
      }

      counter.addToEventDrainSuccessCount(processedEvents);

      return Status.READY;

    } catch (Throwable th) {
      // catch-all for any unhandled Throwable so that the transaction is
      // correctly rolled back.
      if (transaction != null) {
        try {
          transaction.rollback();
        } catch (Exception ex) {
          LOG.error("Transaction rollback failed", ex);
          throw Throwables.propagate(ex);
        }
      }

      // close the writer and remove its reference
      writer.close();
      this.writer = null;
      this.lastRolledMs = System.currentTimeMillis();

      // handle the exception
      Throwables.propagateIfInstanceOf(th, Error.class);
      Throwables.propagateIfInstanceOf(th, EventDeliveryException.class);
      throw new EventDeliveryException(th);

    } finally {
      if (transaction != null) {
        transaction.close();
      }
    }
  }

    // Fragment of the sink's deserialize(event, reuse) helper; the decoder
    // setup at the top of the method is elided.
    // no checked exception is thrown in the CacheLoader
    ReflectDatumReader<Object> reader = readers.getUnchecked(schema(event));
    try {
      return reader.read(reuse, decoder);
    } catch (IOException ex) {
      throw new EventDeliveryException("Cannot deserialize event", ex);
    }
  }

      // Fragment of the schema(Event) helper; the preceding if-branch is
      // elided. The schema cache lookup can fail with an ExecutionException,
      // whose cause is re-wrapped below.
      } else {
        return schemasFromLiteral.get(
            headers.get(DatasetSinkConstants.AVRO_SCHEMA_LITERAL_HEADER));
      }
    } catch (ExecutionException ex) {
      throw new EventDeliveryException("Cannot get schema", ex.getCause());
    }
  }

  // Flush the prepared bulk request to Elasticsearch; any per-item failure
  // fails the whole batch via EventDeliveryException. The builder is
  // re-created in the finally block so the next batch starts from a clean
  // request even after a failure.
  public void execute() throws Exception {
    try {
      logger.info("Sending bulk to elasticsearch cluster");
      BulkResponse bulkResponse = bulkRequestBuilder.execute().actionGet();
      if (bulkResponse.hasFailures()) {
        throw new EventDeliveryException(bulkResponse.buildFailureMessage());
      }
    } finally {
      bulkRequestBuilder = client.prepareBulk();
    }
  }
