Package org.jclouds.aws.s3

Examples of org.jclouds.aws.s3.S3Client
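The snippets below are taken from jclouds' multipart-upload strategies for AWS S3. They show, in order: the asynchronous routine that uploads a single part, the strategy that initiates the upload, throttles and retries the part uploads, and completes (or aborts) it, a later revision of both against the newer jclouds API, and a provider-metadata test. As a point of reference, a minimal sketch of obtaining the client these snippets work with might look as follows; the "identity"/"credential" values are placeholders for real AWS keys, and unwrapApi assumes a jclouds version that exposes it on BlobStoreContext (the later snippets on this page use it):

   import org.jclouds.ContextBuilder;
   import org.jclouds.aws.s3.AWSS3Client;
   import org.jclouds.blobstore.BlobStoreContext;

   public class ClientSetup {
      public static void main(String[] args) {
         // build a blobstore view for the aws-s3 provider; credentials are placeholders
         BlobStoreContext context = ContextBuilder.newBuilder("aws-s3")
               .credentials("identity", "credential")
               .buildView(BlobStoreContext.class);
         try {
            // unwrap the provider-specific client, as the newer snippets below do
            AWSS3Client client = context.unwrapApi(AWSS3Client.class);
            System.out.println("obtained " + client);
         } finally {
            context.close();
         }
      }
   }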


      if (errors.get() > maxRetries) {
         activeParts.remove(part); // remove part from the bounded-queue without blocking
         latch.countDown();
         return;
      }
      final AWSS3AsyncClient client = ablobstore.getContext().unwrap(AWSS3ApiMetadata.CONTEXT_TOKEN).getAsyncApi();
      Payload chunkedPart = slicer.slice(payload, offset, size);
      logger.debug(String.format("async uploading part %s of %s to container %s with uploadId %s", part, key, container, uploadId));
      final long start = System.currentTimeMillis();
      final ListenableFuture<String> futureETag = client.uploadPart(container, key, part, uploadId, chunkedPart);
      futureETag.addListener(new Runnable() {
         @Override
         public void run() {
            try {
               etags.put(part, futureETag.get()); // record the ETag for completeMultipartUpload
               // the original listing is truncated here; the closing below is a sketch
               // that follows the contract of the surrounding strategy code
            } catch (Exception e) {
               errorMap.put(part, e);                     // keep the failure for diagnostics
               errors.incrementAndGet();                  // count it against maxRetries
               toRetry.add(new Part(part, offset, size)); // queue the part for a retry pass
            } finally {
               activeParts.remove(part); // free a slot in the bounded queue
               latch.countDown();
            }
         }
      }, executor); // listener executor; the field name is assumed
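The routine above throttles parallel part uploads with an ArrayBlockingQueue and signals completion through a CountDownLatch: put(partKey) blocks once parallelDegree parts are in flight, and each finished part removes itself from the queue and counts the latch down. A standalone sketch of the same pattern, independent of the jclouds types (the worker body is a placeholder):

   import java.util.concurrent.ArrayBlockingQueue;
   import java.util.concurrent.BlockingQueue;
   import java.util.concurrent.CountDownLatch;
   import java.util.concurrent.ExecutorService;
   import java.util.concurrent.Executors;

   public class BoundedParallelism {
      public static void main(String[] args) throws InterruptedException {
         final int parallelDegree = 4;
         final int jobs = 10;
         ExecutorService pool = Executors.newCachedThreadPool();
         // the bounded queue caps how many jobs may run at once
         final BlockingQueue<Integer> activeParts = new ArrayBlockingQueue<Integer>(parallelDegree);
         final CountDownLatch latch = new CountDownLatch(jobs);
         for (int i = 0; i < jobs; i++) {
            final Integer part = Integer.valueOf(i);
            activeParts.put(part); // blocks while parallelDegree jobs are already in flight
            pool.submit(new Runnable() {
               @Override
               public void run() {
                  try {
                     // placeholder for the real work, e.g. uploading one part
                  } finally {
                     activeParts.remove(part); // free a slot without blocking
                     latch.countDown();
                  }
               }
            });
         }
         latch.await(); // wait until every job has finished
         pool.shutdown();
      }
   }

The full strategy that drives these part uploads follows.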


                        // preceding call truncated in the original listing; the chunking
                        // algorithm here is sized from the payload's content length
                        .getContentLength());
                  int parts = algorithm.getParts();
                  long chunkSize = algorithm.getChunkSize();
                  long remaining = algorithm.getRemaining();
                  if (parts > 0) {
                     AWSS3Client client = ablobstore
                           .getContext().unwrap(AWSS3ApiMetadata.CONTEXT_TOKEN).getApi();
                     String uploadId = null;
                     final Map<Integer, ListenableFuture<String>> futureParts =
                        new ConcurrentHashMap<Integer, ListenableFuture<String>>();
                     final Map<Integer, Exception> errorMap = Maps.newHashMap();
                     AtomicInteger errors = new AtomicInteger(0);
                     int maxRetries = Math.max(minRetries, parts * maxPercentRetries / 100);
                     int effectiveParts = remaining > 0 ? parts + 1 : parts;
                     try {
                        uploadId = client.initiateMultipartUpload(container,
                                 ObjectMetadataBuilder.create().key(key).build()); // TODO md5
                        logger.debug(String.format("initiated multipart upload of %s to container %s" +
                              " with uploadId %s consisting from %s part (possible max. retries: %d)",
                              key, container, uploadId, effectiveParts, maxRetries));
                        // we need a bounded-blocking queue to control the amount of parallel jobs
                        ArrayBlockingQueue<Integer> activeParts = new ArrayBlockingQueue<Integer>(parallelDegree);
                        Queue<Part> toRetry = new ConcurrentLinkedQueue<Part>();
                        SortedMap<Integer, String> etags = new ConcurrentSkipListMap<Integer, String>();
                        CountDownLatch latch = new CountDownLatch(effectiveParts);
                        int part;
                        while ((part = algorithm.getNextPart()) <= parts) {
                           Integer partKey = Integer.valueOf(part);
                           activeParts.put(partKey);
                           prepareUploadPart(container, key, uploadId, partKey, payload,
                                 algorithm.getNextChunkOffset(), chunkSize, etags,
                                 activeParts, futureParts, errors, maxRetries, errorMap, toRetry, latch);
                        }
                        if (remaining > 0) {
                           Integer partKey = Integer.valueOf(part);
                           activeParts.put(partKey);
                           prepareUploadPart(container, key, uploadId, partKey, payload,
                                 algorithm.getNextChunkOffset(), remaining, etags,
                                 activeParts, futureParts, errors, maxRetries, errorMap, toRetry, latch);
                        }
                        latch.await();
                        // handling retries
                        while (errors.get() <= maxRetries && toRetry.size() > 0) {
                           int atOnce = Math.min(Math.min(toRetry.size(), errors.get()), parallelDegree);
                           CountDownLatch retryLatch = new CountDownLatch(atOnce);
                           for (int i = 0; i < atOnce; i++) {
                              Part failedPart = toRetry.poll();
                              Integer partKey = Integer.valueOf(failedPart.getPart());
                              activeParts.put(partKey);
                              prepareUploadPart(container, key, uploadId, partKey, payload,
                                    failedPart.getOffset(), failedPart.getSize(), etags,
                                    activeParts, futureParts, errors, maxRetries, errorMap, toRetry, retryLatch);
                           }
                           retryLatch.await();
                        }
                        if (errors.get() > maxRetries) {
                           throw new BlobRuntimeException(String.format(
                                 "Too many failed parts: %s while multipart upload of %s to container %s with uploadId %s",
                                 errors.get(), key, container, uploadId));
                        }
                        String eTag = client.completeMultipartUpload(container, key, uploadId, etags);
                        logger.debug(String.format("multipart upload of %s to container %s with uploadId %s" +
                            " successfully finished with %s retries", key, container, uploadId, errors.get()));
                        return eTag;
                     } catch (Exception ex) {
                        RuntimeException rtex = Throwables2.getFirstThrowableOfType(ex, RuntimeException.class);
                        if (rtex == null) {
                           rtex = new RuntimeException(ex);
                        }
                        for (Map.Entry<Integer, ListenableFuture<String>> entry : futureParts.entrySet()) {
                           entry.getValue().cancel(false);
                        }
                        if (uploadId != null) {
                           client.abortMultipartUpload(container, key, uploadId);
                        }
                        throw rtex;
                     }
                  } else {
                     // Issue 936: don't just call putBlob, as that will see options=multiPart and
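Stripped of the parallelism and retry bookkeeping, the strategy above rests on three S3 calls: initiate the upload, upload each part, then complete it (or abort on failure). A minimal sequential sketch of that protocol; client, container, key and the two chunk byte arrays are assumed to exist, and every part except the last must meet S3's 5 MB minimum:

   import java.util.SortedMap;
   import java.util.TreeMap;
   import org.jclouds.io.Payload;
   import org.jclouds.io.Payloads;
   import org.jclouds.s3.domain.ObjectMetadataBuilder;

   // fragment of a method; client, container, key, firstChunk, lastChunk are assumed
   String uploadId = client.initiateMultipartUpload(container,
         ObjectMetadataBuilder.create().key(key).build());
   SortedMap<Integer, String> etags = new TreeMap<Integer, String>();
   try {
      Payload part1 = Payloads.newByteArrayPayload(firstChunk);
      etags.put(Integer.valueOf(1), client.uploadPart(container, key, 1, uploadId, part1));
      Payload part2 = Payloads.newByteArrayPayload(lastChunk);
      etags.put(Integer.valueOf(2), client.uploadPart(container, key, 2, uploadId, part2));
      // the ETag of the assembled object comes back from the final call
      String eTag = client.completeMultipartUpload(container, key, uploadId, etags);
   } catch (Exception e) {
      client.abortMultipartUpload(container, key, uploadId); // don't leave an orphaned upload
      throw e;
   }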

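A later revision of the per-part routine follows. It acquires the client through the newer unwrapApi(AWSS3Client.class) call instead of unwrap(...).getAsyncApi(), and, reflecting jclouds' move away from async clients, it wraps the synchronous uploadPart call in an executor submission (presumably a Guava ListeningExecutorService, given the ListenableFuture it returns) to keep the same future-based flow.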

      if (errors.get() > maxRetries) {
         activeParts.remove(part); // remove part from the bounded-queue without blocking
         latch.countDown();
         return;
      }
      final AWSS3Client client = blobstore.getContext().unwrapApi(AWSS3Client.class);
      final Payload chunkedPart = slicer.slice(payload, offset, size);
      logger.debug(String.format("async uploading part %s of %s to container %s with uploadId %s", part, key, container, uploadId));
      final long start = System.currentTimeMillis();
      final ListenableFuture<String> futureETag = executor.submit(new Callable<String>() {
         @Override public String call() throws Exception {
            return client.uploadPart(container, key, part, uploadId, chunkedPart);
         }
      });
      futureETag.addListener(new Runnable() {
         @Override
         public void run() {
            // the original listing is truncated here; the body below is a sketch
            // following the contract of the surrounding strategy code
            try {
               etags.put(part, futureETag.get()); // record the part's ETag on success
            } catch (Exception e) {
               errorMap.put(part, e);                     // keep the failure for diagnostics
               errors.incrementAndGet();                  // count it against maxRetries
               toRetry.add(new Part(part, offset, size)); // queue the part for a retry pass
            } finally {
               activeParts.remove(part); // free a slot in the bounded queue
               latch.countDown();
            }
         }
      }, executor); // reusing the submit executor for the listener (assumed)
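The strategy body in this revision is essentially unchanged; beyond the client acquisition, the only visible difference is the !toRetry.isEmpty() check in the retry loop.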

                        // preceding call truncated in the original listing; the chunking
                        // algorithm here is sized from the payload's content length
                        .getContentLength());
                  int parts = algorithm.getParts();
                  long chunkSize = algorithm.getChunkSize();
                  long remaining = algorithm.getRemaining();
                  if (parts > 0) {
                     final AWSS3Client client = blobstore.getContext().unwrapApi(AWSS3Client.class);
                     String uploadId = null;
                     final Map<Integer, ListenableFuture<String>> futureParts =
                        new ConcurrentHashMap<Integer, ListenableFuture<String>>();
                     final Map<Integer, Exception> errorMap = Maps.newHashMap();
                     AtomicInteger errors = new AtomicInteger(0);
                     int maxRetries = Math.max(minRetries, parts * maxPercentRetries / 100);
                     int effectiveParts = remaining > 0 ? parts + 1 : parts;
                     try {
                        uploadId = client.initiateMultipartUpload(container,
                                 ObjectMetadataBuilder.create().key(key).build()); // TODO md5
                        logger.debug(String.format("initiated multipart upload of %s to container %s" +
                              " with uploadId %s consisting from %s part (possible max. retries: %d)",
                              key, container, uploadId, effectiveParts, maxRetries));
                        // we need a bounded-blocking queue to control the amount of parallel jobs
                        ArrayBlockingQueue<Integer> activeParts = new ArrayBlockingQueue<Integer>(parallelDegree);
                        Queue<Part> toRetry = new ConcurrentLinkedQueue<Part>();
                        SortedMap<Integer, String> etags = new ConcurrentSkipListMap<Integer, String>();
                        CountDownLatch latch = new CountDownLatch(effectiveParts);
                        int part;
                        while ((part = algorithm.getNextPart()) <= parts) {
                           Integer partKey = Integer.valueOf(part);
                           activeParts.put(partKey);
                           prepareUploadPart(container, key, uploadId, partKey, payload,
                                 algorithm.getNextChunkOffset(), chunkSize, etags,
                                 activeParts, futureParts, errors, maxRetries, errorMap, toRetry, latch);
                        }
                        if (remaining > 0) {
                           Integer partKey = Integer.valueOf(part);
                           activeParts.put(partKey);
                           prepareUploadPart(container, key, uploadId, partKey, payload,
                                 algorithm.getNextChunkOffset(), remaining, etags,
                                 activeParts, futureParts, errors, maxRetries, errorMap, toRetry, latch);
                        }
                        latch.await();
                        // handling retries
                        while (errors.get() <= maxRetries && !toRetry.isEmpty()) {
                           int atOnce = Math.min(Math.min(toRetry.size(), errors.get()), parallelDegree);
                           CountDownLatch retryLatch = new CountDownLatch(atOnce);
                           for (int i = 0; i < atOnce; i++) {
                              Part failedPart = toRetry.poll();
                              Integer partKey = Integer.valueOf(failedPart.getPart());
                              activeParts.put(partKey);
                              prepareUploadPart(container, key, uploadId, partKey, payload,
                                    failedPart.getOffset(), failedPart.getSize(), etags,
                                    activeParts, futureParts, errors, maxRetries, errorMap, toRetry, retryLatch);
                           }
                           retryLatch.await();
                        }
                        if (errors.get() > maxRetries) {
                           throw new BlobRuntimeException(String.format(
                                 "Too many failed parts: %s while multipart upload of %s to container %s with uploadId %s",
                                 errors.get(), key, container, uploadId));
                        }
                        String eTag = client.completeMultipartUpload(container, key, uploadId, etags);
                        logger.debug(String.format("multipart upload of %s to container %s with uploadId %s" +
                            " successfully finished with %s retries", key, container, uploadId, errors.get()));
                        return eTag;
                     } catch (Exception ex) {
                        RuntimeException rtex = Throwables2.getFirstThrowableOfType(ex, RuntimeException.class);
                        if (rtex == null) {
                           rtex = new RuntimeException(ex);
                        }
                        for (Map.Entry<Integer, ListenableFuture<String>> entry : futureParts.entrySet()) {
                           entry.getValue().cancel(false);
                        }
                        if (uploadId != null) {
                           client.abortMultipartUpload(container, key, uploadId);
                        }
                        throw rtex;
                     }
                  } else {
                     // Issue 936: don't just call putBlob, as that will see options=multiPart and
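Finally, a test-support snippet: the constructor pins the provider id to aws-s3, and createProviderMetadata returns the AWS-specific metadata consumed by the base test class.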

      provider = "aws-s3";
   }
  
   @Override
   public ProviderMetadata createProviderMetadata() {
      return new AWSS3ProviderMetadata();
   }
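Provider metadata registered this way can also be looked up by id at runtime; a small sketch, assuming the aws-s3 provider jar is on the classpath:

   import org.jclouds.providers.ProviderMetadata;
   import org.jclouds.providers.Providers;

   public class LookupProvider {
      public static void main(String[] args) {
         // resolves the same metadata the test above returns directly
         ProviderMetadata metadata = Providers.withId("aws-s3");
         System.out.println(metadata.getId() + ": " + metadata.getName());
      }
   }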

      provider = "aws-s3";
   }
  
   @Override
   public ProviderMetadata createProviderMetadata() {
      return new AWSS3ProviderMetadata();
   }
View Full Code Here
