Package freenet.client

Examples of freenet.client.Metadata
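The fragments below are taken from the Freenet client code and show how Metadata documents are created, serialised to Buckets and parsed back. As a quick orientation, here is a minimal, hypothetical round-trip sketch; the Metadata constructor, toBucket() and Metadata.construct() calls are the ones that appear in the fragments that follow, while the wrapper class, the text/html MIME type and the import paths are illustrative assumptions rather than code from the project.

    import freenet.client.ClientMetadata;
    import freenet.client.Metadata;
    import freenet.client.Metadata.DocumentType;
    import freenet.keys.FreenetURI;
    import freenet.support.api.Bucket;
    import freenet.support.api.BucketFactory;

    public final class MetadataRoundTripSketch {
      /** Hypothetical helper: round-trip a SIMPLE_REDIRECT document through a Bucket. */
      public static Metadata roundTrip(FreenetURI targetURI, BucketFactory bucketFactory) throws Exception {
        // Same constructor as in the JokerPutHandler fragment below.
        ClientMetadata mime = new ClientMetadata("text/html");
        Metadata redirect = new Metadata(DocumentType.SIMPLE_REDIRECT, null, null, targetURI, mime);
        // Serialise; toBucket() may throw MetadataUnresolvedException if the document
        // still references unresolved children (only possible for manifests).
        Bucket serialised = redirect.toBucket(bucketFactory);
        try {
          // Parse it back; may throw MetadataParseException or IOException.
          return Metadata.construct(serialised);
        } finally {
          serialised.free();
        }
      }
    }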


        private static final long serialVersionUID = 1L;

    /** a normal (freeform) redirect */
    public JokerPutHandler(BaseManifestPutter bmp, String name, FreenetURI targetURI2, ClientMetadata cm2) {
      super(bmp, null, name, null, (Metadata)null, cm2);
      Metadata m = new Metadata(DocumentType.SIMPLE_REDIRECT, null, null, targetURI2, cm2);
      metadata = m;
    }
View Full Code Here


    }

    /** a short symlink */
    public JokerPutHandler(BaseManifestPutter bmp, PutHandler parent, String name, String target) {
      super(bmp, parent, name, name, (Metadata)null, null);
      Metadata m = new Metadata(DocumentType.SYMBOLIC_SHORTLINK, null, null, target, null);
      metadata = m;
    }
View Full Code Here
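Taken together, these two constructors show the difference between the two lightweight redirect kinds: a SIMPLE_REDIRECT carries a full FreenetURI, while a SYMBOLIC_SHORTLINK only names another entry in the same manifest and has to be resolved against that manifest at fetch time (see the SingleFileFetcher fragment near the end of this page). A condensed, hypothetical version of that resolution step is sketched below; the resolveInManifest helper is illustrative, but isSymbolicShortlink(), getSymbolicShortlinkTargetName() and getDocument() are the calls used in that fragment.

    /** Hypothetical sketch: look up a name in a manifest, following one symbolic shortlink. */
    static Metadata resolveInManifest(Metadata manifest, String name) throws FetchException {
      Metadata doc = manifest.getDocument(name);
      if (doc != null && doc.isSymbolicShortlink()) {
        String target = doc.getSymbolicShortlinkTargetName();
        if (name.equals(target)) // same self-reference check as in SingleFileFetcher
          throw new FetchException(FetchExceptionMode.INVALID_METADATA, "redirect loop: " + name);
        doc = manifest.getDocument(target);
      }
      return doc;
    }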

      }
      if(reportMetadataOnly) {
        SingleBlockInserter dataPutter = new SingleBlockInserter(parent, data, codecNumber, FreenetURI.EMPTY_CHK_URI, ctx, realTimeFlag, cb, metadata, (int)origSize, -1, true, true, token, context, persistent, shouldFreeData, forSplitfile ? ctx.extraInsertsSplitfileHeaderBlock : ctx.extraInsertsSingleBlock, cryptoAlgorithm, forceCryptoKey);
        if(logMINOR)
          Logger.minor(this, "Inserting with metadata: "+dataPutter+" for "+this);
        Metadata meta = makeMetadata(archiveType, dataPutter.getURI(context), hashes);
        cb.onMetadata(meta, this, context);
        cb.onTransition(this, dataPutter, context);
        dataPutter.schedule(context);
        if(!isUSK)
          cb.onBlockSetFinished(this, context);
        synchronized(this) {
          // Don't delete them because they are being passed on.
          origHashes = null;
        }
      } else {
        MultiPutCompletionCallback mcb =
          new MultiPutCompletionCallback(cb, parent, token, persistent, false, ctx.earlyEncode);
        SingleBlockInserter dataPutter = new SingleBlockInserter(parent, data, codecNumber, FreenetURI.EMPTY_CHK_URI, ctx, realTimeFlag, mcb, metadata, (int)origSize, -1, true, false, token, context, persistent, shouldFreeData, forSplitfile ? ctx.extraInsertsSplitfileHeaderBlock : ctx.extraInsertsSingleBlock, cryptoAlgorithm, forceCryptoKey);
        if(logMINOR)
          Logger.minor(this, "Inserting data: "+dataPutter+" for "+this);
        Metadata meta = makeMetadata(archiveType, dataPutter.getURI(context), hashes);
        RandomAccessBucket metadataBucket;
        try {
          metadataBucket = meta.toBucket(context.getBucketFactory(persistent));
        } catch (IOException e) {
          Logger.error(this, "Caught "+e, e);
          throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
        } catch (MetadataUnresolvedException e) {
          // Impossible, we're not inserting a manifest.
View Full Code Here
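The "Impossible" comment in the catch block above holds because a plain redirect never contains unresolved child documents. When a manifest is being serialised, MetadataUnresolvedException is a real possibility and is handled by resolving the missing children and retrying, as in the makeMetadata(ClientContext) fragment further down this page. A condensed, hypothetical form of that retry loop (meta, context, persistent and resolve() are the names used in those fragments):

    // Sketch: serialising manifest metadata, where MetadataUnresolvedException can occur.
    // IOException handling (BUCKET_ERROR, as in the fragment above) is omitted for brevity.
    RandomAccessBucket metadataBucket;
    int x = 0;
    while (true) {
      try {
        metadataBucket = meta.toBucket(context.getBucketFactory(persistent));
        break;
      } catch (MetadataUnresolvedException e) {
        // Write out the unresolved child documents first, then retry.
        x = resolve(e, x, null, null, context);
      }
    }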

      });
    }
  }
 
  private Metadata makeMetadata(ARCHIVE_TYPE archiveType, FreenetURI uri, HashResult[] hashes) {
    Metadata meta = null;
    boolean allowTopBlocks = origDataLength != 0;
    int req = 0;
    int total = 0;
    long data = 0;
    long compressed = 0;
    boolean topDontCompress = false;
    CompatibilityMode topCompatibilityMode = CompatibilityMode.COMPAT_UNKNOWN;
    if(allowTopBlocks) {
      req = parent.getMinSuccessFetchBlocks();
      total = parent.totalBlocks;
      topDontCompress = ctx.dontCompress;
      topCompatibilityMode = ctx.getCompatibilityMode();
      data = origDataLength;
      compressed = origCompressedDataLength;
    }
    if(archiveType != null)
      meta = new Metadata(DocumentType.ARCHIVE_MANIFEST, archiveType, null, uri, block.clientMetadata, data, compressed, req, total, topDontCompress, topCompatibilityMode, hashes);
    else // redirect
      meta = new Metadata(DocumentType.SIMPLE_REDIRECT, archiveType, null, uri, block.clientMetadata, data, compressed, req, total, topDontCompress, topCompatibilityMode, hashes);
    if(targetFilename != null) {
      HashMap<String, Object> hm = new HashMap<String, Object>();
      hm.put(targetFilename, meta);
      meta = Metadata.mkRedirectionManifestWithMetadata(hm);
    }
View Full Code Here
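The tail of makeMetadata() shows how a redirect acquires a filename: the document is wrapped in a one-entry manifest via Metadata.mkRedirectionManifestWithMetadata(). In isolation, and with the helper name and its arguments as illustrative placeholders, that wrapping looks like this:

    /** Hypothetical sketch: a redirect to targetURI, served under a single filename. */
    static Metadata namedRedirect(String filename, FreenetURI targetURI, ClientMetadata cm) {
      Metadata redirect = new Metadata(DocumentType.SIMPLE_REDIRECT, null, null, targetURI, cm);
      HashMap<String, Object> entries = new HashMap<String, Object>();
      entries.put(filename, redirect); // filename -> Metadata, as in the fragment above
      return Metadata.mkRedirectionManifestWithMetadata(entries);
    }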

  private void makeMetadata(ClientContext context) {

    Bucket bucket = null;
    int x = 0;

    Metadata md = makeManifest(origMetadata, "");

    while(true) {
      try {
        bucket = md.toBucket(context.getBucketFactory(persistent));
        containerItems.add(new ContainerElement(bucket, ".metadata"));
        return;
      } catch (MetadataUnresolvedException e) {
        try {
          x = resolve(e, x, null, null, context);
View Full Code Here

        ClientMetadata cm;
        if(mimeType == null || mimeType.equals(DefaultMIMETypes.DEFAULT_MIME_TYPE))
          cm = null;
        else
          cm = new ClientMetadata(mimeType);
        Metadata m;
        if(element.targetURI != null) {
          //System.out.println("Decompose: "+name+" (ManifestElement, Redirect)");
          m = new Metadata(DocumentType.SIMPLE_REDIRECT, null, null, element.targetURI, cm);
        } else {
          //System.out.println("Decompose: "+name+" (ManifestElement, Data)");
          containerItems.add(new ContainerElement(element.getData(), archivePrefix+name));
          m = new Metadata(DocumentType.ARCHIVE_INTERNAL_REDIRECT, null, null, archivePrefix+element.fullName, cm);
        }
        smc.addItem(name, m);
      }
    }
    return smc.getMetadata();
View Full Code Here
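In the fragment above, smc collects one Metadata document per name and finally emits the enclosing manifest via getMetadata(). Its declaration is outside the quoted code; assuming it is a Metadata.SimpleManifestComposer (an assumption, based on the composer used elsewhere in the manifest putters), building a small manifest from scratch would look roughly like the sketch below. Both entry constructors are taken from the fragment; indexURI is a placeholder FreenetURI.

    // Assumption: the composer type and its no-argument constructor are guesses;
    // only addItem(String, Metadata) and getMetadata() are shown in the fragment above.
    Metadata.SimpleManifestComposer smc = new Metadata.SimpleManifestComposer();
    smc.addItem("index.html",
        new Metadata(DocumentType.SIMPLE_REDIRECT, null, null, indexURI, new ClientMetadata("text/html")));
    smc.addItem("logo.png",
        new Metadata(DocumentType.ARCHIVE_INTERNAL_REDIRECT, null, null, "container/logo.png", new ClientMetadata("image/png")));
    Metadata manifestDoc = smc.getMetadata(); // the enclosing manifest document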

            context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() {
               
                @Override
                public boolean run(ClientContext context) {
                    try {
                        Metadata metadata = storage.encodeMetadata();
                        reportMetadata(metadata);
                        if(ctx.getCHKOnly)
                            onSucceeded(metadata);
                    } catch (IOException e) {
                        storage.fail(new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null));
View Full Code Here

                checkKeys[checkPtr++] = segment.readKey(i+segment.dataBlockCount+segment.crossCheckBlockCount);
            }
        }
        assert(dataPtr == dataKeys.length);
        assert(checkPtr == checkKeys.length);
        return new Metadata(splitfileType, dataKeys, checkKeys, segmentSize, checkSegmentSize,
                deductBlocksFromSegments, clientMetadata, dataLength, archiveType, compressionCodec,
                decompressedLength, isMetadata, hashes, hashThisLayerOnly, origDataSize,
                origCompressedDataSize, topRequiredBlocks, topTotalBlocks, topDontCompress,
                cmode, splitfileCryptoAlgorithm, splitfileCryptoKey,
                specifySplitfileKeyInMetadata, crossCheckBlocks);
View Full Code Here

                        if(hasFinished()) return false;
                        status = Status.GENERATING_METADATA;
                    }
                    if(logMINOR) Logger.minor(this, "Generating metadata...");
                    try {
                        Metadata metadata = encodeMetadata();
                        synchronized(this) {
                            status = Status.SUCCEEDED;
                        }
                        callback.onSucceeded(metadata);
                    } catch (IOException e) {
View Full Code Here

        if(logMINOR) Logger.minor(this, "Next meta-string: "+name+" length "+(name == null ? -1 : name.length())+" for "+this);
        if(name == null) {
          if(!persistent) {
            metadata = metadata.getDefaultDocument();
          } else {
            Metadata newMeta = metadata.grabDefaultDocument();
            metadata = newMeta;
          }
          if(metadata == null)
            throw new FetchException(FetchExceptionMode.NOT_ENOUGH_PATH_COMPONENTS, -1, false, null, uri.addMetaStrings(new String[] { "" }));
        } else {
          if(!persistent) {
            Metadata origMd = metadata;
            metadata = origMd.getDocument(name);
            if (metadata != null && metadata.isSymbolicShortlink()) {
              String oldName = name;
              name = metadata.getSymbolicShortlinkTargetName();
              if (oldName.equals(name)) throw new FetchException(FetchExceptionMode.INVALID_METADATA, "redirect loop: "+name);
              metadata = origMd.getDocument(name);
            }
            thisKey = thisKey.pushMetaString(name);
          } else {
            Metadata newMeta = metadata.grabDocument(name);
            if (newMeta != null && newMeta.isSymbolicShortlink()) {
              String oldName = name;
              name = newMeta.getSymbolicShortlinkTargetName();
              if (oldName.equals(name)) throw new FetchException(FetchExceptionMode.INVALID_METADATA, "redirect loop: "+name);
              newMeta = metadata.getDocument(name);
            }
            metadata = newMeta;
            FreenetURI oldThisKey = thisKey;
            thisKey = thisKey.pushMetaString(name);
          }
          if(metadata == null)
            throw new FetchException(FetchExceptionMode.NOT_IN_ARCHIVE, "can't find "+name);
        }
        continue; // loop
      } else if(metadata.isArchiveManifest()) {
        if(logMINOR) Logger.minor(this, "Is archive manifest (type="+metadata.getArchiveType()+" codec="+metadata.getCompressionCodec()+')');
        if(metaStrings.isEmpty() && ctx.returnZIPManifests) {
          // Just return the archive, whole.
          metadata.setSimpleRedirect();
          continue;
        }
        // First we need the archive metadata.
        // Then parse it. Then we may need to fetch something from inside the archive.
        // It's more efficient to keep the existing ah if we can, and it is vital in
        // the case of binary blobs.
        if(ah == null || !ah.getKey().equals(thisKey)) {
          // Do loop detection on the archive that we are about to fetch.
          actx.doLoopDetection(thisKey);
          ah = context.archiveManager.makeHandler(thisKey, metadata.getArchiveType(), metadata.getCompressionCodec(),
              (parent instanceof ClientGetter ? ((ClientGetter)parent).collectingBinaryBlob() : false), persistent);
        }
        archiveMetadata = metadata;
        metadata = null; // Copied to archiveMetadata, so just drop the local reference here.
        // ah is set. This means we are currently handling an archive.
        Bucket metadataBucket;
        metadataBucket = ah.getMetadata(actx, context.archiveManager);
        if(metadataBucket != null) {
          try {
            metadata = Metadata.construct(metadataBucket);
            metadataBucket.free();
          } catch (InsufficientDiskSpaceException e) {
              throw new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE);
          } catch (IOException e) {
            // Bucket error?
            throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
          }
        } else {
          final boolean persistent = this.persistent;
          fetchArchive(false, archiveMetadata, ArchiveManager.METADATA_NAME, new ArchiveExtractCallback() {
                        private static final long serialVersionUID = 1L;
                        @Override
            public void gotBucket(Bucket data, ClientContext context) {
              if(logMINOR) Logger.minor(this, "gotBucket on "+SingleFileFetcher.this+" persistent="+persistent);
              try {
                metadata = Metadata.construct(data);
                data.free();
                innerWrapHandleMetadata(true, context);
              } catch (MetadataParseException e) {
                // Invalid metadata
                onFailure(new FetchException(FetchExceptionMode.INVALID_METADATA, e), false, context);
                return;
              } catch (IOException e) {
                // Bucket error?
                onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, e), false, context);
                return;
              }
            }
            @Override
            public void notInArchive(ClientContext context) {
              onFailure(new FetchException(FetchExceptionMode.INTERNAL_ERROR, "No metadata in container! Cannot happen as ArchiveManager should synthesise some!"), false, context);
            }
            @Override
            public void onFailed(ArchiveRestartException e, ClientContext context) {
              SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
            }
            @Override
            public void onFailed(ArchiveFailureException e, ClientContext context) {
              SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
            }
          }, context); // will result in this function being called again
          return;
        }
        continue;
      } else if(metadata.isArchiveMetadataRedirect()) {
        if(logMINOR) Logger.minor(this, "Is archive-metadata");
        // Fetch it from the archive
        if(ah == null)
          throw new FetchException(FetchExceptionMode.UNKNOWN_METADATA, "Archive redirect not in an archive manifest");
        String filename = metadata.getArchiveInternalName();
        if(logMINOR) Logger.minor(this, "Fetching "+filename);
        Bucket dataBucket = ah.get(filename, actx, context.archiveManager);
        if(dataBucket != null) {
          if(logMINOR) Logger.minor(this, "Returning data");
          final Metadata newMetadata;
          try {
            newMetadata = Metadata.construct(dataBucket);
            dataBucket.free();
          } catch (InsufficientDiskSpaceException e) {
              throw new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE);
          } catch (IOException e) {
            throw new FetchException(FetchExceptionMode.BUCKET_ERROR);
          }
          synchronized(this) {
            metadata = newMetadata;
          }
          continue;
        } else {
          if(logMINOR) Logger.minor(this, "Fetching archive (thisKey="+thisKey+ ')');
          // Metadata cannot contain pointers to files which don't exist.
          // We enforce this in ArchiveHandler.
          // Therefore, the archive needs to be fetched.
          final boolean persistent = this.persistent;
          fetchArchive(true, archiveMetadata, filename, new ArchiveExtractCallback() {
                        private static final long serialVersionUID = 1L;
                        @Override
            public void gotBucket(Bucket data, ClientContext context) {
              if(logMINOR) Logger.minor(this, "Returning data");
              final Metadata newMetadata;
              try {
                newMetadata = Metadata.construct(data);
                synchronized(SingleFileFetcher.this) {
                  metadata = newMetadata;
                }
View Full Code Here
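Both ArchiveExtractCallback implementations in this fragment reduce to the same pattern: the Bucket that comes out of the archive is parsed with Metadata.construct(), the Bucket is released, and parse or I/O failures are mapped to FetchException. A condensed, hypothetical helper showing that pattern (the finally block is a simplification; the fragment itself frees the Bucket only on the success path):

    /** Hypothetical sketch: parse metadata out of a fetched Bucket and always release it. */
    static Metadata parseAndFree(Bucket data) throws FetchException {
      try {
        return Metadata.construct(data);
      } catch (MetadataParseException e) {
        // Invalid metadata, as in gotBucket() above.
        throw new FetchException(FetchExceptionMode.INVALID_METADATA, e);
      } catch (IOException e) {
        // Bucket error, as in gotBucket() above.
        throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
      } finally {
        data.free();
      }
    }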
