Examples of LoadPlan


Examples of org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.LoadPlan

   
    boolean needToSave = inspector.needToSave();
   
    // Plan our load. This will throw if it's impossible to load from the
    // data that's available.
    LoadPlan loadPlan = inspector.createLoadPlan();   
    LOG.debug("Planning to load image using following plan:\n" + loadPlan);

   
    // Recover from previous interrupted checkpoint, if any
    needToSave |= loadPlan.doRecovery();

    //
    // Load in bits
    //
    StorageDirectory sdForProperties =
      loadPlan.getStorageDirectoryForProperties();
    storage.readProperties(sdForProperties);
    File imageFile = loadPlan.getImageFile();

    try {
      if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT,
                                 getLayoutVersion())) {
        // For txid-based layout, we should have a .md5 file
        // next to the image file
        loadFSImage(imageFile);
      } else if (LayoutVersion.supports(Feature.FSIMAGE_CHECKSUM,
                                        getLayoutVersion())) {
        // In 0.22, we have the checksum stored in the VERSION file.
        String md5 = storage.getDeprecatedProperty(
            NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY);
        if (md5 == null) {
          throw new InconsistentFSStateException(sdForProperties.getRoot(),
              "Message digest property " +
              NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY +
              " not set for storage directory " + sdForProperties.getRoot());
        }
        loadFSImage(imageFile, new MD5Hash(md5));
      } else {
        // We don't have any record of the md5sum
        loadFSImage(imageFile, null);
      }
    } catch (IOException ioe) {
      throw new IOException("Failed to load image from " + loadPlan.getImageFile(), ioe);
    }
   
    long numLoaded = loadEdits(loadPlan.getEditsFiles());
    needToSave |= needsResaveBasedOnStaleCheckpoint(imageFile, numLoaded);
   
    // update the txid for the edit log
    editLog.setNextTxId(storage.getMostRecentCheckpointTxId() + numLoaded + 1);
    return needToSave;
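Taken together, the calls above cover the whole LoadPlan contract on the HDFS side: the inspector plans the load, doRecovery() cleans up an interrupted checkpoint, and the plan then names the storage directory whose VERSION properties apply, the single image file to read, and the edit logs to replay. The fragment below is a condensed sketch of that sequence, using only the LoadPlan methods visible above; the enclosing method name and the commented-out loading calls are illustrative, not part of the Hadoop code.

    // Condensed sketch of the load sequence above; loadFromPlan is an
    // illustrative name, the LoadPlan calls are the ones shown in the example.
    boolean loadFromPlan(FSImageStorageInspector inspector, NNStorage storage)
        throws IOException {
      boolean needToSave = inspector.needToSave();

      // Throws if no consistent image/edits combination can be loaded.
      LoadPlan plan = inspector.createLoadPlan();

      // Recover from a previously interrupted checkpoint, if any.
      needToSave |= plan.doRecovery();

      // Which directory's VERSION properties to trust ...
      storage.readProperties(plan.getStorageDirectoryForProperties());

      // ... which single fsimage file to load ...
      File imageFile = plan.getImageFile();

      // ... and which edit logs to replay, in the order they should be applied.
      Collection<File> editsFiles = plan.getEditsFiles();

      // loadFSImage(imageFile) and loadEdits(editsFiles) would follow,
      // exactly as in the full example above.
      return needToSave;
    }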

Examples of org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.LoadPlan

    FSImageFile latestImage = inspector.getLatestImage();
    assertEquals(456, latestImage.txId);
    assertSame(mockDir, latestImage.sd);
    assertTrue(inspector.isUpgradeFinalized());
   
    LoadPlan plan = inspector.createLoadPlan();
    LOG.info("Plan: " + plan);
   
    assertEquals(new File("/foo/current/" + getImageFileName(456)),
                 plan.getImageFile());
    assertArrayEquals(new File[] {
        new File("/foo/current/" + getInProgressEditsFileName(457)) },
        plan.getEditsFiles().toArray(new File[0]));
  }

Examples of org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.LoadPlan

    inspector.inspectDirectory(mockDir);
    mockLogValidation(inspector,
        "/foo/current/" + getInProgressEditsFileName(901), 51);

    LoadPlan plan = inspector.createLoadPlan();
    LOG.info("Plan: " + plan);
   
    assertEquals(new File("/foo/current/" + getImageFileName(456)),
                 plan.getImageFile());
    assertArrayEquals(new File[] {
        new File("/foo/current/" + getFinalizedEditsFileName(457,900)),
        new File("/foo/current/" + getInProgressEditsFileName(901)),
        new File("/foo/current/" + getFinalizedEditsFileName(952,1000)) },
        plan.getEditsFiles().toArray(new File[0]));

  }

Examples of org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.LoadPlan

    LogGroup lg = inspector.logGroups.get(123L);
    assertEquals(3, lg.logs.size());
    EditLogFile inProgressLog = lg.logs.get(2);
    assertTrue(inProgressLog.isInProgress());
   
    LoadPlan plan = inspector.createLoadPlan();

    // Check that it was marked corrupt.
    assertFalse(lg.logs.get(0).isCorrupt());
    assertFalse(lg.logs.get(1).isCorrupt());
    assertTrue(lg.logs.get(2).isCorrupt());

   
    // Calling recover should move it aside
    inProgressLog = spy(inProgressLog);
    Mockito.doNothing().when(inProgressLog).moveAsideCorruptFile();
    lg.logs.set(2, inProgressLog);
   
    plan.doRecovery();
   
    Mockito.verify(inProgressLog).moveAsideCorruptFile();
  }


Examples of org.hibernate.loader.plan.spi.LoadPlan

            buildingParameters.getLockMode() != null
                ? buildingParameters.getLockMode()
                : buildingParameters.getLockOptions().getLockMode()
    );

    final LoadPlan plan = MetamodelDrivenLoadPlanBuilder.buildRootCollectionLoadPlan( strategy, collectionPersister );
    this.staticLoadQuery = BatchingLoadQueryDetailsFactory.makeCollectionLoadQueryDetails(
        collectionPersister,
        plan,
        buildingParameters
    );

Examples of org.hibernate.loader.plan.spi.LoadPlan

      strategy = new FetchStyleLoadPlanBuildingAssociationVisitationStrategy(
          factory, buildingParameters.getQueryInfluencers(),buildingParameters.getLockMode()
      );
    }

    final LoadPlan plan = MetamodelDrivenLoadPlanBuilder.buildRootEntityLoadPlan( strategy, entityPersister );
    this.staticLoadQuery = BatchingLoadQueryDetailsFactory.makeEntityLoadQueryDetails(
        plan,
        uniqueKeyColumnNames,
        buildingParameters,
        factory
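Both Hibernate snippets above follow the same two-step pattern: build a fetch-style association visitation strategy, hand it together with the relevant persister to MetamodelDrivenLoadPlanBuilder, and then turn the resulting LoadPlan into static load-query details. The fragment below condenses the two builder entry points; the local variable names are illustrative, and only the constructor and builder calls that appear in the snippets are used.

    // Strategy controlling how associations are fetched while the plan is built.
    final FetchStyleLoadPlanBuildingAssociationVisitationStrategy strategy =
        new FetchStyleLoadPlanBuildingAssociationVisitationStrategy(
            factory,                                     // session factory, as above
            buildingParameters.getQueryInfluencers(),
            buildingParameters.getLockMode()
        );

    // Root entity load plan (driven by an entity persister) ...
    final LoadPlan entityPlan =
        MetamodelDrivenLoadPlanBuilder.buildRootEntityLoadPlan( strategy, entityPersister );

    // ... or root collection load plan (driven by a collection persister).
    final LoadPlan collectionPlan =
        MetamodelDrivenLoadPlanBuilder.buildRootCollectionLoadPlan( strategy, collectionPersister );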

Examples of org.hibernate.loader.plan.spi.LoadPlan

      boolean returnProxies,
      boolean readOnly,
      ResultTransformer forcedResultTransformer,
      List<AfterLoadAction> afterLoadActionList) throws SQLException {

    final LoadPlan loadPlan = loadPlanAdvisor.advise( this.baseLoadPlan );
    if ( loadPlan == null ) {
      throw new IllegalStateException( "LoadPlanAdvisor returned null" );
    }

    handlePotentiallyEmptyCollectionRootReturns( loadPlan, queryParameters.getCollectionKeys(), resultSet, session );

    final int maxRows;
    final RowSelection selection = queryParameters.getRowSelection();
    if ( LimitHelper.hasMaxRows( selection ) ) {
      maxRows = selection.getMaxRows();
      LOG.tracef( "Limiting ResultSet processing to just %s rows", maxRows );
    }
    else {
      maxRows = Integer.MAX_VALUE;
    }

    final ResultSetProcessingContextImpl context = new ResultSetProcessingContextImpl(
        resultSet,
        session,
        loadPlan,
        readOnly,
//        true, // use optional entity key?  for now, always say yes
        false, // use optional entity key?  actually for now always say no since in the simple test cases true causes failures because there is no optional key
        queryParameters,
        namedParameterContext,
        aliasResolutionContext,
        hadSubselectFetches
    );

    final List loadResults = new ArrayList();

    final int rootReturnCount = loadPlan.getReturns().size();

    LOG.trace( "Processing result set" );
    int count;
    for ( count = 0; count < maxRows && resultSet.next(); count++ ) {
      LOG.debugf( "Starting ResultSet row #%s", count );

      Object logicalRow;
      if ( rootReturnCount == 1 ) {
        loadPlan.getReturns().get( 0 ).hydrate( resultSet, context );
        loadPlan.getReturns().get( 0 ).resolve( resultSet, context );

        logicalRow = loadPlan.getReturns().get( 0 ).read( resultSet, context );
        context.readCollectionElements( new Object[] { logicalRow } );
      }
      else {
        for ( Return rootReturn : loadPlan.getReturns() ) {
          rootReturn.hydrate( resultSet, context );
        }
        for ( Return rootReturn : loadPlan.getReturns() ) {
          rootReturn.resolve( resultSet, context );
        }

        logicalRow = new Object[ rootReturnCount ];
        int pos = 0;
        for ( Return rootReturn : loadPlan.getReturns() ) {
          ( (Object[]) logicalRow )[pos] = rootReturn.read( resultSet, context );
          pos++;
        }
        context.readCollectionElements( (Object[]) logicalRow );
      }
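At execution time the per-row protocol in the snippet above is the same no matter how many root returns the plan has: hydrate every root Return, then resolve every root Return, then read each one into the logical row, and finally let the context read any collection elements. The fragment below is a condensed sketch of that loop, reusing the resultSet, context and loadPlan variables from the snippet; row-count limiting and collection of the results are omitted.

    // Condensed sketch of the hydrate/resolve/read protocol shown above.
    while ( resultSet.next() ) {
      final List<? extends Return> rootReturns = loadPlan.getReturns();

      for ( Return rootReturn : rootReturns ) {
        rootReturn.hydrate( resultSet, context );   // phase 1: pull raw column values
      }
      for ( Return rootReturn : rootReturns ) {
        rootReturn.resolve( resultSet, context );   // phase 2: resolve identities/instances
      }

      final Object[] logicalRow = new Object[ rootReturns.size() ];
      for ( int i = 0; i < rootReturns.size(); i++ ) {
        logicalRow[i] = rootReturns.get( i ).read( resultSet, context );  // phase 3: assemble the row
      }
      context.readCollectionElements( logicalRow );
    }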
