Package org.apache.derby.impl.store.raw.data

Examples of org.apache.derby.impl.store.raw.data.BasePage


  public int fetchNumFieldsAtSlot(int slot) throws StandardException
  {
    StoredRecordHeader recordHeader = getHeaderAtSlot(slot);

    if (!recordHeader.hasOverflow())
      return super.fetchNumFieldsAtSlot(slot);

    BasePage overflowPage = getOverflowPage(recordHeader.getOverflowPage());
    int count = overflowPage.fetchNumFieldsAtSlot(
        getOverflowSlot(overflowPage, recordHeader));
    overflowPage.unlatch();
    return count;
  }
View Full Code Here
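This example follows a record's overflow pointer to count its fields: if the header says the record continues on another page, that page is latched, queried (possibly recursing through further overflow portions), and unlatched. A minimal standalone sketch of the pattern, with hypothetical LatchedPage and Header types standing in for BasePage and StoredRecordHeader, not Derby's actual API:

// Minimal sketch, hypothetical types; not Derby code.
final class OverflowCountSketch {

    interface Header {
        boolean hasOverflow();
        long overflowPage();
    }

    interface LatchedPage {
        Header headerAtSlot(int slot);
        int localFieldCount(int slot);                  // fields stored on this page
        LatchedPage latchOverflowPage(long pageNumber); // latches the target page
        int overflowSlotFor(Header h);
        void unlatch();
    }

    static int fieldCount(LatchedPage page, int slot) {
        Header h = page.headerAtSlot(slot);
        if (!h.hasOverflow()) {
            return page.localFieldCount(slot);  // record lives entirely here
        }
        LatchedPage overflow = page.latchOverflowPage(h.overflowPage());
        try {
            // recurse: an overflow portion may itself overflow again
            return fieldCount(overflow, overflow.overflowSlotFor(h));
        } finally {
            overflow.unlatch();                 // release even on error
        }
    }
}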


    FormatableBitSet    validColumns,
    StoredRecordHeader  recordHeader)
    throws StandardException
  {
    BasePage overflowPage = getOverflowPage(recordHeader.getOverflowPage());

    try {

      int overflowSlot = getOverflowSlot(overflowPage, recordHeader);

      overflowPage.doUpdateAtSlot(
          t, overflowSlot, recordHeader.getOverflowId(), row, validColumns);
      overflowPage.unlatch();
      overflowPage = null;

      return;

    } finally {
      if (overflowPage != null) {
        overflowPage.unlatch();
        overflowPage = null;
      }
    }
  }
View Full Code Here
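The update example above relies on an unlatch-on-all-paths idiom: the page reference is nulled out immediately after the normal release, so the finally block unlatches only when an exception skipped that release. A minimal sketch of the idiom, with a hypothetical Page type:

// Minimal sketch of the idiom; Page is hypothetical.
final class UnlatchIdiomSketch {

    interface Page {
        void update();     // some work done while the page is latched
        void unlatch();
    }

    static void updateAndRelease(Page overflowPage) {
        try {
            overflowPage.update();
            overflowPage.unlatch(); // normal path: release explicitly
            overflowPage = null;    // signal the finally block it is done
        } finally {
            if (overflowPage != null) {
                overflowPage.unlatch(); // error path: still release exactly once
            }
        }
    }
}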

          // 0 this will automatically handle any overflows in
          // this new portion

          // BasePage op = getNewOverflowPage();

          BasePage op =
              curPage.getOverflowPageForInsert(
                  slot,
                  newRow,
                  newColumnList,
                  nextColumn);

          // We have all the information from this page so unlatch it
          if (curPage != this)
          {
            curPage.unlatch();
            curPage = null;
          }

          byte mode = Page.INSERT_OVERFLOW;
          if (nextPage != 0)
            mode |= Page.INSERT_FOR_SPLIT;

          RecordHandle nextPortionHandle =
            nextPage == 0 ? null :
            owner.makeRecordHandle(nextPage, id);

          // RESOLVED (sku): even though we would like to roll back
          // these inserts with PURGE rather than with delete, we
          // have to delete because if we purge the last row from an
          // overflow page, the purge will queue a post commit to
          // remove the page.
          // While this is OK with long columns, we cannot do this
          // for long rows because long row overflow pages can be
          // shared by more than one long row, and thus it is unsafe
          // to remove the page without first latching the head page.
          // However, the insert log record does not have the head
          // row's page number, so the rollback cannot put that
          // information into the post commit work.
          RecordHandle portionHandle =
            op.insertAllowOverflow(
                0, newRow, newColumnList, nextColumn, mode, 100,
                nextPortionHandle);

          // Update the previous record header to point to new portion
          if (curPage == this)
            updateOverflowDetails(this, handle, portionHandle);
          else
            updateOverflowDetails(handle, portionHandle);
          op.unlatch();
        }
        else
        {

          // See earlier comments on checking row reserved space.
View Full Code Here
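Here the insert mode is a byte composed of bit flags: every overflow portion gets INSERT_OVERFLOW, and INSERT_FOR_SPLIT is OR-ed in when another portion follows. A small self-contained sketch of that composition, using hypothetical flag values rather than Derby's actual Page constants:

// Minimal sketch; the flag values are hypothetical, not Derby's.
final class InsertModeSketch {

    static final byte INSERT_OVERFLOW  = 0x01; // portion of an overflowed row
    static final byte INSERT_FOR_SPLIT = 0x02; // another portion follows this one

    static byte modeFor(long nextPage) {
        byte mode = INSERT_OVERFLOW;
        if (nextPage != 0) {
            mode |= INSERT_FOR_SPLIT; // link this portion to the next
        }
        return mode;
    }

    public static void main(String[] args) {
        System.out.println(modeFor(0));   // 1: last portion in the chain
        System.out.println(modeFor(42));  // 3: more portions follow
    }
}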

    {
        boolean backupCompleted = false;
        File backupFile = null;
        RandomAccessFile backupRaf = null;
        boolean isStub = false;
        BasePage page = null;

        while (!backupCompleted) {
            try {

                synchronized (this) {
                    // wait if someone is removing the
                    // container because of a drop.
                    while (inRemove)
                    {
                        try {
                            wait();
                        }
                        catch (InterruptedException ie)
                        {
                            throw StandardException.interrupt(ie);
                        }
                    }

                    if (getCommittedDropState())
                        isStub = true;
                    inBackup = true;
                }

                // create container at the backup location.
                if (isStub) {
                    // get the stub (it is a committed drop table container)
                    StorageFile file = privGetFileName((ContainerKey)getIdentity(),
                                                       true, false, true);
                    backupFile = new File(backupLocation, file.getName());

                    // directly copy the stub to the backup
                    if (!FileUtil.copyFile(dataFactory.getStorageFactory(),
                                           file, backupFile))
                    {
                        throw StandardException.newException(
                                              SQLState.RAWSTORE_ERROR_COPYING_FILE,
                                              file, backupFile);
                    }
                } else {
                    // regular container file
                    long lastPageNumber = getLastPageNumber(handle);
                    if (lastPageNumber == ContainerHandle.INVALID_PAGE_NUMBER) {
                        // last page number is invalid if there are no pages in
                        // the container yet. No need to backup this container;
                        // this container creation is yet to complete. The reason
                        // backup is getting called on such a container is
                        // because the container handle appears in the cache after
                        // the file is created on the disk but before its
                        // first page is allocated.
                        return;
                    }

                    StorageFile file =
                        privGetFileName(
                            (ContainerKey)getIdentity(), false, false, true);

                    backupFile = new File(backupLocation, file.getName());
                    backupRaf  = new RandomAccessFile(backupFile, "rw");

                    byte[] encryptionBuf = null;
                    if (dataFactory.databaseEncrypted()) {
                        // Backup uses a separate encryption buffer to encrypt
                        // the page instead of the encryption buffer used by the
                        // regular container writes. Otherwise writes to the
                        // backup would have to be synchronized with regular
                        // database writes, because backup can run in parallel
                        // to container writes.
                        encryptionBuf = new byte[pageSize];
                    }

                    // copy all the pages of the container from the database
                    // to the backup location by reading through the page cache.
                    for (long pageNumber = FIRST_ALLOC_PAGE_NUMBER;
                         pageNumber <= lastPageNumber; pageNumber++) {
                        page = getLatchedPage(handle, pageNumber);

                        // update the page array before writing to the disk
                        // with container header and encrypt it if the database
                        // is encrypted.
                        byte[] dataToWrite = updatePageArray(pageNumber,
                                                             page.getPageArray(),
                                                             encryptionBuf, false);
                        backupRaf.write(dataToWrite, 0, pageSize);

                        // unlatch releases page from cache, see
                        // StoredPage.releaseExclusive()
                        page.unlatch();
                        page = null;

                        // check if someone wants to commit drop the table while
                        // the container is being written to the backup. If so,
                        // abort the backup and restart it once the drop
                        // is complete.
                        synchronized (this)
                        {
                            if (inRemove) {
                                break;
                            }
                        }
                    }
                }

                // sync and close the backup container. In case of a stub,
                // it is already synced and closed while doing the copy.
                if (!isStub) {
                    backupRaf.getFD().sync();
                    backupRaf.close();
                    backupRaf = null;
                }

                // backup of the container is complete.
                backupCompleted = true;

            } catch (IOException ioe) {
                throw StandardException.newException(
                                                SQLState.BACKUP_FILE_IO_ERROR,
                                                ioe,
                                                backupFile);
            } finally {
                synchronized (this) {
                    inBackup = false;
                    notifyAll();
                }

                if (page != null) {
                    page.unlatch();
                    page = null;
                }

                // if backup of container is not complete, close the file
                // handles and remove the container file from the backup
View Full Code Here
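The backup loop above copies one latched page at a time through the page cache and, between pages, checks a shared flag so that a concurrent drop can abort the backup, which the outer loop then restarts once the drop completes. A compact standalone sketch of that loop, with all types hypothetical stand-ins rather than Derby's:

// Minimal sketch; PageSource and the inRemove flag are hypothetical.
import java.io.IOException;
import java.io.RandomAccessFile;

final class BackupLoopSketch {

    interface PageSource {
        byte[] latchedPageBytes(long pageNumber) throws IOException; // latch + read
        void unlatch(long pageNumber);
    }

    private final Object lock = new Object();
    private boolean inRemove; // set by a concurrent "drop container" operation

    boolean copyPages(PageSource src, RandomAccessFile out,
                      long firstPage, long lastPage) throws IOException {
        for (long p = firstPage; p <= lastPage; p++) {
            byte[] data = src.latchedPageBytes(p);
            try {
                out.write(data);
            } finally {
                src.unlatch(p);        // never hold a latch across iterations
            }
            synchronized (lock) {
                if (inRemove) {
                    return false;      // abort; caller restarts after the drop
                }
            }
        }
        out.getFD().sync();            // make the copy durable before success
        return true;
    }
}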

     */
    protected void encryptContainer(BaseContainerHandle handle,
                                    String newFilePath)
        throws StandardException
    {
        BasePage page = null;
        StorageFile newFile =
            dataFactory.getStorageFactory().newStorageFile(newFilePath);
        StorageRandomAccessFile newRaf = null;
        try {
            long lastPageNumber= getLastPageNumber(handle);
            newRaf = privGetRandomAccessFile(newFile);

            byte[] encryptionBuf = new byte[pageSize];

            // copy all the pages from the current container to the
            // new container file after encrypting the pages.
            for (long pageNumber = FIRST_ALLOC_PAGE_NUMBER;
                 pageNumber <= lastPageNumber; pageNumber++)
            {

                page = getLatchedPage(handle, pageNumber);
                       
                // update the page array before writing to the disk
                // with container header and encrypt it.
                       
                byte[] dataToWrite = updatePageArray(pageNumber,
                                                     page.getPageArray(),
                                                     encryptionBuf,
                                                     true);
                newRaf.write(dataToWrite, 0, pageSize);

                // unlatch releases page from cache.
                page.unlatch();
                page = null;
            }

            // sync the new version of the container.
            newRaf.sync(true);
            newRaf.close();
            newRaf = null;
           
        } catch (IOException ioe) {
            throw StandardException.newException(
                                    SQLState.FILE_CONTAINER_EXCEPTION,
                                    ioe,
                                    newFile);
        } finally {

            if (page != null) {
                page.unlatch();
                page = null;
            }
           
            if (newRaf != null) {
                try {
View Full Code Here
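Both the backup and encryption paths allocate a scratch buffer that is private to the copy pass, so encrypting a page for the copy never touches the buffer used by regular container writes and needs no synchronization with them. A minimal sketch of that design point, with a hypothetical Encryptor interface:

// Minimal sketch of the separate-buffer design; Encryptor is hypothetical.
final class EncryptCopySketch {

    interface Encryptor {
        // encrypts src into dst (same length) and returns dst
        byte[] encrypt(byte[] src, byte[] dst);
    }

    // scratch is private to this copy pass, so the page-cache copy and the
    // buffer used by regular container writes are never touched here
    static byte[] pageForBackup(byte[] pageArray, boolean encrypted,
                                Encryptor enc, byte[] scratch) {
        return encrypted ? enc.encrypt(pageArray, scratch) : pageArray;
    }
}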

     throws StandardException
  {
    // findpage will have the page latched.
    // CompensationOperation.doMe must call this.releaseResource to release
    // the page when it is done.
    BasePage undoPage = findpage(xact);

    // Need to pre-dirty this page so that if a checkpoint is taken any
    // time after the CLR is sent to the log stream, it will wait for the
    // actual undo to happen on the page.  We need this to preserve the
    // integrity of the redoLWM.
    undoPage.preDirty();

    return new PhysicalUndoOperation(undoPage, this);
  }
View Full Code Here
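generateUndo pre-dirties the page before the compensation log record (CLR) reaches the log stream, so a checkpoint taken in between must wait for the undo to land on the page and the redo low-water mark (redoLWM) stays valid. A tiny sketch of that ordering, with hypothetical types:

// Minimal sketch of the ordering; all types hypothetical.
final class PreDirtySketch {

    interface UndoablePage {
        void preDirty();    // checkpoint must now wait for this page
        void applyUndo();   // the actual rollback of the logged change
    }

    interface LogStream {
        void logCompensation(UndoablePage page);
    }

    static void undo(UndoablePage page, LogStream log) {
        page.preDirty();           // 1. block checkpoints from skipping the page
        log.logCompensation(page); // 2. the CLR enters the log stream
        page.applyUndo();          // 3. the undo actually mutates the page
    }
}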

    @exception StandardException Standard Cloudscape error policy
  */
  public void restoreLoggedRow(Object[] row, LimitObjectInput in)
    throws StandardException, IOException
  {
    BasePage p = null;

    try {
      // the optional data is written by the page in the same format it
      // stores records on the page;
      // only a page knows how to restore a logged row back to a storable row.
      // first get the page where the insert went, even though the row may no
      // longer be there
      p = (BasePage)(getContainer().getPage(getPageId().getPageNumber()));

      // skip over the before and after image of the column, position the
      // input stream at the entire row
      p.skipField(in);  // AI of the column
      p.skipField(in);  // BI of the column

      p.restoreRecordFromStream(in, row);

      // RESOLVE: this returns the BI of the row, what we need is the AI
      // of the row.  We need to somehow splice the AI of the column
      // into the storable row.

    } finally {

      if (p != null) {
        p.unlatch();
        p = null;
      }
    }
  }
View Full Code Here
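restoreLoggedRow positions the input stream by skipping the logged after image and before image of the updated column, leaving the stream at the full row before restoring it. A minimal sketch of that positioning, with a hypothetical RowStream type:

// Minimal sketch; RowStream is hypothetical, not Derby's stream API.
import java.io.IOException;

final class RestoreRowSketch {

    interface RowStream {
        void skipField() throws IOException;           // skip one logged field
        void readRow(Object[] row) throws IOException; // fill in the row columns
    }

    static void restore(RowStream in, Object[] row) throws IOException {
        in.skipField();   // after image (AI) of the updated column
        in.skipField();   // before image (BI) of the updated column
        in.readRow(row);  // the stream now points at the entire row
    }
}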
