Package org.apache.derby.iapi.store.raw

Examples of org.apache.derby.iapi.store.raw.RecordHandle
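A RecordHandle names a single record in the raw store by its container, page number, and record id. Unlike a slot number, it remains valid after the page is unlatched, which is why the snippets below hold on to handles for row locking, repositioning a scan, and linking the pieces of an overflow row chain. The following is a minimal sketch rather than Derby code: the class and method names are invented, and only Page.fetchFromSlot and its argument pattern are taken from the snippets themselves.

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.store.raw.Page;
import org.apache.derby.iapi.store.raw.RecordHandle;
import org.apache.derby.iapi.types.DataValueDescriptor;

// Hypothetical helper, not part of Derby: fetch the row at 'slot' from a page
// the caller has already latched, and return the RecordHandle identifying it.
// Passing (RecordHandle) null asks fetchFromSlot to allocate the handle; the
// null fetch descriptor and trailing boolean mirror the calls in the snippets.
final class RecordHandleSketch {

    static RecordHandle fetchRowAndHandle(
            Page                  page,   // latched page, obtained elsewhere
            int                   slot,   // slot of the record on that page
            DataValueDescriptor[] row)    // destination buffer for the columns
        throws StandardException
    {
        RecordHandle rh = page.fetchFromSlot(
                (RecordHandle) null, slot, row, null, true);

        // Unlike the slot number, the handle stays valid once the page is
        // unlatched, so it can be kept for later locking or repositioning.
        return rh;
    }
}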


  {
    // If this is a head page, the recordHandle is the head row handle.
    // If this is not a head page, we are calling updateAtSlot inside some
    // convoluted loop that updates an overflow chain.  There is nothing we
    // can do about it anyway.
    RecordHandle headRowHandle =
            isOverflowPage() ? null : getRecordHandleAtSlot(slot);
   
    // RESOLVE: djd/yyz what does a null row mean? (sku)
    if (row == null)
    {
      owner.getActionSet().actionUpdate(
                t, this, slot, id, row, validColumns, -1,
                (DynamicByteArrayOutputStream) null, -1, headRowHandle);

      return;
    }

    // startColumn is the first column to be updated.
    int startColumn = RowUtil.nextColumn(row, validColumns, 0);
    if (startColumn == -1)
      return;

    if (SanityManager.DEBUG)
    {
      // make sure that if N bits are set in the validColumns that
      // exactly N columns are passed in via the row array.
      if (!isOverflowPage() && validColumns != null)
      {
        if (RowUtil.getNumberOfColumns(-1, validColumns) > row.length)
          SanityManager.THROWASSERT("updating slot " + slot +
             " on page " + getIdentity() + " " +
              RowUtil.getNumberOfColumns(-1, validColumns) +
              " bits are set in validColumns but only " +
              row.length + " columns in row[]");
      }
    }


    // Keep track of row shrinkage in the head row piece.  If any row piece
    // shrinks, queue post-commit work to clear all reserved space for the
    // entire row chain.
    boolean rowHasReservedSpace = false;

    StoredPage curPage = this;
    for (;;)
        {
      StoredRecordHeader rh = curPage.getHeaderAtSlot(slot);

      int startField          = rh.getFirstField();
      int endFieldExclusive   = startField + rh.getNumberFields();

      // curPage contains column[startField] to column[endFieldExclusive-1]

      // Need to cope with an update that is increasing the number of
      // columns.  If this occurs we want to make sure that we perform a
      // single update to the last portion of a record, and not an update
      // of the current columns and then an update to append a column.

      long nextPage        = -1;
      int  realStartColumn = -1;
      int  realSpaceOnPage = -1;

      if (!rh.hasOverflow() ||
                ((startColumn >= startField) &&
                 (startColumn <  endFieldExclusive)))
      {
        boolean                 hitLongColumn;
        int                     nextColumn      = -1;
        Object[]   savedFields     = null;
        DynamicByteArrayOutputStream  logBuffer       = null;

        do
                {
          try
                    {
            // Update this portion of the record.
            // Pass in headRowHandle in case we are to update any
            // long column and they need to be cleaned up by post
            // commit processing.  We don't want to purge the
            // columns right now because in order to reclaim the
            // page, we need to remove them.  But it would be bad
            // to remove them now because the transaction may not
            // commit for a long time.  We can do both purging of
            // the long column and page removal together in the
            // post commit.
            nextColumn =
                            owner.getActionSet().actionUpdate(
                                t, curPage, slot, id, row, validColumns,
                  realStartColumn, logBuffer,
                                realSpaceOnPage, headRowHandle);

            hitLongColumn = false;

          }
                    catch (LongColumnException lce)
                    {
 
            if (lce.getRealSpaceOnPage() == -1)
                        {
              // an update that has caused the row to increase
              // in size *and* push some fields off the page
              // that need to be inserted in an overflow page

              // no need to make a copy as we are going to use
              // this buffer right away
              logBuffer = lce.getLogBuffer();

              savedFields     =
                                (Object[]) lce.getColumn();
                           
              realStartColumn = lce.getNextColumn();
              realSpaceOnPage = -1;

              hitLongColumn   = true;

              continue;
            }

           
            // we caught a real long column exception
            // three things should happen here:
            // 1. insert the long column into overflow pages.
            // 2. append the overflow field header in the main chain.
            // 3. continue the update in the main data chain.
            logBuffer =
                            new DynamicByteArrayOutputStream(lce.getLogBuffer());

            // step 1: insert the long column ... if this update
            // operation rolls back, purge the after image column
            // chain and reclaim the overflow page because the
            // whole chain will be orphaned anyway.
            RecordHandle longColumnHandle =
              insertLongColumn(
                                curPage, lce, Page.INSERT_UNDO_WITH_PURGE);

            // step 2: append overflow field header to log buffer
            int overflowFieldLen = 0;
            try
                        {
              overflowFieldLen +=
                appendOverflowFieldHeader(
                                    logBuffer, longColumnHandle);

            }
                        catch (IOException ioe)
                        {
              throw StandardException.newException(
                                SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
            }

            // step 3: continue the insert in the main data chain
            // need to pass the log buffer and start column to the
            // next insert.
            realStartColumn = lce.getNextColumn() + 1;
            realSpaceOnPage = lce.getRealSpaceOnPage() - overflowFieldLen;
            hitLongColumn = true;

          }

        } while (hitLongColumn);


        // See if we completed all the columns that are on this page.
        int validColumnsSize =
                    (validColumns == null) ? 0 : validColumns.getLength();

        if (nextColumn != -1)
                {

          if (SanityManager.DEBUG)
                    {
            // note nextColumn might be less than the first
            // column we started updating.  This is because the
            // update might force the record header to grow and
            // push fields before the one we are updating off the
            // page and into this insert.

            if ((nextColumn < startField) ||
                            (rh.hasOverflow() && (nextColumn >= endFieldExclusive)))
                        {
              SanityManager.THROWASSERT(
                                "nextColumn out of range = " + nextColumn +
                " expected between " +
                                startField + " and " + endFieldExclusive);
                        }
          }

          // Need to insert rows from nextColumn to endFieldExclusive
          // onto a new overflow page.
          // If the column is not being updated we
          // pick it up from the current page. If it is being updated
          // we take it from the new value.
          int possibleLastFieldExclusive = endFieldExclusive;
                   
          if (!rh.hasOverflow())
                    {
            // we might be adding a field here
            if (validColumns == null)
                        {
              if (row.length > possibleLastFieldExclusive)
                possibleLastFieldExclusive = row.length;
            }
                        else
                        {
              if (validColumnsSize > possibleLastFieldExclusive)
                possibleLastFieldExclusive = validColumnsSize;
            }
          }


          // use a sparse row
          Object[] newRow =
                        new Object[possibleLastFieldExclusive];

          FormatableBitSet  newColumnList =
                        new FormatableBitSet(possibleLastFieldExclusive);

          ByteArrayOutputStream fieldStream = null;

          for (int i = nextColumn; i < possibleLastFieldExclusive; i++)
                    {
            if ((validColumns == null) ||
                            (validColumnsSize > i && validColumns.isSet(i)))
                        {
              newColumnList.set(i);
              // use the new value
              newRow[i] = RowUtil.getColumn(row, validColumns, i);

            }
                        else if (i < endFieldExclusive)
                        {
              newColumnList.set(i);

              // use the old value
              newRow[i] = savedFields[i - nextColumn];
            }
          }

          RecordHandle handle = curPage.getRecordHandleAtSlot(slot);

          // If the portion we just updated is the last portion, then
          // there cannot be any more updates to do.
          if (rh.hasOverflow())
                    {
            // We have to carry across the overflow information
            // from the current record, if any.
            nextPage = rh.getOverflowPage();
            id = rh.getOverflowId();

            // find the next starting column before unlatching page
            startColumn =
                            RowUtil.nextColumn(
                                row, validColumns, endFieldExclusive);
          }
                    else
                    {
            startColumn = -1;
            nextPage = 0;
          }


          // After the update is done, see if this row piece in curPage
          // has shrunk, provided no other row piece has shrunk so far.
          // In the head page we need to respect minimumRecordSize; in an
          // overflow page we only need to respect
          // RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT.
          // Don't bother with temporary containers.
          if (!rowHasReservedSpace && headRowHandle != null &&
            curPage != null && !owner.isTemporaryContainer())
          {
            rowHasReservedSpace =
                            curPage.checkRowReservedSpace(slot);
          }


          // Insert the record portion on a new overflow page at slot 0;
          // this will automatically handle any overflows in this new
          // portion.

          // BasePage op = getNewOverflowPage();

                    BasePage op =
                        curPage.getOverflowPageForInsert(
                            slot,
                            newRow,
                            newColumnList,
                            nextColumn);

          // We have all the information from this page so unlatch it
          if (curPage != this)
                    {
            curPage.unlatch();
            curPage = null;
          }

          byte mode = Page.INSERT_OVERFLOW;
          if (nextPage != 0)
            mode |= Page.INSERT_FOR_SPLIT;

          RecordHandle nextPortionHandle =
            nextPage == 0 ? null :
            owner.makeRecordHandle(nextPage, id);

          // RESOLVED (sku): even though we would like to roll back
          // these inserts with PURGE rather than with delete, we have
          // to delete because if we purge the last row from an
          // overflow page, the purge will queue a post commit to
          // remove the page.
          // While this is OK with long columns, we cannot do this for
          // long rows because long row overflow pages can be shared by
          // more than one long row, and thus it is unsafe to remove
          // the page without first latching the head page.
          // However, the insert log record does not have the head
          // row's page number, so the rollback cannot put that
          // information into the post commit work.
          RecordHandle portionHandle =
            op.insertAllowOverflow(
                            0, newRow, newColumnList, nextColumn, mode, 100,
                            nextPortionHandle);

          // Update the previous record header to point to the new portion
// ... (snippet truncated)


          // page is not held.  We only need a zero duration lock on
          // the new page because the allocPage is latched and this
          // is the only thread which can be looking at this
          // pageNumber.

          RecordHandle deallocLock = BasePage.MakeRecordHandle(pkey,
                 RecordHandle.DEALLOCATE_PROTECTION_HANDLE);

          if (!getDeallocLock(allocHandle, deallocLock,
                    false /* nowait */,
                    true /* zeroDuration */))
 
// ... (snippet truncated)

                    haveAllColumns = true;
                }
            }

            if (!haveAllColumns) {
                RecordHandle rh = page.fetchFromSlot(
                        (RecordHandle) null,
                        scan_position.current_slot,
                        fullKey,
                        fetchDescriptor,
                        true);
// ... (snippet truncated)

    private boolean fetchMaxRowFromBeginning(
    DataValueDescriptor[]   fetch_row)
        throws StandardException
    {
        int                 ret_row_count     = 0;
        RecordHandle        max_rh            = null;

        // we need to scan until we hit the end of the table or until we
        // run into a null.  Use this template to probe the "next" row so
        // that if we need to finish, fetch_row will have the right value.
        DataValueDescriptor[] check_row_template = new DataValueDescriptor[1];
        check_row_template[0] = fetch_row[0].getClone();
        FetchDescriptor check_row_desc = RowUtil.getFetchDescriptorConstant(1);

        // reopen the scan for reading from the beginning of the table.
        reopenScan(
            (DataValueDescriptor[]) null,
            ScanController.NA,
            (Qualifier[][]) null,
            (DataValueDescriptor[]) null,
            ScanController.NA);

        BTreeRowPosition pos = scan_position;

        positionAtStartForForwardScan(pos);

        // At this point:
        // current_page is latched.  current_slot is the slot on current_page
        // just before the "next" record this routine should process.

        // loop through successive leaf pages and successive slots on those
        // leaf pages.  Stop when the last leaf is reached or a null is
        // encountered.  At any time in the scan, fetch_row will contain the
        // last non-deleted row seen.

        boolean nulls_not_reached = true;
        leaf_loop:
    while ((pos.current_leaf != null) && nulls_not_reached)
    {
            slot_loop:
      while ((pos.current_slot + 1) < pos.current_leaf.page.recordCount())
      {
                // unlock the previous row if doing read.
                if (pos.current_rh != null)
                {
                    this.getLockingPolicy().unlockScanRecordAfterRead(
                        pos, init_forUpdate);

                    // current_rh is used to track which row we need to unlock,
                    // at this point no row needs to be unlocked.
                    pos.current_rh = null;
                }

                // move scan current position forward.
                pos.current_slot++;
                this.stat_numrows_visited++;

                // get current record handle for positioning but don't read
                // data until we verify it is not deleted.  rh is needed
                // for repositioning if we lose the latch.
                RecordHandle rh =
                    pos.current_leaf.page.fetchFromSlot(
                        (RecordHandle) null,
                        pos.current_slot,
                        check_row_template,
                        null,
// ... (snippet truncated)

                this.stat_numrows_visited++;

                // get current record handle for positioning but don't read
                // data until we verify it is not deleted.  rh is needed
                // for repositioning if we lose the latch.
                RecordHandle rh =
                    pos.current_leaf.page.fetchFromSlot(
                        (RecordHandle) null,
                        pos.current_slot, fetch_row, init_fetchDesc,
                        true);
// ... (snippet truncated)

    private int comparePreviousRecord (int slot,
                                    LeafControlRow  leaf,
                                    DataValueDescriptor [] rows,
                                    DataValueDescriptor [] oldRows)
                                        throws StandardException {
        RecordHandle rh = null;
        boolean newLeaf = false;
        LeafControlRow originalLeaf = leaf;
        while (leaf != null) {
            if (slot == 0) {
                try {
// ... (snippet truncated)

    private int compareNextRecord (int slot,
                                    LeafControlRow  leaf,
                                    DataValueDescriptor [] rows,
                                    DataValueDescriptor [] oldRows)
                                        throws StandardException {
        RecordHandle rh = null;
        boolean newLeaf = false;
        LeafControlRow originalLeaf = leaf;
        while (leaf != null) {
            if (slot >= leaf.page.recordCount()) {
                //slot is pointing to last slot
// ... (snippet truncated)

    if (undo != null) {
      t.checkLogicalOperationOk();
    }

        int recordId;
        RecordHandle handle;

        do {

            // loop until we get a new record id we can get a lock on.
// ... (snippet truncated)

                    SQLState.DATA_CONTAINER_READ_ONLY);
        }


    // Handle of the first portion of the chain
    RecordHandle headHandle = null;
    RecordHandle handleToUpdate = null;

    RawTransaction t = curPage.owner.getTransaction();

    for (;;) {

      if (SanityManager.DEBUG) {
        SanityManager.ASSERT(curPage.isLatched());
      }

      if (!curPage.allowInsert())
        return null;

      // 'this' is the head page
      if (curPage != this)
        slot = curPage.recordCount;

      boolean isLongColumns   = false;
      int     realStartColumn = -1;
      int     realSpaceOnPage = -1;

      DynamicByteArrayOutputStream logBuffer = null;

            // allocate new record id and handle
            int          recordId = curPage.newRecordIdAndBump();
      RecordHandle handle   =
                new RecordId(curPage.getPageId(), recordId, slot);

      if (curPage == this) {


        // Lock the row, if it is the very first portion of the record.
        if (handleToUpdate == null) {

                    while (!owner.getLockingPolicy().lockRecordForWrite(
                                t, handle,
                                true  /* lock is for insert */,
                                false /* don't wait for grant */)) {

                        // loop until we get a new record id we can get a lock
                        // on.  If we can't get the lock without waiting, then
                        // assume the record id is owned by another xact.  The
                        // current heap overflow algorithm makes this likely,
                        // as it first tries to insert a row telling raw store
                        // to fail if it doesn't fit on the page, thereby
                        // getting a lock on an id that never makes it to
                        // disk.  The inserting transaction will hold a lock
                        // on this "unused" record id until it commits.  The
                        // page can leave the cache at this point, and since
                        // the inserting transaction has not dirtied the page
                        // (it failed after getting the lock but before
                        // logging anything), another inserting transaction
                        // will then get the same id as the previous inserter
                        // - thus the loop on lock waits.
                        //
                        // The lock we request indicates that this is a lock
                        // for insert, which the locking policy may use to
                        // perform locking concurrency optimizations.

                        // allocate new record id and handle
                        recordId = curPage.newRecordIdAndBump();
                        handle   =
                            new RecordId(curPage.getPageId(), recordId, slot);
                    }
        }

        headHandle = handle;
      }

      do {

        // do this loop at least once.  If we caught a long column
        // exception, then we redo the insert with the saved logBuffer.
        try {

          startColumn =
                        owner.getActionSet().actionInsert(
                            t, curPage, slot, recordId,
                            row, validColumns, (LogicalUndo) null,
                            insertFlag, startColumn, false,
                            realStartColumn, logBuffer, realSpaceOnPage,
                            overflowThreshold);
          isLongColumns = false;

        } catch (LongColumnException lce) {


          // we caught a long column exception
          // three things should happen here:
          // 1. insert the long column into overflow pages.
          // 2. append the overflow field header in the main chain.
          // 3. continue the insert in the main data chain.
          logBuffer = new DynamicByteArrayOutputStream(lce.getLogBuffer());

          // step 1: insert the long column ... use the same
          // insertFlag as the rest of the row.
          RecordHandle longColumnHandle =
            insertLongColumn(curPage, lce, insertFlag);

          // step 2: append the overflow field header to the log buffer
          int overflowFieldLen = 0;
          try {
// ... (snippet truncated)

    // Object[] row = new Object[1];
    // row[0] = (Object) lce.getColumn();
    Object[] row = new Object[1];
    row[0] = lce.getColumn();

    RecordHandle firstHandle = null;
    RecordHandle handle = null;
    RecordHandle prevHandle = null;
    BasePage curPage = mainChainPage;
    BasePage prevPage = null;
    boolean isFirstPage = true;

    // when inserting a long column, startColumn is just used
// ... (snippet truncated)
