Package: org.exist.storage.lock

Usage examples of org.exist.storage.lock.Lock


        /**
         * Removes the current document's entries from the inverted index:
         * for each section (text, attribute, qname) and each token recorded
         * in words[], the stored value is rewritten without this document's
         * occurrences (occurrences listed in the per-token OccurrenceList).
         *
         * NOTE(review): the finally block releases the WRITE lock even when
         * lock.acquire() itself threw a LockException, i.e. a lock that was
         * never obtained may be released — confirm against Lock's contract.
         */
        public void remove() {
            //Return early
            if (doc == null)
                {return;}
            final int collectionId = this.doc.getCollection().getId();
            final Lock lock = dbTokens.getLock();
            // Iterate over all index sections; words[section] maps each token
            // to the OccurrenceList of nodes to be removed.
            for (byte currentSection = 0; currentSection <= QNAME_SECTION; currentSection++) {
                //Not very necessary, but anyway...
                switch (currentSection) {
                case TEXT_SECTION :
                case ATTRIBUTE_SECTION :
                case QNAME_SECTION :
                    break;
                default :
                    throw new IllegalArgumentException("Invalid section type in '" +
                        dbTokens.getFile().getName() + "' (inverted index)");
                }
                for (final Iterator i = words[currentSection].entrySet().iterator(); i.hasNext();) {
                    //Compute a key for the token
                    final Map.Entry entry = (Map.Entry) i.next();
                    final OccurrenceList storedOccurencesList = (OccurrenceList) entry.getValue();
                    final Object token = entry.getKey();
                    Value key;
                    // QName tokens get a qname-aware key; plain tokens a word key.
                    if (currentSection == QNAME_SECTION) {
                        final QNameTerm term = (QNameTerm) token;
                        key = new QNameWordRef(collectionId, term.qname, term.term,
                                broker.getBrokerPool().getSymbols());
                    } else {
                        key = new WordRef(collectionId, token.toString());
                    }
                    final OccurrenceList newOccurencesList = new OccurrenceList();
                    os.clear();
                    try {
                        lock.acquire(Lock.WRITE_LOCK);
                        final Value value = dbTokens.get(key);
                        // No stored entry for this token: nothing to rewrite.
                        if (value == null)
                            {continue;}
                        //Add its data to the new list
                        final VariableByteArrayInput is = new VariableByteArrayInput(value.getData());
                        while (is.available() > 0) {
                            final int storedDocId = is.readInt();
                            final byte storedSection = is.readByte();
                            final int termCount = is.readInt();
                            //Read (variable) length of node IDs + frequency + offsets
                            final int length = is.readFixedInt();
                            if (storedSection != currentSection || storedDocId != this.doc.getDocId()) {
                                // data are related to another section or document:
                                // append them to any existing data
                                os.writeInt(storedDocId);
                                os.writeByte(storedSection);
                                os.writeInt(termCount);
                                os.writeFixedInt(length);
                                is.copyRaw(os, length);
                            } else {
                                // data are related to our section and document:
                                // feed the new list with the GIDs
                                NodeId previous = null;
                                for (int m = 0; m < termCount; m++) {
                                    NodeId nodeId = broker.getBrokerPool()
                                        .getNodeFactory().createFromStream(previous, is);
                                    previous = nodeId;
                                    final int freq = is.readInt();
                                    // add the node to the new list if it is not
                                    // in the list of removed nodes
                                    if (!storedOccurencesList.contains(nodeId)) {
                                        for (int n = 0; n < freq; n++) {
                                            newOccurencesList.add(nodeId, is.readInt());
                                        }
                                    } else {
                                        is.skip(freq);
                                    }
                                }
                            }
                        }
                        //append the data from the new list
                        if (newOccurencesList.getSize() > 0) {
                            //Don't forget this one
                            newOccurencesList.sort();
                            os.writeInt(this.doc.getDocId());
                            os.writeByte(currentSection);
                            os.writeInt(newOccurencesList.getTermCount());
                            //Mark position
                            final int lenOffset = os.position();
                            //Dummy value : actual one will be written below
                            os.writeFixedInt(0);
                            NodeId previous = null;
                            for (int m = 0; m < newOccurencesList.getSize();) {
                                previous = newOccurencesList.getNode(m).write(previous, os);
                                int freq = newOccurencesList.getOccurrences(m);
                                os.writeInt(freq);
                                for (int n = 0; n < freq; n++) {
                                    os.writeInt(newOccurencesList.getOffset(m + n));
                                }
                                // Occurrences of the same node are contiguous, so
                                // advance by the frequency just written.
                                m += freq;
                            }
                            //Write (variable) length of node IDs + frequency + offsets
                            os.writeFixedInt(lenOffset, os.position() -
                                lenOffset - LENGTH_NODE_IDS_FREQ_OFFSETS);
                        }
                        //Store the data
                        if(os.data().size() == 0)
                            {dbTokens.remove(key);}
                        else if (dbTokens.update(value.getAddress(), key,
                                os.data()) == BFile.UNKNOWN_ADDRESS) {
                            LOG.error("Could not update index data for token '" +
                                token + "' in '" + dbTokens.getFile().getName() +
                                "' (inverted index)");
                            //TODO : throw an exception ?
                        }
                    } catch (final LockException e) {
                        LOG.warn("Failed to acquire lock for '" +
                            dbTokens.getFile().getName() + "' (inverted index)", e);
                        //TODO : throw exception ? -pb
                    } catch (final IOException e) {
                        LOG.error(e.getMessage() + "' in '" +
                            dbTokens.getFile().getName() + "' (inverted index)", e);
                        //TODO : throw exception ? -pb
                    } finally {
                        // NOTE(review): also runs when acquire failed — see method note.
                        lock.release(Lock.WRITE_LOCK);
                        os.clear();
                    }
                }
                // Forget the processed tokens for this section.
                words[currentSection].clear();
            }
View Full Code Here


            throw new XPathException("FODC0002: can not access collection '" + pde.getMessage() + "'");
           
        }
        // iterate through all docs and create the node set
        final NodeSet result = new NewArrayNodeSet(docs.getDocumentCount(), 1);
        Lock dlock;
        DocumentImpl doc;
        for (final Iterator<DocumentImpl> i = docs.getDocumentIterator(); i.hasNext();) {
            doc = i.next();
            dlock = doc.getUpdateLock();
            // Track whether we really obtained the lock so the finally block
            // never releases a lock that was not acquired (correct idiom).
            boolean lockAcquired = false;
            try {
                // Skip locking in protected mode or when this thread already
                // holds the document's update lock.
                if (!context.inProtectedMode() && !dlock.hasLock()) {
                    dlock.acquire(Lock.READ_LOCK);
                    lockAcquired = true;
                }
                result.add(new NodeProxy(doc)); // , -1, Node.DOCUMENT_NODE));
            } catch (final LockException e) {
                throw new XPathException(e.getMessage());
            } finally {
                if (lockAcquired)
                    {dlock.release(Lock.READ_LOCK);}
            }
        }
        registerUpdateListener();
        if (context.getProfiler().isEnabled())
               {context.getProfiler().end(this, "", result);}
View Full Code Here

     *  Are there more nodes to be read?
     *
     *@return <code>true</code> if there is at least one more node to read
     */
    public boolean hasNext() {
        final Lock lock = db.getLock();
        try {
            try {
                lock.acquire(Lock.READ_LOCK);
            } catch (final LockException e) {
                LOG.warn("Failed to acquire read lock on " + db.getFile().getName());
                //TODO : throw exception here ? -pb
                return false;
            }
            db.setOwnerObject(broker);
            if (gotoNextPosition()) {
                db.getPageBuffer().add(page);
                final DOMFile.DOMFilePageHeader pageHeader = page.getPageHeader();
                if (offset < pageHeader.getDataLength())
                    {return true;}
                else if (pageHeader.getNextDataPage() == Page.NO_PAGE)
                    {return false;}
                else
                    //Mmmmh... strange -pb
                    {return true;}
            }
        } catch (final BTreeException e) {
            LOG.warn(e);
            //TODO : throw exception here ? -pb
        } catch (final IOException e) {
            LOG.warn(e);
            //TODO : throw exception here ? -pb
        } finally {
            lock.release(Lock.READ_LOCK);
        }
        return false;
    }
View Full Code Here

    /**
     *  Returns the next node in document order.
     */
    public StoredNode next() {
        final Lock lock = db.getLock();
        try {
            try {
                lock.acquire(Lock.READ_LOCK);
            } catch (final LockException e) {
                LOG.warn("Failed to acquire read lock on " + db.getFile().getName());
                //TODO : throw exception here ? -pb
                return null;
            }
            db.setOwnerObject(broker);
            StoredNode nextNode = null;
            if (gotoNextPosition()) {
                long backLink = 0;
                do {
                    final DOMFile.DOMFilePageHeader pageHeader = page.getPageHeader();
                    //Next value larger than length of the current page?
                    if (offset >= pageHeader.getDataLength()) {
                        //Load next page in chain
                        long nextPageNum = pageHeader.getNextDataPage();
                        if (nextPageNum == Page.NO_PAGE) {
                            SanityCheck.TRACE("bad link to next " + page.page.getPageInfo() +
                                "; previous: " + pageHeader.getPreviousDataPage() +
                                "; offset = " + offset + "; lastTupleID = " + lastTupleID);
                            System.out.println(db.debugPageContents(page));
                            //TODO : throw exception here ? -pb
                            return null;
                        }
                        pageNum = nextPageNum;
                        page = db.getDOMPage(nextPageNum);
                        db.addToBuffer(page);
                        offset = 0;
                    }
                    //Extract the tuple ID
                    lastTupleID = ByteConversion.byteToShort(page.data, offset);
                    offset += DOMFile.LENGTH_TID;
                    //Check if this is just a link to a relocated node
                    if(ItemId.isLink(lastTupleID)) {
                        //Skip this
                        offset += DOMFile.LENGTH_FORWARD_LOCATION;
                        //Continue the iteration
                        continue;
                    }
                    //Read data length
                    short vlen = ByteConversion.byteToShort(page.data, offset);
                    offset += DOMFile.LENGTH_DATA_LENGTH;
                    if (vlen < 0) {
                        LOG.error("Got negative length" + vlen + " at offset " + offset + "!!!");
                        LOG.debug(db.debugPageContents(page));
                        //TODO : throw an exception right now ?
                    }
                    if(ItemId.isRelocated(lastTupleID)) {
                        //Found a relocated node. Read the original address
                        backLink = ByteConversion.byteToLong(page.data, offset);
                        offset += DOMFile.LENGTH_ORIGINAL_LOCATION;
                    }
                    //Overflow page? Load the overflow value
                    if (vlen == DOMFile.OVERFLOW) {
                        vlen = DOMFile.LENGTH_OVERFLOW_LOCATION;
                        final long overflow = ByteConversion.byteToLong(page.data, offset);
                        offset += DOMFile.LENGTH_OVERFLOW_LOCATION;
                        try {
                            final byte[] overflowValue = db.getOverflowValue(overflow);
                            nextNode = StoredNode.deserialize(overflowValue, 0, overflowValue.length,
                                doc, useNodePool);
                        } catch(final Exception e) {
                            LOG.warn("Exception while loading overflow value: " + e.getMessage() +
                                "; originating page: " + page.page.getPageInfo());
                            //TODO : rethrow exception ? -pb
                        }
                    //Normal node
                    } else {
                        try {
                            nextNode = StoredNode.deserialize(page.data, offset, vlen, doc, useNodePool);
                            offset += vlen;
                        } catch(final Exception e) {
                            LOG.error("Error while deserializing node: " + e.getMessage(), e);
                            LOG.error("Reading from offset: " + offset + "; len = " + vlen);
                            LOG.debug(db.debugPageContents(page));
                            System.out.println(db.debugPageContents(page));
                            throw new RuntimeException(e);
                        }
                    }
                    if (nextNode == null) {
                        LOG.error("illegal node on page " + page.getPageNum() +
                            "; tid = " + ItemId.getId(lastTupleID) +
                            "; next = " + page.getPageHeader().getNextDataPage() +
                            "; prev = " + page.getPageHeader().getPreviousDataPage() +
                            "; offset = " + (offset - vlen) +
                            "; len = " + page.getPageHeader().getDataLength());
                        System.out.println(db.debugPageContents(page));
                        //TODO : throw an exception here ? -pb
                        return null;
                    }
                    if (ItemId.isRelocated(lastTupleID)) {
                        nextNode.setInternalAddress(backLink);
                    } else {
                        nextNode.setInternalAddress(StorageAddress.createPointer((int) pageNum,
                            ItemId.getId(lastTupleID)));
                    }
                    nextNode.setOwnerDocument(doc);
                } while (nextNode == null);
            }
            return nextNode;
        } catch (final BTreeException e) {
            LOG.error(e.getMessage(), e);
            //TODO : re-throw exception ? -pb
        } catch (final IOException e) {
            LOG.error(e.getMessage(), e);
            //TODO : re-throw exception ? -pb
        } finally {
            lock.release(Lock.READ_LOCK);
        }
        return null;
    }
View Full Code Here

    public NodeSet findElementsByTagName(byte type, DocumentSet docs, QName qname, NodeSelector selector) {
        return findElementsByTagName(type, docs, qname, selector, null);
    }

    public NodeSet findElementsByTagName(byte type, DocumentSet docs, QName qname, NodeSelector selector, Expression parent) {
        final Lock lock = index.btree.getLock();
        final NewArrayNodeSet result = new NewArrayNodeSet(docs.getDocumentCount(), 256);
        final FindElementsCallback callback = new FindElementsCallback(type, result, docs, selector, parent);
        // scan the document set to find document id ranges to query
        final List<Range> ranges = new ArrayList<Range>();
        Range next = null;
        for (final Iterator<DocumentImpl> i = docs.getDocumentIterator(); i.hasNext(); ) {
            final DocumentImpl doc = i.next();
            if (next == null)
                {next = new Range(doc.getDocId());}
            else if (next.end + 1 == doc.getDocId())
                {next.end++;}
            else {
                ranges.add(next);
                next = new Range(doc.getDocId());
            }
        }
        if (next != null)
            {ranges.add(next);}

        // for each document id range, scan the index to find matches
        for (final Range range : ranges) {
            final byte[] fromKey = computeKey(type, qname, range.start);
            final byte[] toKey = computeKey(type, qname, range.end + 1);
            final IndexQuery query = new IndexQuery(IndexQuery.RANGE, new Value(fromKey), new Value(toKey));
            try {
                lock.acquire(Lock.READ_LOCK);
                index.btree.query(query, callback);
            } catch (final LockException e) {
                NativeStructuralIndex.LOG.warn("Lock problem while searching structural index: " + e.getMessage(), e);
            } catch (final TerminatedException e) {
                NativeStructuralIndex.LOG.warn("Query was terminated while searching structural index: " + e.getMessage(), e);
            } catch (final Exception e) {
                NativeStructuralIndex.LOG.error("Error while searching structural index: " + e.getMessage(), e);
            } finally {
                lock.release(Lock.READ_LOCK);
            }
        }
        return result;
    }
View Full Code Here

    public NodeSet findDescendantsByTagName(byte type, QName qname, int axis, DocumentSet docs, NodeSet contextSet, int contextId) {
        return findDescendantsByTagName(type, qname, axis, docs, contextSet, contextId, null);
    }

    public NodeSet findDescendantsByTagName(byte type, QName qname, int axis, DocumentSet docs, NodeSet contextSet, int contextId, Expression parent) {
        final Lock lock = index.btree.getLock();
        final NewArrayNodeSet result = new NewArrayNodeSet(docs.getDocumentCount(), 256);
        final FindDescendantsCallback callback = new FindDescendantsCallback(type, axis, contextId, result, parent);
        try {
            lock.acquire(Lock.READ_LOCK);
            for (final NodeProxy ancestor : contextSet) {
                final DocumentImpl doc = ancestor.getDocument();
                final NodeId ancestorId = ancestor.getNodeId();
                callback.setAncestor(doc, ancestor);
                byte[] fromKey, toKey;
                if (ancestorId == NodeId.DOCUMENT_NODE) {
                    fromKey = computeKey(type, qname, doc.getDocId());
                    toKey = computeKey(type, qname, doc.getDocId() + 1);
                } else {
                    fromKey = computeKey(type, qname, doc.getDocId(), ancestorId);
                    toKey = computeKey(type, qname, doc.getDocId(), ancestorId.nextSibling());
                }
                final IndexQuery query = new IndexQuery(IndexQuery.RANGE, new Value(fromKey), new Value(toKey));
                try {
                    index.btree.query(query, callback);
                } catch (final Exception e) {
                    NativeStructuralIndex.LOG.error("Error while searching structural index: " + e.getMessage(), e);
                }
            }
        } catch (final LockException e) {
            NativeStructuralIndex.LOG.warn("Lock problem while searching structural index: " + e.getMessage(), e);
        } finally {
            lock.release(Lock.READ_LOCK);
        }
        result.updateNoSort();
        return result;
    }
View Full Code Here

        return result;
    }

    public NodeSet findAncestorsByTagName(byte type, QName qname, int axis, DocumentSet docs, NodeSet contextSet,
                                          int contextId) {
        final Lock lock = index.btree.getLock();
        final NewArrayNodeSet result = new NewArrayNodeSet(docs.getDocumentCount(), 256);
        try {
            lock.acquire(Lock.READ_LOCK);
            for (final NodeProxy descendant : contextSet) {
                NodeId parentId;
                if (axis == Constants.ANCESTOR_SELF_AXIS || axis == Constants.SELF_AXIS)
                    {parentId = descendant.getNodeId();}
                else
                    {parentId = descendant.getNodeId().getParentId();}
                final DocumentImpl doc = descendant.getDocument();
                while (parentId != NodeId.DOCUMENT_NODE) {
                    final byte[] key = computeKey(type, qname, doc.getDocId(), parentId);
                    final long address = index.btree.findValue(new Value(key));
                    if (address != -1) {
                        final NodeProxy storedNode = new NodeProxy(doc, parentId,
                            type == ElementValue.ATTRIBUTE ? Node.ATTRIBUTE_NODE : Node.ELEMENT_NODE, address);
                        result.add(storedNode);
                        if (Expression.NO_CONTEXT_ID != contextId) {
                            storedNode.deepCopyContext(descendant, contextId);
                        } else
                            {storedNode.copyContext(descendant);}
                        if (contextSet.getTrackMatches())
                          {storedNode.addMatches(descendant);}
                    }
                    // stop after first iteration if we are on the self axis
                    if (axis == Constants.SELF_AXIS || axis == Constants.PARENT_AXIS)
                        {break;}
                    // continue with the parent of the parent
                    parentId = parentId.getParentId();
                }
            }
        } catch (final LockException e) {
            NativeStructuralIndex.LOG.warn("Lock problem while searching structural index: " + e.getMessage(), e);
        } catch (final Exception e) {
            NativeStructuralIndex.LOG.error("Error while searching structural index: " + e.getMessage(), e);
        } finally {
            lock.release(Lock.READ_LOCK);
        }
        result.sort(true);
        return result;
    }
View Full Code Here

        return result;
    }

    public NodeSet scanByType(byte type, int axis, NodeTest test, boolean useSelfAsContext, DocumentSet docs,
        NodeSet contextSet, int contextId) {
        final Lock lock = index.btree.getLock();
        final NewArrayNodeSet result = new NewArrayNodeSet(docs.getDocumentCount(), 256);
        final FindDescendantsCallback callback = new FindDescendantsCallback(type, axis, contextId, useSelfAsContext, result, null);
        for (final NodeProxy ancestor : contextSet) {
            final DocumentImpl doc = ancestor.getDocument();
            final NodeId ancestorId = ancestor.getNodeId();
            final List<QName> qnames = getQNamesForDoc(doc);
            try {
              lock.acquire(Lock.READ_LOCK);
              for (final QName qname : qnames) {
                if (test.getName() == null || test.matches(qname)) {
                  callback.setAncestor(doc, ancestor);
                  byte[] fromKey, toKey;
                      if (ancestorId == NodeId.DOCUMENT_NODE) {
                          fromKey = computeKey(type, qname, doc.getDocId());
                          toKey = computeKey(type, qname, doc.getDocId() + 1);
                      } else {
                          fromKey = computeKey(type, qname, doc.getDocId(), ancestorId);
                          toKey = computeKey(type, qname, doc.getDocId(), ancestorId.nextSibling());
                      }
                      final IndexQuery query = new IndexQuery(IndexQuery.RANGE, new Value(fromKey), new Value(toKey));
                      try {
                          index.btree.query(query, callback);
                      } catch (final Exception e) {
                          NativeStructuralIndex.LOG.error("Error while searching structural index: " + e.getMessage(), e);
                      }
                }
              }
            } catch (final LockException e) {
                NativeStructuralIndex.LOG.warn("Lock problem while searching structural index: " + e.getMessage(), e);
            } finally {
                lock.release(Lock.READ_LOCK);
            }
        }
//        result.updateNoSort();
        return result;
    }
View Full Code Here

    /**
     * Removes all pending node entries for the current document from the
     * structural index, then clears the pending map.
     *
     * NOTE(review): the inner finally block releases the WRITE lock even when
     * lock.acquire() itself threw a LockException, i.e. a lock that was never
     * obtained may be released — confirm against Lock's contract.
     */
    protected void removeSome() {
        // Nothing queued for removal.
        if (pending.size() == 0)
            {return;}

        try {
            final Lock lock = index.btree.getLock();
            for (final Map.Entry<QName,List<NodeProxy>> entry: pending.entrySet()) {
                final QName qname = entry.getKey();
                try {
                    lock.acquire(Lock.WRITE_LOCK);
                    final List<NodeProxy> nodes = entry.getValue();
                    for (final NodeProxy proxy : nodes) {
                        final NodeId nodeId = proxy.getNodeId();
                        final byte[] key = computeKey(qname.getNameType(), qname, document.getDocId(), nodeId);
                        index.btree.removeValue(new Value(key));
                    }
                } catch (final LockException e) {
                    NativeStructuralIndex.LOG.warn("Failed to lock structural index: " + e.getMessage(), e);
                } catch (final Exception e) {
                    NativeStructuralIndex.LOG.warn("Exception caught while writing to structural index: " + e.getMessage(), e);
                } finally {
                    lock.release(Lock.WRITE_LOCK);
                }
            }
        } finally {
            // Always forget the pending entries, even on failure.
            pending.clear();
        }
View Full Code Here

        // Remove every structural-index entry belonging to docToRemove: for
        // each qname recorded for the document, delete the document's whole
        // key range from the btree.
        final List<QName> qnames = getQNamesForDoc(docToRemove);
        for (final QName qname : qnames) {
            final byte[] fromKey = computeKey(qname.getNameType(), qname, docToRemove.getDocId());
            final byte[] toKey = computeKey(qname.getNameType(), qname, docToRemove.getDocId() + 1);
            final IndexQuery query = new IndexQuery(IndexQuery.RANGE, new Value(fromKey), new Value(toKey));
            final Lock lock = index.btree.getLock();
            try {
                lock.acquire(Lock.WRITE_LOCK);
                index.btree.remove(query, null);
            } catch (final LockException e) {
                // NOTE(review): the finally below releases even when this
                // acquire failed — confirm against Lock's contract.
                NativeStructuralIndex.LOG.warn("Failed to lock structural index: " + e.getMessage(), e);
            } catch (final Exception e) {
                NativeStructuralIndex.LOG.warn("Exception caught while removing structural index for document " +
                    docToRemove.getURI() + ": " + e.getMessage(), e);
            } finally {
                lock.release(Lock.WRITE_LOCK);
            }
        }
        // Drop the qname bookkeeping kept for the removed document.
        removeQNamesForDoc(docToRemove);
    }
View Full Code Here

TOP

Related Classes of org.exist.storage.lock.Lock

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by Oracle Inc. Contact coftware#gmail.com.