Package org.eclipse.persistence.internal.identitymaps

Examples of org.eclipse.persistence.internal.identitymaps.CacheKey
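
The excerpts below are taken from different EclipseLink internals, but they share one pattern: a primary-key Vector is wrapped in a CacheKey, which is then used either against the identity map (acquireDeferredLock, getCacheKeyForObject) or as an ordinary hash-map key for grouping joined and batch-read rows. A minimal sketch of that second use, assuming only what the excerpts themselves show (the CacheKey(Vector) constructor and key-based equality); the class and method names in the sketch are illustrative, not EclipseLink API:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Vector;

    import org.eclipse.persistence.internal.identitymaps.CacheKey;

    public class CacheKeyMapSketch {
        // Objects indexed by their primary key, using CacheKey as the hash key.
        private final Map<CacheKey, Object> objectsByKey = new HashMap<CacheKey, Object>();

        // Index an object under its primary-key values.
        public void put(Vector primaryKey, Object object) {
            objectsByKey.put(new CacheKey(primaryKey), object);
        }

        // Look the object up again with an equal primary key; CacheKey equality
        // is based on the wrapped key values, which is what the excerpts rely on.
        public Object get(Vector primaryKey) {
            return objectsByKey.get(new CacheKey(primaryKey));
        }
    }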


        if (xmlDescriptor.shouldPreserveDocument() || xmlDescriptor.getPrimaryKeyFieldNames().size() > 0) {
            // If the object has no primary key, fall back to using the (weakly wrapped) object itself as the key.
            if ((pk == null) || (pk.size() == 0)) {
                pk = new Vector();
                pk.addElement(new WeakObjectWrapper(obj));
            }
            // Acquire a deferred lock on the cache key, attach the record (when preserving the document) and the object, then release.
            CacheKey key = session.getIdentityMapAccessorInstance().acquireDeferredLock(pk, xmlDescriptor.getJavaClass(), xmlDescriptor);
            if (xmlDescriptor.shouldPreserveDocument()) {
                key.setRecord(row);
            }
            key.setObject(obj);
            key.releaseDeferredLock();
        }
    }


            Vector pk = xmlDescriptor.getObjectBuilder().extractPrimaryKeyFromObject(obj, session);
            // Fall back to the weakly wrapped object itself when there is no primary key.
            if ((pk == null) || (pk.size() == 0)) {
                pk = new Vector();
                pk.addElement(new WeakObjectWrapper(obj));
            }
            // Return the cached DOM if a record was preserved for this key.
            CacheKey cacheKey = session.getIdentityMapAccessorInstance().getCacheKeyForObject(pk, xmlDescriptor.getJavaClass(), xmlDescriptor);
            if ((cacheKey != null) && (cacheKey.getRecord() != null)) {
                return ((DOMRecord)cacheKey.getRecord()).getDOM();
            }
        }
        return null;
    }

    /**
     * Process the joined data results, grouping the rows by each source object's primary key.
     * This allows all the data to be processed once, instead of n times for each object.
     */
    protected void processDataResults(AbstractSession session) {
        this.dataResultsByPrimaryKey = new HashMap();
        int size = this.dataResults.size();
        CacheKey lastCacheKey = null;
        List childRows = null;
        ObjectBuilder builder = getDescriptor().getObjectBuilder();
        int parentIndex = getParentResultIndex();
        for (int dataResultsIndex = 0; dataResultsIndex < size; dataResultsIndex++) {
            AbstractRecord row = this.dataResults.get(dataResultsIndex);
            AbstractRecord parentRow = row;
            // Must adjust for the parent index to ensure the correct pk is extracted.
            if (parentIndex > 0) {
                Vector trimmedFields = new NonSynchronizedSubVector(row.getFields(), parentIndex, row.size());
                Vector trimmedValues = new NonSynchronizedSubVector(row.getValues(), parentIndex, row.size());
                parentRow = new DatabaseRecord(trimmedFields, trimmedValues);
            }
            // Extract the primary key of the source object, to filter only the joined rows for that object.
            Vector sourceKey = builder.extractPrimaryKeyFromRow(parentRow, session);
            // The row may come from an outer join, so ignore a null key.
            if (sourceKey != null) {
                CacheKey sourceCacheKey = new CacheKey(sourceKey);
                if ((lastCacheKey != null) && lastCacheKey.equals(sourceCacheKey)) {
                    childRows.add(row);
                    if (shouldFilterDuplicates()) {
                        // Also null out the row because it is a duplicate to avoid object building processing it.
                        this.dataResults.set(dataResultsIndex, null);
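
The excerpt above is cut off before the map is populated. The structure processDataResults builds can be pictured as each parent object's CacheKey mapping to the list of joined child rows for that parent. A simplified sketch of that grouping, not the original method body; keyOf stands in for builder.extractPrimaryKeyFromRow(parentRow, session), and the parent-index trimming and duplicate filtering are omitted:

    // Sketch only: group joined rows by the CacheKey of their parent object.
    static Map<CacheKey, List<AbstractRecord>> groupByParent(List<AbstractRecord> dataResults) {
        Map<CacheKey, List<AbstractRecord>> byParent = new HashMap<CacheKey, List<AbstractRecord>>();
        for (AbstractRecord row : dataResults) {
            Vector parentKey = keyOf(row);          // placeholder for extractPrimaryKeyFromRow(...)
            if (parentKey == null) {
                continue;                           // outer joins can produce rows with no parent key
            }
            CacheKey parent = new CacheKey(parentKey);
            List<AbstractRecord> childRows = byParent.get(parent);
            if (childRows == null) {
                childRows = new ArrayList<AbstractRecord>();
                byParent.put(parent, childRows);
            }
            childRows.add(row);
        }
        return byParent;
    }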

                referenceObjectsByKey = new Hashtable();
                // 'results' (declared above this excerpt) holds the batch-read target objects;
                // 'rows' holds the corresponding data rows.
                List rows = (List)complexResult.getData();
                int size = results.size();
                // Group each target object under the CacheKey of its source row.
                for (int index = 0; index < size; index++) {
                    Object eachReferenceObject = results.get(index);
                    CacheKey eachReferenceKey = new CacheKey(extractSourceKeyFromRow((AbstractRecord)rows.get(index), session));
                    if (!referenceObjectsByKey.containsKey(eachReferenceKey)) {
                        referenceObjectsByKey.put(eachReferenceKey, containerPolicy.containerInstance());
                    }
                    containerPolicy.addInto(eachReferenceObject, referenceObjectsByKey.get(eachReferenceKey), session);
                }
                setBatchReadObjects(referenceObjectsByKey, query, session);
            }
        }
        Object result = referenceObjectsByKey.get(new CacheKey(extractPrimaryKeyFromRow(databaseRow, session)));

        // The source object might not have any target objects
        if (result == null) {
            return containerPolicy.containerInstance();
        } else {

    public void validateDelete(int rowCount, Object object, DeleteObjectQuery query) {
        if (rowCount <= 0) {
            // Mark the object as invalid in the session cache if its version no longer matches the version this query originally read.
            Vector primaryKey = query.getPrimaryKey();
            AbstractSession session = query.getSession().getParentIdentityMapSession(query, true, true);
            CacheKey cacheKey = session.getIdentityMapAccessorInstance().getCacheKeyForObject(primaryKey, query.getReferenceClass(), query.getDescriptor());
            if ((cacheKey != null) && (cacheKey.getObject() != null) && (query.getObjectChangeSet() != null)) {
                Object queryVersion = query.getObjectChangeSet().getInitialWriteLockValue();
                Object cacheVersion = getWriteLockValue(cacheKey.getObject(), primaryKey, session);
                if (compareWriteLockValues(queryVersion, cacheVersion) != 0) {
                    cacheKey.setInvalidationState(CacheKey.CACHE_KEY_INVALID);
                }
            }
            throw OptimisticLockException.objectChangedSinceLastReadWhenDeleting(object, query);
        }
    }

    public void validateUpdate(int rowCount, Object object, WriteObjectQuery query) {
        if (rowCount <= 0) {
            // Mark the object as invalid in the session cache unless the cache already holds a newer version than the one this query read.
            Vector primaryKey = query.getPrimaryKey();
            AbstractSession session = query.getSession().getParentIdentityMapSession(query, true, true);
            CacheKey cacheKey = session.getIdentityMapAccessorInstance().getCacheKeyForObject(primaryKey, query.getReferenceClass(), query.getDescriptor());
            if ((cacheKey != null) && (cacheKey.getObject() != null) && (query.getObjectChangeSet() != null)) {
                Object queryVersion = query.getObjectChangeSet().getInitialWriteLockValue();
                Object cacheVersion = getWriteLockValue(cacheKey.getObject(), primaryKey, session);
                if (compareWriteLockValues(queryVersion, cacheVersion) >= 0) {
                    cacheKey.setInvalidationState(CacheKey.CACHE_KEY_INVALID);
                }
            }
            throw OptimisticLockException.objectChangedSinceLastReadWhenUpdating(object, query);
        }
    }

        Enumeration objects = map.keys();
        if (!objects.hasMoreElements()) {
            ((AbstractSession)session).log(SessionLog.INFO, SessionLog.SERVER, "jmx_mbean_runtime_services_identity_map_empty", className);
        }

        // Log every CacheKey and its cached object held in this identity map.
        CacheKey cacheKey;
        while (objects.hasMoreElements()) {
            cacheKey = (CacheKey)objects.nextElement();
            ((AbstractSession)session).log(SessionLog.INFO, SessionLog.SERVER, "jmx_mbean_runtime_services_print_cache_key_value",
                    cacheKey.getKey().toString(), cacheKey.getObject().toString());
        }
    }

            // Only the child object's data results are required, so they must also be trimmed.
            List nestedDataResults = dataResults;
            if (nestedDataResults == null) {
                // Extract the primary key of the source object, to filter only the joined rows for that object.
                Vector sourceKey = getDescriptor().getObjectBuilder().extractPrimaryKeyFromRow(row, executionSession);
                CacheKey sourceCacheKey = new CacheKey(sourceKey);
                nestedDataResults = joinManager.getDataResultsByPrimaryKey().get(sourceCacheKey);
            }
            nestedDataResults = new ArrayList(nestedDataResults);
            Object indexObject = joinManager.getJoinedMappingIndexes_().get(this);
            // Trim results to start at nested row index.

                for (Enumeration rowsEnum = rows.elements(); rowsEnum.hasMoreElements();) {
                    AbstractRecord referenceRow = (AbstractRecord)rowsEnum.nextElement();
                    Object referenceKey = referenceRow.get(getDirectKeyField());
                    Object referenceValue = referenceRow.get(getDirectField());
                    CacheKey eachCacheKey = new CacheKey(extractKeyFromReferenceRow(referenceRow, session));

                    Object container = referenceDataByKey.get(eachCacheKey);
                    if (container == null) {
                        container = mappingContainerPolicy.containerInstance();
                        referenceDataByKey.put(eachCacheKey, container);
                    }

                    // Allow for key conversion.
                    if (getKeyConverter() != null) {
                        referenceKey = getKeyConverter().convertDataValueToObjectValue(referenceKey, query.getSession());
                    }

                    // Allow for value conversion.
                    if (getValueConverter() != null) {
                        referenceValue = getValueConverter().convertDataValueToObjectValue(referenceValue, query.getSession());
                    }

                    mappingContainerPolicy.addInto(referenceKey, referenceValue, container, query.getSession());
                }

                query.setProperty("batched objects", referenceDataByKey);
            }
        }
        Object result = referenceDataByKey.get(new CacheKey(extractPrimaryKeyFromRow(databaseRow, session)));

        // The source object might not have any target objects
        if (result == null) {
            return mappingContainerPolicy.containerInstance();
        } else {

        ContainerPolicy policy = getContainerPolicy();
        Object value = policy.containerInstance();
        ObjectBuilder objectBuilder = getDescriptor().getObjectBuilder();
        // Extract the primary key of the source object, to filter only the joined rows for that object.
        Vector sourceKey = objectBuilder.extractPrimaryKeyFromRow(row, executionSession);
        CacheKey sourceCacheKey = new CacheKey(sourceKey);
        // If the query was using joining, all of the result rows by primary key will have been computed.
        List rows = joinManager.getDataResultsByPrimaryKey().get(sourceCacheKey);
       
        // A set of direct values must be maintained to avoid duplicates from multiple 1-m joins.
        Set directValues = new HashSet();
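
The last excerpt stops right after declaring the duplicate-tracking set. A hedged sketch, not the original continuation, of the idea the comment describes: each direct value is added to the container only the first time it is seen, so rows repeated by other 1-m joins are not added twice (getDirectField() is borrowed from the map excerpt above and assumed here; value conversion is omitted):

        // Sketch only: filter duplicate direct values produced by multiple 1-m joins.
        for (Object rowObject : rows) {
            AbstractRecord joinedRow = (AbstractRecord)rowObject;
            Object directValue = joinedRow.get(getDirectField());   // assumed accessor for the direct field
            if (directValues.add(directValue)) {                    // add() returns true only for the first occurrence
                policy.addInto(directValue, value, executionSession);
            }
        }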
