Package org.eclipse.persistence.internal.identitymaps

Examples of org.eclipse.persistence.internal.identitymaps.CacheKey

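Every example below is an excerpt from EclipseLink internals, cut off where the enclosing method continues. The common idiom is the same throughout: a CacheKey is built from an object's primary-key values and then used as a hash-map key, so cached rows and objects can be grouped and looked up by identity. The following is a minimal standalone sketch of that idiom only; the class name and sample values are hypothetical, and it assumes the older internal API used in these excerpts, where the CacheKey constructor takes the primary-key Vector directly and two keys built from equal values compare equal.

import java.util.HashMap;
import java.util.Map;
import java.util.Vector;

import org.eclipse.persistence.internal.identitymaps.CacheKey;

// Minimal sketch; hypothetical class and values, relying only on the internal
// CacheKey(Vector) constructor and key-based equality shown in the excerpts below.
public class CacheKeyLookupSketch {
    public static void main(String[] args) {
        Map<CacheKey, String> dataByPrimaryKey = new HashMap<CacheKey, String>();

        // Build a CacheKey from the primary-key values of a source object.
        Vector<Object> primaryKey = new Vector<Object>();
        primaryKey.add(Integer.valueOf(42));
        dataByPrimaryKey.put(new CacheKey(primaryKey), "rows batched for object 42");

        // A CacheKey built from equal primary-key values finds the same entry;
        // the batch-read and join excerpts below rely on exactly this behavior.
        Vector<Object> samePrimaryKey = new Vector<Object>();
        samePrimaryKey.add(Integer.valueOf(42));
        System.out.println(dataByPrimaryKey.get(new CacheKey(samePrimaryKey)));
    }
}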

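            // Excerpt: once an object's change set has been resolved in the unit of work,
            // a CacheKey built from the object's primary key is attached to that change set.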
            if ((objectChangeSet == null) && (((UnitOfWorkImpl)session).getUnitOfWorkChangeSet() != null)) {
                objectChangeSet = (ObjectChangeSet)((UnitOfWorkImpl)session).getUnitOfWorkChangeSet().getObjectChangeSetForClone(object);
            }
            if (objectChangeSet != null) {
                updateChangeSet(descriptor, objectChangeSet, sequenceNumberField, object);
                objectChangeSet.setCacheKey(new CacheKey(primaryKey));
            }
        }
    }


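        // Excerpt: iterate an identity map and log each CacheKey's primary key and cached
        // object (used by the runtime-services MBean to print cache contents).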
        Enumeration objects = map.keys();
        if (!objects.hasMoreElements()) {
            ((AbstractSession)session).log(SessionLog.INFO, SessionLog.SERVER, "jmx_mbean_runtime_services_identity_map_empty", className);
        }

        CacheKey cacheKey;
        while (objects.hasMoreElements()) {
            cacheKey = (CacheKey)objects.nextElement();
            ((AbstractSession)session).log(SessionLog.INFO, SessionLog.SERVER, "jmx_mbean_runtime_services_print_cache_key_value",
                    cacheKey.getKey().toString(), cacheKey.getObject().toString());
        }
    }

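            // Excerpt: existence check against the identity maps. Each nested unit of work is
            // searched for a CacheKey first; only then is the parent session's cache consulted,
            // with cache invalidation taken into account.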
            // If this is a UOW and modification queries have been executed, the cache cannot be trusted.
            if (this.checkDatabaseIfInvalid && (session.isUnitOfWork() &&  ((UnitOfWorkImpl)session).shouldReadFromDB())) {
                return null;
            }
               
            CacheKey cacheKey;
            Class objectClass = object.getClass();
            AbstractSession tempSession = session;
            while (tempSession.isUnitOfWork()) { // Could be nested; check all UOWs.
                cacheKey = tempSession.getIdentityMapAccessorInstance().getCacheKeyForObjectForLock(primaryKey, objectClass, descriptor);
                if (cacheKey != null) {
                    // If in the UOW cache it can't be invalid.
                    return Boolean.TRUE;
                }
                tempSession = ((UnitOfWorkImpl)tempSession).getParent();
            }
            // Did not find it registered in UOW so check main cache and check for invalidation.
            cacheKey = tempSession.getIdentityMapAccessorInstance().getCacheKeyForObject(primaryKey, objectClass, descriptor);

            if (cacheKey != null) {
                // Assume that if there is a cache key, the object exists.
                if (this.checkDatabaseIfInvalid) {
                    checkDescriptor(object, session);
                    if (getDescriptor().getCacheInvalidationPolicy().isInvalidated(cacheKey, System.currentTimeMillis())) {
                        return null;
                    }
                }
               
                Object objectFromCache = cacheKey.getObject();
                if ((session.isUnitOfWork()) && ((UnitOfWorkImpl)session).wasDeleted(objectFromCache)) {
                    if (shouldCheckCacheForDoesExist()) {
                        return Boolean.FALSE;
                    }
                } else {
                // ... (excerpt truncated)

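    // Excerpt: when a delete affects no rows, the cached object is invalidated through its
    // CacheKey if its write-lock value no longer matches the query's, and an optimistic
    // lock exception is thrown.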
    public void validateDelete(int rowCount, Object object, DeleteObjectQuery query) {
        if (rowCount <= 0) {
            // Invalidate the cached object, but only if its version no longer matches the version used by the query.
            Vector primaryKey = query.getPrimaryKey();
            AbstractSession session = query.getSession().getParentIdentityMapSession(query, true, true);
            CacheKey cacheKey = session.getIdentityMapAccessorInstance().getCacheKeyForObject(primaryKey, query.getReferenceClass(), query.getDescriptor());
            if ((cacheKey != null) && (cacheKey.getObject() != null) && (query.getObjectChangeSet() != null)) {
                Object queryVersion = query.getObjectChangeSet().getInitialWriteLockValue();
                Object cacheVersion = getWriteLockValue(cacheKey.getObject(), primaryKey, session);
                if (compareWriteLockValues(queryVersion, cacheVersion) != 0) {
                    cacheKey.setInvalidationState(CacheKey.CACHE_KEY_INVALID);
                }
            }
            throw OptimisticLockException.objectChangedSinceLastReadWhenDeleting(object, query);
        }
    }

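    // Excerpt: the same pattern for a failed update; the CacheKey's invalidation state is
    // set before the optimistic lock exception is thrown.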
    public void validateUpdate(int rowCount, Object object, WriteObjectQuery query) {
        if (rowCount <= 0) {
            // Invalidate the cached object, but only if the version used by the query is not older than the cached version.
            Vector primaryKey = query.getPrimaryKey();
            AbstractSession session = query.getSession().getParentIdentityMapSession(query, true, true);
            CacheKey cacheKey = session.getIdentityMapAccessorInstance().getCacheKeyForObject(primaryKey, query.getReferenceClass(), query.getDescriptor());
            if ((cacheKey != null) && (cacheKey.getObject() != null) && (query.getObjectChangeSet() != null)) {
                Object queryVersion = query.getObjectChangeSet().getInitialWriteLockValue();
                Object cacheVersion = getWriteLockValue(cacheKey.getObject(), primaryKey, session);
                if (compareWriteLockValues(queryVersion, cacheVersion) >= 0) {
                    cacheKey.setInvalidationState(CacheKey.CACHE_KEY_INVALID);
                }
            }
            throw OptimisticLockException.objectChangedSinceLastReadWhenUpdating(object, query);
        }
    }

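                // Excerpt: batch reading for a direct-map mapping. Reference rows are grouped into
                // containers keyed by a CacheKey built from each source object's primary key, and
                // the batch for the current row is then looked up by the same kind of key.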
                for (Enumeration rowsEnum = rows.elements(); rowsEnum.hasMoreElements();) {
                    AbstractRecord referenceRow = (AbstractRecord)rowsEnum.nextElement();
                    Object referenceKey = referenceRow.get(getDirectKeyField());
                    Object referenceValue = referenceRow.get(getDirectField());
                    CacheKey eachCacheKey = new CacheKey(extractKeyFromReferenceRow(referenceRow, session));

                    Object container = referenceDataByKey.get(eachCacheKey);
                    if (container == null) {
                        container = mappingContainerPolicy.containerInstance();
                        referenceDataByKey.put(eachCacheKey, container);
                    }

                    // Allow for key conversion.
                    if (getKeyConverter() != null) {
                        referenceKey = getKeyConverter().convertDataValueToObjectValue(referenceKey, query.getSession());
                    }

                    // Allow for value conversion.
                    if (getValueConverter() != null) {
                        referenceValue = getValueConverter().convertDataValueToObjectValue(referenceValue, query.getSession());
                    }

                    mappingContainerPolicy.addInto(referenceKey, referenceValue, container, query.getSession());
                }

                query.setProperty("batched objects", referenceDataByKey);
                query.setSession(null); //reset session as it is no longer needed.
            }
        }
        Object result = referenceDataByKey.get(new CacheKey(extractPrimaryKeyFromRow(databaseRow, session)));

        // The source object might not have any target objects
        if (result == null) {
            return mappingContainerPolicy.containerInstance();
        } else {
            // ... (excerpt truncated)

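        // Excerpt: joined-attribute processing. A CacheKey built from the source object's
        // primary key selects that object's joined data rows from the join manager.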
        ContainerPolicy policy = getContainerPolicy();
        Object value = policy.containerInstance();
        ObjectBuilder objectBuilder = getDescriptor().getObjectBuilder();
        // Extract the primary key of the source object, to filter only the joined rows for that object.
        Vector sourceKey = objectBuilder.extractPrimaryKeyFromRow(row, executionSession);
        CacheKey sourceCacheKey = new CacheKey(sourceKey);
        // If the query used joining, the result rows will already have been indexed by primary key.
        List rows = joinManager.getDataResultsByPrimaryKey().get(sourceCacheKey);
       
        // A set of direct values must be maintained to avoid duplicates from multiple 1-m joins.
        Set directValues = new HashSet();
        // ... (excerpt truncated)

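            // Excerpt: nested joined results are likewise looked up by the source object's
            // CacheKey before being trimmed to the nested row index.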
            // Only the data results of the child object are required; they must also be trimmed.
            List nestedDataResults = dataResults;
            if (nestedDataResults == null) {
                // Extract the primary key of the source object, to filter only the joined rows for that object.
                Vector sourceKey = getDescriptor().getObjectBuilder().extractPrimaryKeyFromRow(row, executionSession);
                CacheKey sourceCacheKey = new CacheKey(sourceKey);
                nestedDataResults = joinManager.getDataResultsByPrimaryKey().get(sourceCacheKey);
            }
            nestedDataResults = new ArrayList(nestedDataResults);
            Object indexObject = joinManager.getJoinedMappingIndexes_().get(this);
            // Trim results to start at nested row index.
            // ... (excerpt truncated)

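    // Excerpt: processDataResults indexes all joined data rows by each parent object's
    // primary key (as a CacheKey), so the data is grouped in a single pass.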
    /**
     * This allows all the data to be processed once, instead of n times for each object.
     */
    protected void processDataResults(AbstractSession session) {
        this.dataResultsByPrimaryKey = new HashMap();
        int size = this.dataResults.size();
        CacheKey lastCacheKey = null;
        List childRows = null;
        ObjectBuilder builder = getDescriptor().getObjectBuilder();
        int parentIndex = getParentResultIndex();
        for (int dataResultsIndex = 0; dataResultsIndex < size; dataResultsIndex++) {
            AbstractRecord row = this.dataResults.get(dataResultsIndex);
            AbstractRecord parentRow = row;
            // Must adjust for the parent index to ensure the correct pk is extracted.
            if (parentIndex > 0) {
                Vector trimmedFields = new NonSynchronizedSubVector(row.getFields(), parentIndex, row.size());
                Vector trimmedValues = new NonSynchronizedSubVector(row.getValues(), parentIndex, row.size());
                parentRow = new DatabaseRecord(trimmedFields, trimmedValues);
            }
            // Extract the primary key of the source object, to filter only the joined rows for that object.
            Vector sourceKey = builder.extractPrimaryKeyFromRow(parentRow, session);
            // May be an outer join, so ignore null.
            if (sourceKey != null) {
                CacheKey sourceCacheKey = new CacheKey(sourceKey);
                if ((lastCacheKey != null) && lastCacheKey.equals(sourceCacheKey)) {
                    childRows.add(row);
                    if (shouldFilterDuplicates()) {
                        // Also null out the row because it is a duplicate to avoid object building processing it.
                        this.dataResults.set(dataResultsIndex, null);
                        // ... (excerpt truncated)

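                // Excerpt: batch reading of reference objects. Each fetched object is added to a
                // container keyed by a CacheKey built from its source row, and the current row's
                // primary key then selects its batch.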
                referenceObjectsByKey = new Hashtable();
                List rows = (List)complexResult.getData();
                int size = results.size();
                for (int index = 0; index < size; index++) {
                    Object eachReferenceObject = results.get(index);
                    CacheKey eachReferenceKey = new CacheKey(extractSourceKeyFromRow((AbstractRecord)rows.get(index), session));
                    if (!referenceObjectsByKey.containsKey(eachReferenceKey)) {
                        referenceObjectsByKey.put(eachReferenceKey, containerPolicy.containerInstance());
                    }
                    containerPolicy.addInto(eachReferenceObject, referenceObjectsByKey.get(eachReferenceKey), session);
                }
                setBatchReadObjects(referenceObjectsByKey, query, session);
                query.setSession(null); //reset session as it is no longer needed.
            }
        }
        Object result = referenceObjectsByKey.get(new CacheKey(extractPrimaryKeyFromRow(databaseRow, session)));

        // The source object might not have any target objects
        if (result == null) {
            return containerPolicy.containerInstance();
        } else {
            // ... (excerpt truncated)
