/**
 * Inserts or replaces the mapping for {@code key} in the extendible hash table.
 * <p>
 * A {@code null} key is stored in a dedicated single-page "null bucket" file;
 * non-null keys are hashed and routed through the directory to a hash bucket.
 * If the target bucket is full it is split (possibly splitting a directory
 * node or adding a new directory level) and the put is retried recursively.
 * All page modifications are made under an exclusive page lock and logged via
 * {@code logPageChanges} (write-ahead logging).
 *
 * @param key   key to store; may be {@code null}
 * @param value value to associate with the key
 * @throws IOException if a disk-cache/page operation fails
 */
private void doPut(K key, V value) throws IOException {
// Net change in entry count produced by this call: -1 for removing an old
// value, +1 for adding the new one (so a pure replace nets to 0).
int sizeDiff = 0;
if (key == null) {
boolean isNew;
OCacheEntry cacheEntry;
// The null-bucket page is allocated lazily on the first null-key insert.
if (diskCache.getFilledUpTo(nullBucketFileId) == 0) {
cacheEntry = diskCache.allocateNewPage(nullBucketFileId);
isNew = true;
} else {
cacheEntry = diskCache.load(nullBucketFileId, 0, false);
isNew = false;
}
cacheEntry.acquireExclusiveLock();
try {
ONullBucket<V> nullBucket = new ONullBucket<V>(cacheEntry, getTrackMode(), valueSerializer, isNew);
// If a value was already present this is a replace, not an insert.
if (nullBucket.getValue() != null)
sizeDiff--;
nullBucket.setValue(value);
sizeDiff++;
cacheEntry.markDirty();
logPageChanges(nullBucket, cacheEntry.getFileId(), cacheEntry.getPageIndex(), isNew);
} finally {
cacheEntry.releaseExclusiveLock();
diskCache.release(cacheEntry);
}
changeSize(sizeDiff);
} else {
final long hashCode = keyHashFunction.hashCode(key);
// Walk the directory to find the node/item addressing this hash code.
final BucketPath bucketPath = getBucket(hashCode);
final long bucketPointer = directory.getNodePointer(bucketPath.nodeIndex, bucketPath.itemIndex + bucketPath.hashMapOffset);
// Every directory slot must already point at a bucket; buckets are only
// created by splitting existing ones in this version.
if (bucketPointer == 0)
throw new IllegalStateException("In this version of hash table buckets are added through split only.");
// A bucket pointer encodes both the page index and the file level.
final long pageIndex = getPageIndex(bucketPointer);
final int fileLevel = getFileLevel(bucketPointer);
final OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel);
cacheEntry.acquireExclusiveLock();
try {
final OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(cacheEntry, keySerializer, valueSerializer, keyTypes,
getTrackMode());
final int index = bucket.getIndex(hashCode, key);
// Key already present: try an in-place update first.
if (index > -1) {
final int updateResult = bucket.updateEntry(index, value);
// NOTE(review): return codes inferred from usage — 0 appears to mean
// "nothing changed" (no dirty/log), 1 "updated in place"; confirm
// against OHashIndexBucket.updateEntry.
if (updateResult == 0) {
changeSize(sizeDiff);
return;
}
if (updateResult == 1) {
cacheEntry.markDirty();
logPageChanges(bucket, cacheEntry.getFileId(), cacheEntry.getPageIndex(), false);
changeSize(sizeDiff);
return;
}
// -1: the new value cannot replace the old one in place (presumably a
// serialized-size mismatch) — delete and fall through to re-insert.
assert updateResult == -1;
bucket.deleteEntry(index);
sizeDiff--;
}
// Plain insert: succeeds when the bucket has room for the entry.
if (bucket.addEntry(hashCode, key, value)) {
cacheEntry.markDirty();
logPageChanges(bucket, cacheEntry.getFileId(), cacheEntry.getPageIndex(), false);
sizeDiff++;
changeSize(sizeDiff);
return;
}
// Bucket is full: split it, then update the directory to reference the
// two resulting buckets before retrying the insert (see tail of method).
final BucketSplitResult splitResult = splitBucket(bucket, fileLevel, pageIndex);
final long updatedBucketPointer = splitResult.updatedBucketPointer;
final long newBucketPointer = splitResult.newBucketPointer;
final int bucketDepth = splitResult.newDepth;
if (bucketDepth <= bucketPath.nodeGlobalDepth) {
// The current directory node can already distinguish the two buckets;
// only its pointers need updating.
updateNodeAfterBucketSplit(bucketPath, bucketDepth, newBucketPointer, updatedBucketPointer);
} else {
// The node's addressing capacity is exhausted: either split the node
// (doubling its local depth) or, at MAX_LEVEL_DEPTH, add a child level.
if (bucketPath.nodeLocalDepth < MAX_LEVEL_DEPTH) {
final NodeSplitResult nodeSplitResult = splitNode(bucketPath);
// Both halves cannot simultaneously collapse to a single bucket,
// otherwise the split would have been unnecessary.
assert !(nodeSplitResult.allLeftHashMapsEqual && nodeSplitResult.allRightHashMapsEqual);
final long[] newNode = nodeSplitResult.newNode;
final int nodeLocalDepth = bucketPath.nodeLocalDepth + 1;
// Each hash map inside a node covers 2^localDepth directory items.
final int hashMapSize = 1 << nodeLocalDepth;
assert nodeSplitResult.allRightHashMapsEqual == checkAllMapsContainSameBucket(newNode, hashMapSize);
int newNodeIndex = -1;
// Materialize the right-hand node only when it is actually needed:
// either its maps differ, or the item being inserted falls in it.
if (!nodeSplitResult.allRightHashMapsEqual || bucketPath.itemIndex >= MAX_LEVEL_SIZE / 2)
newNodeIndex = directory.addNewNode((byte) 0, (byte) 0, (byte) nodeLocalDepth, newNode);
// After a node split every item/offset doubles (one extra hash bit).
final int updatedItemIndex = bucketPath.itemIndex << 1;
final int updatedOffset = bucketPath.hashMapOffset << 1;
final int updatedGlobalDepth = bucketPath.nodeGlobalDepth + 1;
boolean allLeftHashMapsEqual = nodeSplitResult.allLeftHashMapsEqual;
boolean allRightHashMapsEqual = nodeSplitResult.allRightHashMapsEqual;
if (updatedOffset < MAX_LEVEL_SIZE) {
// The split item lands in the left (reused) node, so that node is
// no longer uniform.
allLeftHashMapsEqual = false;
final BucketPath updatedBucketPath = new BucketPath(bucketPath.parent, updatedOffset, updatedItemIndex,
bucketPath.nodeIndex, nodeLocalDepth, updatedGlobalDepth);
updateNodeAfterBucketSplit(updatedBucketPath, bucketDepth, newBucketPointer, updatedBucketPointer);
} else {
// The split item lands in the newly added right-hand node.
allRightHashMapsEqual = false;
final BucketPath newBucketPath = new BucketPath(bucketPath.parent, updatedOffset - MAX_LEVEL_SIZE, updatedItemIndex,
newNodeIndex, nodeLocalDepth, updatedGlobalDepth);
updateNodeAfterBucketSplit(newBucketPath, bucketDepth, newBucketPointer, updatedBucketPointer);
}
// Propagate the split to parent/sibling nodes in the directory.
updateNodesAfterSplit(bucketPath, bucketPath.nodeIndex, newNode, nodeLocalDepth, hashMapSize, allLeftHashMapsEqual,
allRightHashMapsEqual, newNodeIndex);
// If the left node ended up fully uniform it is redundant — its parent
// can point straight at the bucket — so reclaim it.
if (allLeftHashMapsEqual)
directory.deleteNode(bucketPath.nodeIndex);
} else {
// Node already at maximum local depth: grow the directory by a level.
addNewLevelNode(bucketPath, bucketPath.nodeIndex, newBucketPointer, updatedBucketPointer);
}
}
} finally {
cacheEntry.releaseExclusiveLock();
diskCache.release(cacheEntry);
}
changeSize(sizeDiff);
// The split freed space but the entry is not stored yet — retry the put.
// Recursion depth is bounded by the number of consecutive splits needed.
doPut(key, value);