-
Notifications
You must be signed in to change notification settings - Fork 888
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Optimize memory: Support shrinking in ConcurrentLongLongPairHashMap #3061
Changes from 5 commits
f92c283
84fd318
14f7f84
f4ac7db
7372984
1a92f19
7be0421
cf8896e
ecd9762
3cc9301
39ff27c
f11d7b4
85045cd
b698395
2a8f188
c19e1a2
0cccff8
20e4164
151dda5
f75fe72
4e41659
97bf6c3
9802dc6
79aea61
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change | ||||
---|---|---|---|---|---|---|
|
@@ -48,6 +48,7 @@ public class ConcurrentLongLongPairHashMap { | |||||
private static final long ValueNotFound = -1L; | ||||||
|
||||||
private static final float MapFillFactor = 0.66f; | ||||||
private static final float MapIdleFactor = 0.25f; | ||||||
|
||||||
private static final int DefaultExpectedItems = 256; | ||||||
private static final int DefaultConcurrencyLevel = 16; | ||||||
|
@@ -228,14 +229,16 @@ private static final class Section extends StampedLock { | |||||
private volatile int capacity; | ||||||
private volatile int size; | ||||||
private int usedBuckets; | ||||||
private int resizeThreshold; | ||||||
private int resizeThresholdUp; | ||||||
private int resizeThresholdBelow; | ||||||
|
||||||
Section(int capacity) { | ||||||
this.capacity = alignToPowerOfTwo(capacity); | ||||||
this.table = new long[4 * this.capacity]; | ||||||
this.size = 0; | ||||||
this.usedBuckets = 0; | ||||||
this.resizeThreshold = (int) (this.capacity * MapFillFactor); | ||||||
this.resizeThresholdUp = (int) (this.capacity * MapFillFactor); | ||||||
this.resizeThresholdBelow = (int) (this.capacity * MapIdleFactor); | ||||||
Arrays.fill(table, EmptyKey); | ||||||
} | ||||||
|
||||||
|
@@ -336,9 +339,10 @@ boolean put(long key1, long key2, long value1, long value2, int keyHash, boolean | |||||
bucket = (bucket + 4) & (table.length - 1); | ||||||
} | ||||||
} finally { | ||||||
if (usedBuckets > resizeThreshold) { | ||||||
if (usedBuckets > resizeThresholdUp) { | ||||||
try { | ||||||
rehash(); | ||||||
// Expand the hashmap | ||||||
rehash(capacity * 2); | ||||||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The existing "double the size" strategy was probably not the best one for all use cases either, as it can end up wasting a considerable amount of memory in empty buckets. We could leave it configurable (both the step up and and down), to accommodate different needs. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @merlimat like this?: // Expand the hashmap // shrink the hashmap There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
What do the thresholds mean here? |
||||||
} finally { | ||||||
unlockWrite(stamp); | ||||||
} | ||||||
|
@@ -376,7 +380,16 @@ private boolean remove(long key1, long key2, long value1, long value2, int keyHa | |||||
} | ||||||
|
||||||
} finally { | ||||||
unlockWrite(stamp); | ||||||
if (size < resizeThresholdBelow) { | ||||||
eolivelli marked this conversation as resolved.
Show resolved
Hide resolved
|
||||||
try { | ||||||
// shrink the hashmap | ||||||
rehash(capacity / 2); | ||||||
} finally { | ||||||
unlockWrite(stamp); | ||||||
} | ||||||
} else { | ||||||
unlockWrite(stamp); | ||||||
} | ||||||
} | ||||||
} | ||||||
|
||||||
|
@@ -388,6 +401,18 @@ private void cleanBucket(int bucket) { | |||||
table[bucket + 2] = ValueNotFound; | ||||||
table[bucket + 3] = ValueNotFound; | ||||||
--usedBuckets; | ||||||
|
||||||
// Reduce unnecessary rehash | ||||||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Suggested change
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. OK |
||||||
bucket = (bucket - 4) & (table.length - 1); | ||||||
while (table[bucket] == DeletedKey) { | ||||||
table[bucket] = EmptyKey; | ||||||
table[bucket + 1] = EmptyKey; | ||||||
table[bucket + 2] = ValueNotFound; | ||||||
table[bucket + 3] = ValueNotFound; | ||||||
--usedBuckets; | ||||||
|
||||||
bucket = (bucket - 4) & (table.length - 1); | ||||||
} | ||||||
} else { | ||||||
table[bucket] = DeletedKey; | ||||||
table[bucket + 1] = DeletedKey; | ||||||
|
@@ -453,9 +478,7 @@ public void forEach(BiConsumerLongPair processor) { | |||||
} | ||||||
} | ||||||
|
||||||
private void rehash() { | ||||||
// Expand the hashmap | ||||||
int newCapacity = capacity * 2; | ||||||
private void rehash(int newCapacity) { | ||||||
long[] newTable = new long[4 * newCapacity]; | ||||||
Arrays.fill(newTable, EmptyKey); | ||||||
|
||||||
|
@@ -475,7 +498,8 @@ private void rehash() { | |||||
// Capacity needs to be updated after the values, so that we won't see | ||||||
// a capacity value bigger than the actual array size | ||||||
capacity = newCapacity; | ||||||
resizeThreshold = (int) (capacity * MapFillFactor); | ||||||
resizeThresholdUp = (int) (capacity * MapFillFactor); | ||||||
resizeThresholdBelow = (int) (capacity * MapIdleFactor); | ||||||
} | ||||||
|
||||||
private static void insertKeyValueNoLock(long[] table, int capacity, long key1, long key2, long value1, | ||||||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I think we should be cautious to avoid constantly flickering between shrink & expand. We should try to use a smaller threshold here to limit that. Maybe 0.15?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
OK