Remove unused configuration of load factor
This allows removal of the loadFactor and threshold fields, which saves 8 bytes per object.

While here:
- Reduce diff between Map/Set classes
- Make base forEach() work for Linked classes

Shallow object size (bytes):
- CompactHashMap: 64 -> 56
- CompactLinkedHashMap: 80 -> 72
- CompactHashSet: 40 -> 32
- CompactLinkedHashSet: 56 -> 48
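
These sizes can be spot-checked with JOL (OpenJDK's Java Object Layout tool). The sketch below is illustrative only, not part of this commit: Before/After are toy stand-ins for the old and new field sets, and absolute totals depend on the JVM, compressed oops, and superclass fields.

// Illustrative only: toy classes mimicking the removed fields. Requires the
// org.openjdk.jol:jol-core dependency; exact sizes vary by JVM settings.
import org.openjdk.jol.info.ClassLayout;

public class ShallowSizeDemo {
  static class Before { // pre-commit field set
    int[] table; long[] entries; Object[] keys; Object[] values;
    float loadFactor; // removed by this change: 4 bytes
    int threshold;    // removed by this change: 4 bytes
    int modCount; int size;
  }

  static class After { // post-commit field set
    int[] table; long[] entries; Object[] keys; Object[] values;
    int modCount; // doubles as expectedSize until arrays are allocated
    int size;
  }

  public static void main(String[] args) {
    System.out.println(ClassLayout.parseClass(Before.class).toPrintable());
    System.out.println(ClassLayout.parseClass(After.class).toPrintable());
  }
}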

RELNOTES=n/a

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=241053148
noackjr authored and ronshapiro committed Apr 2, 2019
1 parent 6d7f655 commit c86ee26
Showing 8 changed files with 273 additions and 420 deletions.
106 changes: 31 additions & 75 deletions android/guava/src/com/google/common/collect/CompactHashMap.java
@@ -96,10 +96,7 @@ public static <K, V> CompactHashMap<K, V> createWithExpectedSize(int expectedSize) {
     return new CompactHashMap<>(expectedSize);
   }
 
-  private static final int MAXIMUM_CAPACITY = 1 << 30;
-
-  // TODO(user): decide, and inline, load factor. 0.75?
-  static final float DEFAULT_LOAD_FACTOR = 1.0f;
+  private static final float LOAD_FACTOR = 1.0f;
 
   /** Bitmask that selects the low 32 bits. */
   private static final long NEXT_MASK = (1L << 32) - 1;
@@ -143,53 +140,34 @@ public static <K, V> CompactHashMap<K, V> createWithExpectedSize(int expectedSize) {
    */
   @VisibleForTesting @MonotonicNonNullDecl transient Object[] values;
 
-  /** The load factor. */
-  transient float loadFactor;
-
   /**
    * Keeps track of modifications of this set, to make it possible to throw
    * ConcurrentModificationException in the iterator. Note that we choose not to make this volatile,
    * so we do less of a "best effort" to track such errors, for better performance.
    */
   transient int modCount;
 
-  /** When we have this many elements, resize the hashtable. */
-  private transient int threshold;
-
   /** The number of elements contained in the set. */
   private transient int size;
 
   /** Constructs a new empty instance of {@code CompactHashMap}. */
   CompactHashMap() {
-    init(DEFAULT_SIZE, DEFAULT_LOAD_FACTOR);
+    init(DEFAULT_SIZE);
   }
 
   /**
    * Constructs a new instance of {@code CompactHashMap} with the specified capacity.
    *
    * @param capacity the initial capacity of this {@code CompactHashMap}.
    */
-  CompactHashMap(int capacity) {
-    init(capacity, DEFAULT_LOAD_FACTOR);
-  }
-
-  /**
-   * Constructs a new instance of {@code CompactHashMap} with the specified capacity and load
-   * factor.
-   *
-   * @param capacity the initial capacity of this {@code CompactHashMap}.
-   * @param loadFactor the load factor of this {@code CompactHashMap}.
-   */
-  CompactHashMap(int capacity, float loadFactor) {
-    init(capacity, loadFactor);
+  CompactHashMap(int expectedSize) {
+    init(expectedSize);
   }
 
   /** Pseudoconstructor for serialization support. */
-  void init(int expectedSize, float loadFactor) {
+  void init(int expectedSize) {
     Preconditions.checkArgument(expectedSize >= 0, "Initial capacity must be non-negative");
-    Preconditions.checkArgument(loadFactor > 0, "Illegal load factor");
-    this.loadFactor = loadFactor;
-    this.threshold = Math.max(1, expectedSize); // Save expectedSize for use in allocArrays()
+    this.modCount = Math.max(1, expectedSize); // Save expectedSize for use in allocArrays()
   }
 
   /** Returns whether arrays need to be allocated. */
@@ -201,15 +179,13 @@ boolean needsAllocArrays() {
   void allocArrays() {
     Preconditions.checkState(needsAllocArrays(), "Arrays already allocated");
 
-    int expectedSize = threshold;
-    int buckets = Hashing.closedTableSize(expectedSize, loadFactor);
+    int expectedSize = modCount;
+    int buckets = Hashing.closedTableSize(expectedSize, LOAD_FACTOR);
     this.table = newTable(buckets);
 
+    this.entries = newEntries(expectedSize);
     this.keys = new Object[expectedSize];
     this.values = new Object[expectedSize];
-
-    this.entries = newEntries(expectedSize);
-    this.threshold = Math.max(1, (int) (buckets * loadFactor));
   }
 
   private static int[] newTable(int size) {
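
A note on the init()/allocArrays() pair above: with threshold gone, the expected size still has to survive from construction until the arrays are lazily allocated, so init() stashes it in modCount, whose real value is irrelevant before the first insertion. A minimal sketch of the pattern, with names mirroring the diff but otherwise hypothetical:

// Sketch of the field-reuse trick, not the actual Guava source.
class LazyArrays {
  transient int modCount;  // pre-allocation: holds expectedSize (kept >= 1)
  transient int[] table;   // null until allocArrays() runs

  void init(int expectedSize) {
    modCount = Math.max(1, expectedSize); // stash; Math.max keeps it nonzero
  }

  boolean needsAllocArrays() {
    return table == null;
  }

  void allocArrays() {
    int expectedSize = modCount; // consume the stashed hint
    table = new int[expectedSize]; // real code sizes via closedTableSize()
  }
}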
@@ -265,7 +241,7 @@ public V put(@NullableDecl K key, @NullableDecl V value) {
     int tableIndex = hash & hashTableMask();
     int newEntryIndex = this.size; // current size, and pointer to the entry to be appended
     int next = table[tableIndex];
-    if (next == UNSET) {
+    if (next == UNSET) { // uninitialized bucket
       table[tableIndex] = newEntryIndex;
     } else {
       int last;
@@ -293,8 +269,9 @@ public V put(@NullableDecl K key, @NullableDecl V value) {
     resizeMeMaybe(newSize);
     insertEntry(newEntryIndex, key, value, hash);
     this.size = newSize;
-    if (newEntryIndex >= threshold) {
-      resizeTable(2 * table.length);
+    int oldCapacity = table.length;
+    if (Hashing.needsResizing(newEntryIndex, oldCapacity, LOAD_FACTOR)) {
+      resizeTable(2 * oldCapacity);
     }
     modCount++;
     return null;
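
Hashing.needsResizing(newEntryIndex, oldCapacity, LOAD_FACTOR) replaces the old newEntryIndex >= threshold check by recomputing the bound instead of caching it in a field. Its body is not shown in this commit; presumably it is equivalent to something like:

// Assumed shape of the helper -- the call site above fixes the signature,
// but this body is a guess consistent with the removed threshold logic.
static boolean needsResizing(int size, int tableSize, double loadFactor) {
  return size > loadFactor * tableSize;
}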
@@ -340,13 +317,6 @@ void resizeEntries(int newCapacity) {
   }
 
   private void resizeTable(int newCapacity) { // newCapacity always a power of two
-    int[] oldTable = table;
-    int oldCapacity = oldTable.length;
-    if (oldCapacity >= MAXIMUM_CAPACITY) {
-      threshold = Integer.MAX_VALUE;
-      return;
-    }
-    int newThreshold = 1 + (int) (newCapacity * loadFactor);
     int[] newTable = newTable(newCapacity);
     long[] entries = this.entries;
 
@@ -360,7 +330,6 @@ private void resizeTable(int newCapacity) { // newCapacity always a power of two
       entries[i] = ((long) hash << 32) | (NEXT_MASK & next);
     }
 
-    this.threshold = newThreshold;
     this.table = newTable;
   }
 
@@ -412,25 +381,23 @@ private V remove(@NullableDecl Object key, int hash) {
     }
     int last = UNSET;
     do {
-      if (getHash(entries[next]) == hash) {
-        if (Objects.equal(key, keys[next])) {
-          @SuppressWarnings("unchecked") // values only contains Vs
-          @NullableDecl
-          V oldValue = (V) values[next];
-
-          if (last == UNSET) {
-            // we need to update the root link from table[]
-            table[tableIndex] = getNext(entries[next]);
-          } else {
-            // we need to update the link from the chain
-            entries[last] = swapNext(entries[last], getNext(entries[next]));
-          }
-
-          moveLastEntry(next);
-          size--;
-          modCount++;
-          return oldValue;
-        }
+      if (getHash(entries[next]) == hash && Objects.equal(key, keys[next])) {
+        @SuppressWarnings("unchecked") // values only contains Vs
+        @NullableDecl
+        V oldValue = (V) values[next];
+
+        if (last == UNSET) {
+          // we need to update the root link from table[]
+          table[tableIndex] = getNext(entries[next]);
+        } else {
+          // we need to update the link from the chain
+          entries[last] = swapNext(entries[last], getNext(entries[next]));
+        }
+
+        moveLastEntry(next);
+        size--;
+        modCount++;
+        return oldValue;
       }
       last = next;
       next = getNext(entries[next]);
@@ -777,18 +744,7 @@ public void trimToSize() {
     if (size < entries.length) {
       resizeEntries(size);
     }
-    // size / loadFactor gives the table size of the appropriate load factor,
-    // but that may not be a power of two. We floor it to a power of two by
-    // keeping its highest bit. But the smaller table may have a load factor
-    // larger than what we want; then we want to go to the next power of 2 if we can
-    int minimumTableSize = Math.max(1, Integer.highestOneBit((int) (size / loadFactor)));
-    if (minimumTableSize < MAXIMUM_CAPACITY) {
-      double load = (double) size / minimumTableSize;
-      if (load > loadFactor) {
-        minimumTableSize <<= 1; // increase to next power if possible
-      }
-    }
-
+    int minimumTableSize = Hashing.closedTableSize(size, LOAD_FACTOR);
     if (minimumTableSize < table.length) {
       resizeTable(minimumTableSize);
     }
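
The deleted comment block above spells out what Hashing.closedTableSize(size, LOAD_FACTOR) now encapsulates: floor size / loadFactor to a power of two, then double it if the resulting load would exceed the target. A reconstruction of that logic, which may differ from the real helper:

// Reconstructed from the inline code this commit deletes; an assumption,
// not the actual Guava implementation (e.g. it omits the overflow guard).
static int closedTableSize(int expectedEntries, double loadFactor) {
  // Floor to a power of two by keeping the highest one bit.
  int tableSize = Math.max(1, Integer.highestOneBit((int) (expectedEntries / loadFactor)));
  // If that table would be loaded beyond loadFactor, go to the next power of two.
  if (expectedEntries > (int) (loadFactor * tableSize)) {
    tableSize <<= 1;
  }
  return tableSize;
}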
@@ -827,7 +783,7 @@ private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException {
     if (elementCount < 0) {
       throw new InvalidObjectException("Invalid size: " + elementCount);
     }
-    init(elementCount, DEFAULT_LOAD_FACTOR);
+    init(elementCount);
     for (int i = 0; i < elementCount; i++) {
       K key = (K) stream.readObject();
       V value = (V) stream.readObject();
