Skip to content

Commit

Permalink
Ensure data is available when disk cache put completes.
Browse files Browse the repository at this point in the history
Fixes #429.
  • Loading branch information
sjudd committed Apr 26, 2015
1 parent 31fc07f commit 31640f6
Show file tree
Hide file tree
Showing 2 changed files with 95 additions and 0 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
package com.bumptech.glide.load.engine.cache;

import com.bumptech.glide.load.Key;

import java.util.ArrayDeque;
import java.util.HashMap;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

/**
* Keeps a map of keys to locks that allows locks to be removed from the map when no longer in use
* so the size of the collection is bounded.
*
* <p> This class will be accessed by multiple threads in a thread pool and ensures that the
* number of threads interested in each lock is updated atomically so that when the count reaches
* 0, the lock can safely be removed from the map. </p>
*/
final class DiskCacheWriteLocker {
  // Maps each key to its per-key write lock; entries exist only while at least
  // one thread is interested, so the map stays bounded by active writers.
  private final Map<Key, WriteLock> locks = new HashMap<Key, WriteLock>();
  private final WriteLockPool writeLockPool = new WriteLockPool();

  /**
   * Blocks until the calling thread holds the write lock for the given key.
   *
   * <p>The interested-thread count is bumped under the monitor so the entry cannot be
   * reclaimed by a concurrent {@link #release(Key)}; the actual {@code lock()} happens
   * outside the monitor so waiting writers do not stall unrelated keys.
   */
  void acquire(Key key) {
    WriteLock lockForKey;
    synchronized (this) {
      lockForKey = locks.get(key);
      if (lockForKey == null) {
        lockForKey = writeLockPool.obtain();
        locks.put(key, lockForKey);
      }
      lockForKey.interestedThreads++;
    }

    lockForKey.lock.lock();
  }

  /**
   * Releases the write lock for the given key, recycling the lock object once no
   * thread remains interested in it.
   *
   * @throws IllegalArgumentException if the lock for this key is not currently held.
   */
  void release(Key key) {
    WriteLock lockForKey;
    synchronized (this) {
      lockForKey = locks.get(key);
      boolean held = lockForKey != null && lockForKey.interestedThreads > 0;
      if (!held) {
        throw new IllegalArgumentException(
            "Cannot release a lock that is not held" + ", key: " + key + ", interestedThreads: "
                + (lockForKey == null ? 0 : lockForKey.interestedThreads));
      }

      lockForKey.interestedThreads--;
      if (lockForKey.interestedThreads == 0) {
        // Sanity check: the entry we remove must be the one we decremented.
        WriteLock removed = locks.remove(key);
        if (!removed.equals(lockForKey)) {
          throw new IllegalStateException("Removed the wrong lock"
              + ", expected to remove: " + lockForKey
              + ", but actually removed: " + removed
              + ", key: " + key);
        }
        writeLockPool.offer(removed);
      }
    }

    lockForKey.lock.unlock();
  }

  // A reference-counted wrapper around a ReentrantLock; interestedThreads is
  // guarded by the outer class's monitor.
  private static class WriteLock {
    final Lock lock = new ReentrantLock();
    int interestedThreads;
  }

  // Small bounded pool that recycles WriteLock instances to avoid churn.
  private static class WriteLockPool {
    private static final int MAX_POOL_SIZE = 10;
    private final Queue<WriteLock> pool = new ArrayDeque<WriteLock>();

    WriteLock obtain() {
      WriteLock pooled;
      synchronized (pool) {
        pooled = pool.poll();
      }
      // Fall back to a fresh instance when the pool is empty.
      return pooled != null ? pooled : new WriteLock();
    }

    void offer(WriteLock writeLock) {
      synchronized (pool) {
        // Drop the lock on the floor if the pool is already full.
        if (pool.size() < MAX_POOL_SIZE) {
          pool.offer(writeLock);
        }
      }
    }
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ public class DiskLruCacheWrapper implements DiskCache {
private static final int VALUE_COUNT = 1;
private static DiskLruCacheWrapper wrapper = null;

private final DiskCacheWriteLocker writeLocker = new DiskCacheWriteLocker();
private final SafeKeyGenerator safeKeyGenerator;
private final File directory;
private final int maxSize;
Expand Down Expand Up @@ -87,6 +88,7 @@ public File get(Key key) {
@Override
public void put(Key key, Writer writer) {
String safeKey = safeKeyGenerator.getSafeKey(key);
writeLocker.acquire(key);

This comment has been minimized.

Copy link
@TWiStErRob

TWiStErRob Apr 27, 2015

Collaborator

If acquire blocks the second thread while the first is writing, then by the time the first has written all the data and unlocks, the second will come in and overwrite everything? So stuff will still be downloaded twice. I hope I'm missing something though.

This comment has been minimized.

Copy link
@sjudd

sjudd Apr 27, 2015

Author Collaborator

The goal was not to stop data from being downloaded twice, but to make sure that if it is downloaded twice, one of the loads doesn't randomly fail.

There's a race without the locks:

  1. One thread starts writing data
  2. Another thread tries to start writing the same data and gets a null editor
  3. The second thread (using SOURCE, or ALL), tries to read data and gets a null value
  4. The first thread finishes writing.

By blocking the second thread until the first has finished writing, we guarantee that by the time a writer completes a put, data will be available if it then does a get. There are still some caveats, like the entry could still be evicted, but with a reasonable cache size that's unlikely.

Preventing duplicate downloads is a much more complicated problem and a matter of efficiency, not correctness. This solves the correctness problem, not the efficiency problem.

This comment has been minimized.

Copy link
@TWiStErRob

TWiStErRob Apr 28, 2015

Collaborator

Thanks, that makes sense!

try {
DiskLruCache.Editor editor = getDiskCache().edit(safeKey);
// Editor will be null if there are two concurrent puts. In the worst case we will just silently fail.

This comment has been minimized.

Copy link
@TWiStErRob

TWiStErRob Apr 27, 2015

Collaborator

Won't acquire block and invalidate this comment?

This comment has been minimized.

Copy link
@sjudd

sjudd Apr 27, 2015

Author Collaborator

It's still technically true, there just shouldn't be concurrent puts. but if there were...

Expand All @@ -104,6 +106,8 @@ public void put(Key key, Writer writer) {
if (Log.isLoggable(TAG, Log.WARN)) {
Log.w(TAG, "Unable to put to disk cache", e);
}
} finally {
writeLocker.release(key);
}
}

Expand Down

0 comments on commit 31640f6

Please sign in to comment.