Remove DigestUtils.getDigestInExclusiveMode() now that SsdModule has … #14231

Closed
32 changes: 1 addition & 31 deletions src/main/java/com/google/devtools/build/lib/vfs/DigestUtils.java
@@ -41,17 +41,9 @@
  * fail.
  */
 public class DigestUtils {
-  // Object to synchronize on when serializing large file reads.
-  private static final Object DIGEST_LOCK = new Object();
-
   // Typical size for a digest byte array.
   public static final int ESTIMATED_SIZE = 32;
 
-  // Files of this size or less are assumed to be readable in one seek.
-  // (This is the default readahead window on Linux.)
-  @VisibleForTesting // the unittest is in a different package!
-  public static final int MULTI_THREADED_DIGEST_MAX_FILE_SIZE = 128 * 1024;
-
   /**
    * Keys used to cache the values of the digests for files where we don't have fast digests.
    *
@@ -126,19 +118,6 @@ public int hashCode() {
   /** Private constructor to prevent instantiation of utility class. */
   private DigestUtils() {}
 
-  /**
-   * Obtain file's digset using synchronized method, ensuring that system is not overloaded in case
-   * when multiple threads are requesting digest calculations and underlying file system cannot
-   * provide it via extended attribute.
-   */
-  private static byte[] getDigestInExclusiveMode(Path path) throws IOException {
-    long startTime = Profiler.nanoTimeMaybe();
-    synchronized (DIGEST_LOCK) {
-      Profiler.instance().logSimpleTask(startTime, ProfilerTask.WAIT, path.getPathString());
-      return path.getDigest();
-    }
-  }
-
   /**
    * Enables the caching of file digests based on file status data.
    *
@@ -221,16 +200,7 @@ public static byte[] manuallyComputeDigest(Path path, long fileSize) throws IOEx
       }
     }
 
-    // Compute digest from the file contents.
-    if (fileSize > MULTI_THREADED_DIGEST_MAX_FILE_SIZE) {
-      // We'll have to read file content in order to calculate the digest.
-      // We avoid overlapping this process for multiple large files, as
-      // seeking back and forth between them will result in an overall loss of
-      // throughput.
-      digest = getDigestInExclusiveMode(path);
-    } else {
-      digest = path.getDigest();
-    }
+    digest = path.getDigest();
 
     Preconditions.checkNotNull(digest, "Missing digest for %s (size %s)", path, fileSize);
     if (cache != null) {
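Read together, the removed branch and the one-line replacement change manuallyComputeDigest from a size-gated, lock-serialized digest path into a direct call. The following is a minimal sketch of that before/after behavior; DigestSource is a hypothetical stand-in for Bazel's vfs Path (only getDigest() is assumed), and the caching and Profiler calls of the real method are omitted.

import java.io.IOException;

// DigestSource is a hypothetical stand-in for com.google.devtools.build.lib.vfs.Path;
// only the getDigest() call used by the removed code is assumed here.
interface DigestSource {
  byte[] getDigest() throws IOException;
}

final class DigestPathSketch {
  private static final Object DIGEST_LOCK = new Object();
  private static final int MULTI_THREADED_DIGEST_MAX_FILE_SIZE = 128 * 1024;

  // Before this change: files above the threshold were digested one at a time behind a
  // global lock, so concurrent full-content reads would not thrash a rotating disk.
  static byte[] digestBefore(DigestSource path, long fileSize) throws IOException {
    if (fileSize > MULTI_THREADED_DIGEST_MAX_FILE_SIZE) {
      synchronized (DIGEST_LOCK) {
        return path.getDigest();
      }
    }
    return path.getDigest();
  }

  // After this change: every file is digested directly; per the PR title, throttling of
  // concurrent reads is expected to happen elsewhere (SsdModule), not in DigestUtils.
  static byte[] digestAfter(DigestSource path) throws IOException {
    return path.getDigest();
  }
}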
Changes to the DigestUtils unit test:

@@ -92,25 +92,6 @@ protected byte[] getFastDigest(PathFragment path) throws IOException {
     thread2.joinAndAssertState(TestUtils.WAIT_TIMEOUT_MILLISECONDS);
   }
 
-  /**
-   * Ensures that digest calculation is synchronized for files greater than
-   * {@link DigestUtils#MULTI_THREADED_DIGEST_MAX_FILE_SIZE} bytes if the digest is not
-   * available cheaply, so machines with rotating drives don't become unusable.
-   */
-  @Test
-  public void testCalculationConcurrency() throws Exception {
-    int small = DigestUtils.MULTI_THREADED_DIGEST_MAX_FILE_SIZE;
-    int large = DigestUtils.MULTI_THREADED_DIGEST_MAX_FILE_SIZE + 1;
-    for (DigestHashFunction hf :
-        Arrays.asList(DigestHashFunction.SHA256, DigestHashFunction.SHA1)) {
-      assertDigestCalculationConcurrency(true, true, small, small, hf);
-      assertDigestCalculationConcurrency(true, true, large, large, hf);
-      assertDigestCalculationConcurrency(true, false, small, small, hf);
-      assertDigestCalculationConcurrency(true, false, small, large, hf);
-      assertDigestCalculationConcurrency(false, false, large, large, hf);
-    }
-  }
-
   @Test
   public void testCache() throws Exception {
     AtomicInteger getFastDigestCounter = new AtomicInteger(0);
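The deleted testCalculationConcurrency delegated to assertDigestCalculationConcurrency, a helper whose body lies outside this hunk, so its exact assertions are not shown here. As a generic, self-contained illustration only (not that helper), the property it guarded, namely that two large-file digest computations serialized behind a shared lock never overlap, can be demonstrated like this:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

// Generic illustration of the property the removed test exercised: work guarded by a
// shared lock (here, guardedDigest) never runs in two threads at the same time.
final class ExclusiveDigestCheck {
  private static final Object LOCK = new Object();
  private static final AtomicInteger inFlight = new AtomicInteger();

  static void guardedDigest() throws InterruptedException {
    synchronized (LOCK) {
      if (inFlight.incrementAndGet() > 1) {
        throw new AssertionError("digest computations overlapped");
      }
      Thread.sleep(10); // stand-in for reading and hashing a large file
      inFlight.decrementAndGet();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    CountDownLatch done = new CountDownLatch(2);
    Runnable task = () -> {
      try {
        guardedDigest();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      } finally {
        done.countDown();
      }
    };
    new Thread(task).start();
    new Thread(task).start();
    done.await(); // with the lock in place, the AssertionError above can never fire
  }
}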