diff --git a/src/main/java/com/google/devtools/build/lib/vfs/DigestUtils.java b/src/main/java/com/google/devtools/build/lib/vfs/DigestUtils.java
index e22e9577f5c055..d3841ea8ad0306 100644
--- a/src/main/java/com/google/devtools/build/lib/vfs/DigestUtils.java
+++ b/src/main/java/com/google/devtools/build/lib/vfs/DigestUtils.java
@@ -16,11 +16,8 @@
 import com.github.benmanes.caffeine.cache.Cache;
 import com.github.benmanes.caffeine.cache.Caffeine;
 import com.github.benmanes.caffeine.cache.stats.CacheStats;
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
-import com.google.devtools.build.lib.profiler.Profiler;
-import com.google.devtools.build.lib.profiler.ProfilerTask;
 import java.io.IOException;
 
 /**
@@ -41,17 +38,9 @@
  * fail.
  */
 public class DigestUtils {
-  // Object to synchronize on when serializing large file reads.
-  private static final Object DIGEST_LOCK = new Object();
-
   // Typical size for a digest byte array.
   public static final int ESTIMATED_SIZE = 32;
 
-  // Files of this size or less are assumed to be readable in one seek.
-  // (This is the default readahead window on Linux.)
-  @VisibleForTesting // the unittest is in a different package!
-  public static final int MULTI_THREADED_DIGEST_MAX_FILE_SIZE = 128 * 1024;
-
   /**
    * Keys used to cache the values of the digests for files where we don't have fast digests.
    *
@@ -126,19 +115,6 @@ public int hashCode() {
   /** Private constructor to prevent instantiation of utility class. */
   private DigestUtils() {}
 
-  /**
-   * Obtain file's digset using synchronized method, ensuring that system is not overloaded in case
-   * when multiple threads are requesting digest calculations and underlying file system cannot
-   * provide it via extended attribute.
-   */
-  private static byte[] getDigestInExclusiveMode(Path path) throws IOException {
-    long startTime = Profiler.nanoTimeMaybe();
-    synchronized (DIGEST_LOCK) {
-      Profiler.instance().logSimpleTask(startTime, ProfilerTask.WAIT, path.getPathString());
-      return path.getDigest();
-    }
-  }
-
   /**
    * Enables the caching of file digests based on file status data.
    *
@@ -221,16 +197,7 @@ public static byte[] manuallyComputeDigest(Path path, long fileSize) throws IOEx
       }
     }
 
-    // Compute digest from the file contents.
-    if (fileSize > MULTI_THREADED_DIGEST_MAX_FILE_SIZE) {
-      // We'll have to read file content in order to calculate the digest.
-      // We avoid overlapping this process for multiple large files, as
-      // seeking back and forth between them will result in an overall loss of
-      // throughput.
-      digest = getDigestInExclusiveMode(path);
-    } else {
-      digest = path.getDigest();
-    }
+    digest = path.getDigest();
 
     Preconditions.checkNotNull(digest, "Missing digest for %s (size %s)", path, fileSize);
     if (cache != null) {
diff --git a/src/test/java/com/google/devtools/build/lib/vfs/DigestUtilsTest.java b/src/test/java/com/google/devtools/build/lib/vfs/DigestUtilsTest.java
index 5f4f89edc7c584..910276c7b57d81 100644
--- a/src/test/java/com/google/devtools/build/lib/vfs/DigestUtilsTest.java
+++ b/src/test/java/com/google/devtools/build/lib/vfs/DigestUtilsTest.java
@@ -19,7 +19,6 @@
 import com.google.devtools.build.lib.testutil.TestUtils;
 import com.google.devtools.build.lib.vfs.inmemoryfs.InMemoryFileSystem;
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -92,25 +91,6 @@
     thread2.joinAndAssertState(TestUtils.WAIT_TIMEOUT_MILLISECONDS);
   }
 
-  /**
-   * Ensures that digest calculation is synchronized for files greater than
-   * {@link DigestUtils#MULTI_THREADED_DIGEST_MAX_FILE_SIZE} bytes if the digest is not
-   * available cheaply, so machines with rotating drives don't become unusable.
-   */
-  @Test
-  public void testCalculationConcurrency() throws Exception {
-    int small = DigestUtils.MULTI_THREADED_DIGEST_MAX_FILE_SIZE;
-    int large = DigestUtils.MULTI_THREADED_DIGEST_MAX_FILE_SIZE + 1;
-    for (DigestHashFunction hf :
-        Arrays.asList(DigestHashFunction.SHA256, DigestHashFunction.SHA1)) {
-      assertDigestCalculationConcurrency(true, true, small, small, hf);
-      assertDigestCalculationConcurrency(true, true, large, large, hf);
-      assertDigestCalculationConcurrency(true, false, small, small, hf);
-      assertDigestCalculationConcurrency(true, false, small, large, hf);
-      assertDigestCalculationConcurrency(false, false, large, large, hf);
-    }
-  }
-
   @Test
   public void testCache() throws Exception {
     AtomicInteger getFastDigestCounter = new AtomicInteger(0);
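Note: the deleted code serialized digest computation for files above 128 KiB behind a single
global lock, on the theory that overlapping large reads would thrash a rotating disk. Below is
a minimal standalone sketch of that removed pattern, for context only; the class and helper
names (LargeFileDigester, THRESHOLD_BYTES, sha256) are illustrative, not Bazel APIs, and the
real code delegated to Path.getDigest() and logged lock waits via the Bazel Profiler rather
than hashing file contents directly.

    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    // Sketch of the removed serialization pattern (names are hypothetical).
    final class LargeFileDigester {
      // Mirrors the removed MULTI_THREADED_DIGEST_MAX_FILE_SIZE: 128 KiB, which the
      // removed comment described as the default readahead window on Linux.
      private static final long THRESHOLD_BYTES = 128 * 1024;

      // Mirrors the removed DIGEST_LOCK: one process-wide lock for all large reads.
      private static final Object DIGEST_LOCK = new Object();

      static byte[] digest(Path path) throws IOException {
        long size = Files.size(path);
        if (size > THRESHOLD_BYTES) {
          // Serialize large-file reads so concurrent threads don't seek
          // back and forth between large files on a rotating drive.
          synchronized (DIGEST_LOCK) {
            return sha256(path);
          }
        }
        // Small files are assumed readable in one seek; hash them concurrently.
        return sha256(path);
      }

      private static byte[] sha256(Path path) throws IOException {
        MessageDigest md;
        try {
          md = MessageDigest.getInstance("SHA-256");
        } catch (NoSuchAlgorithmException e) {
          throw new IllegalStateException(e); // SHA-256 is mandatory on every JVM.
        }
        byte[] buf = new byte[8192];
        try (InputStream in = Files.newInputStream(path)) {
          for (int n; (n = in.read(buf)) != -1; ) {
            md.update(buf, 0, n);
          }
        }
        return md.digest();
      }

      private LargeFileDigester() {}
    }

After this change, manuallyComputeDigest calls path.getDigest() unconditionally, so digests of
large files may be computed concurrently, and the testCalculationConcurrency test that asserted
the serialized behavior is deleted along with the constant it referenced.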