diff --git a/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionSuite.scala b/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionSuite.scala
index b07c4653e51e0..488e538252257 100644
--- a/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionSuite.scala
+++ b/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionSuite.scala
@@ -66,10 +66,8 @@ class BlockManagerDecommissionSuite extends SparkFunSuite with LocalSparkContext
       // workload we need to worry about.
       .set(config.STORAGE_DECOMMISSION_REPLICATION_REATTEMPT_INTERVAL, 1L)
 
-    // Force fetching to local disk
-    if (shuffle) {
-      conf.set("spark.network.maxRemoteBlockSizeFetchToMem", "1")
-    }
+    // Allow force fetching to local disk
+    conf.set("spark.network.maxRemoteBlockSizeFetchToMem", remoteBlockSize)
 
     sc = new SparkContext(master, "test", conf)
 