From e81aa5ae428e6326bcda8b72e2ee4916ab70ba5a Mon Sep 17 00:00:00 2001
From: Holden Karau
Date: Mon, 1 Jun 2020 11:56:01 -0700
Subject: [PATCH] Use the remoteBlockSize param in the tests instead of
 conditioning on if we're testing shuffles or not

---
 .../spark/storage/BlockManagerDecommissionSuite.scala | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionSuite.scala b/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionSuite.scala
index b07c4653e51e0..488e538252257 100644
--- a/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionSuite.scala
+++ b/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionSuite.scala
@@ -66,10 +66,8 @@ class BlockManagerDecommissionSuite extends SparkFunSuite with LocalSparkContext
       // workload we need to worry about.
       .set(config.STORAGE_DECOMMISSION_REPLICATION_REATTEMPT_INTERVAL, 1L)
 
-    // Force fetching to local disk
-    if (shuffle) {
-      conf.set("spark.network.maxRemoteBlockSizeFetchToMem", "1")
-    }
+    // Allow force fetching to local disk
+    conf.set("spark.network.maxRemoteBlockSizeFetchToMem", remoteBlockSize)
 
     sc = new SparkContext(master, "test", conf)
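
Note (not part of the patch): a minimal standalone sketch of what the changed line does, assuming the caller passes the remote block size in as a String parameter the way the patched test now does. Setting "spark.network.maxRemoteBlockSizeFetchToMem" to "1" forces any remote block larger than one byte to be fetched to local disk, while a large value keeps fetches in memory. The object name, helper, master URL, and default value below are illustrative assumptions, not code from BlockManagerDecommissionSuite.

import org.apache.spark.{SparkConf, SparkContext}

// Hypothetical sketch: thread a remoteBlockSize value into the SparkConf
// unconditionally, instead of branching on a shuffle flag.
object RemoteBlockSizeSketch {
  def buildConf(remoteBlockSize: String): SparkConf = {
    new SparkConf()
      // local-cluster mode gives multiple executors so remote fetches actually
      // happen; it requires a built Spark distribution to run.
      .setMaster("local-cluster[2, 1, 1024]")
      .setAppName("test")
      // "1" => spill remote blocks bigger than 1 byte to local disk;
      // a large value (e.g. "100000") => keep fetched blocks in memory.
      .set("spark.network.maxRemoteBlockSizeFetchToMem", remoteBlockSize)
  }

  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(buildConf("1"))
    try {
      // A small shuffle so the fetch path configured above is exercised.
      val groups = sc.parallelize(1 to 100, 4).map(x => (x % 4, x)).groupByKey().count()
      assert(groups == 4)
    } finally {
      sc.stop()
    }
  }
}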