diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala index 303e796ab8047..65d1efb82a8f4 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala @@ -24,9 +24,9 @@ import java.nio.channels.Channels import java.util.Collections import java.util.concurrent.{CompletableFuture, ConcurrentHashMap, TimeUnit} +import scala.collection.JavaConverters._ import scala.collection.mutable import scala.collection.mutable.HashMap -import scala.collection.JavaConverters._ import scala.concurrent.{ExecutionContext, Future} import scala.concurrent.duration._ import scala.reflect.ClassTag @@ -1819,6 +1819,12 @@ private[spark] class BlockManager( @volatile var running = true override def run(): Unit = { var migrating: Option[(Int, Long)] = None + val storageLevel = StorageLevel( + useDisk = true, + useMemory = false, + useOffHeap = false, + deserialized = false, + replication = 1) // Once a block fails to transfer to an executor stop trying to transfer more blocks try { while (running) { @@ -1839,12 +1845,7 @@ private[spark] class BlockManager( peer.executorId, indexBlockId, indexBuffer, - StorageLevel( - useDisk=true, - useMemory=false, - useOffHeap=false, - deserialized=false, - replication=1), + storageLevel, null)// class tag, we don't need for shuffle blockTransferService.uploadBlockSync( peer.host, @@ -1852,12 +1853,7 @@ private[spark] class BlockManager( peer.executorId, dataBlockId, dataBuffer, - StorageLevel( - useDisk=true, - useMemory=false, - useOffHeap=false, - deserialized=false, - replication=1), + storageLevel, null)// class tag, we don't need for shuffle } } diff --git a/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/DecommissionSuite.scala 
b/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/DecommissionSuite.scala index 87580a753f273..e71d9ea127d25 100644 --- a/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/DecommissionSuite.scala +++ b/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/DecommissionSuite.scala @@ -31,7 +31,7 @@ private[spark] trait DecommissionSuite { k8sSuite: KubernetesSuite => .set("spark.storage.decommission.enabled", "true") .set("spark.storage.decommission.shuffle_blocks", "true") .set("spark.storage.decommission.shuffle_blocks", "true") - //Ensure we have somewhere to migrate our data too + // Ensure we have somewhere to migrate our data to .set("spark.executor.instances", "3") // The default of 30 seconds is fine, but for testing we just want to get this done fast. .set("spark.storage.decommission.replicationReattemptInterval", "1s")