
Commit

Style fixes
holdenk committed Apr 24, 2020
1 parent 4126c1b commit 8ee8949
Showing 2 changed files with 10 additions and 14 deletions.
22 changes: 9 additions & 13 deletions core/src/main/scala/org/apache/spark/storage/BlockManager.scala
@@ -24,9 +24,9 @@ import java.nio.channels.Channels
 import java.util.Collections
 import java.util.concurrent.{CompletableFuture, ConcurrentHashMap, TimeUnit}
 
+import scala.collection.JavaConverters._
 import scala.collection.mutable
 import scala.collection.mutable.HashMap
-import scala.collection.JavaConverters._
 import scala.concurrent.{ExecutionContext, Future}
 import scala.concurrent.duration._
 import scala.reflect.ClassTag
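For readers unfamiliar with the reordered import: `scala.collection.JavaConverters` is what lets `BlockManager` traverse Java collections with Scala idioms. A minimal, self-contained sketch of the `asScala` pattern it enables (the map contents here are illustrative, not taken from the diff):

```scala
import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConverters._

object JavaConvertersSketch {
  def main(args: Array[String]): Unit = {
    // asScala wraps the Java map so it can be traversed with Scala's foreach.
    val javaMap = new ConcurrentHashMap[String, Int]()
    javaMap.put("shuffle_0_0_0.index", 1)
    javaMap.put("shuffle_0_0_0.data", 2)
    javaMap.asScala.foreach { case (name, id) =>
      println(s"$name -> $id")
    }
  }
}
```

The move itself is purely stylistic: Spark groups imports (java, scala, third-party, spark) and sorts each group case-sensitively, so `JavaConverters` belongs before `mutable`.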
@@ -1819,6 +1819,12 @@ private[spark] class BlockManager(
     @volatile var running = true
     override def run(): Unit = {
       var migrating: Option[(Int, Long)] = None
+      val storageLevel = StorageLevel(
+        useDisk = true,
+        useMemory = false,
+        useOffHeap = false,
+        deserialized = false,
+        replication = 1)
       // Once a block fails to transfer to an executor stop trying to transfer more blocks
       try {
         while (running) {
@@ -1839,25 +1845,15 @@ private[spark] class BlockManager(
                 peer.executorId,
                 indexBlockId,
                 indexBuffer,
-                StorageLevel(
-                  useDisk=true,
-                  useMemory=false,
-                  useOffHeap=false,
-                  deserialized=false,
-                  replication=1),
+                storageLevel,
                 null)// class tag, we don't need for shuffle
               blockTransferService.uploadBlockSync(
                 peer.host,
                 peer.port,
                 peer.executorId,
                 dataBlockId,
                 dataBuffer,
-                StorageLevel(
-                  useDisk=true,
-                  useMemory=false,
-                  useOffHeap=false,
-                  deserialized=false,
-                  replication=1),
+                storageLevel,
                 null)// class tag, we don't need for shuffle
             }
           }
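The net effect of the two `BlockManager` hunks is a classic hoisting refactor: the disk-only, single-replica `StorageLevel` passed to both `uploadBlockSync` calls never changes, so it is built once before the loop rather than inline at each call site. A hedged, standalone sketch of the pattern (the `upload` helper is a stand-in for `blockTransferService.uploadBlockSync`, which also takes host, port, executor, and block arguments):

```scala
import org.apache.spark.storage.StorageLevel

object StorageLevelHoistSketch {
  // Illustrative stand-in for the real upload call.
  def upload(blockName: String, level: StorageLevel): Unit =
    println(s"uploading $blockName at $level")

  def main(args: Array[String]): Unit = {
    // Built once: migrated shuffle blocks always go to disk with one replica.
    val storageLevel = StorageLevel(
      useDisk = true,
      useMemory = false,
      useOffHeap = false,
      deserialized = false,
      replication = 1)
    // Reused for every index/data block pair instead of re-allocating inline.
    Seq("shuffle_0_0_0.index", "shuffle_0_0_0.data").foreach { name =>
      upload(name, storageLevel)
    }
  }
}
```

Since `StorageLevel` is immutable, sharing one instance across iterations is safe and reads better than two six-line inline constructions.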
2 changes: 1 addition & 1 deletion DecommissionSuite.scala
@@ -31,7 +31,7 @@ private[spark] trait DecommissionSuite { k8sSuite: KubernetesSuite =>
     .set("spark.storage.decommission.enabled", "true")
     .set("spark.storage.decommission.shuffle_blocks", "true")
     .set("spark.storage.decommission.shuffle_blocks", "true")
-    //Ensure we have somewhere to migrate our data too
+    // Ensure we have somewhere to migrate our data too
     .set("spark.executor.instances", "3")
     // The default of 30 seconds is fine, but for testing we just want to get this done fast.
     .set("spark.storage.decommission.replicationReattemptInterval", "1s")
