From 11988c94b9983af8926964b1c21f72bf8efaaa87 Mon Sep 17 00:00:00 2001
From: Sandeep Singh
Date: Fri, 10 Jun 2016 09:42:10 +0530
Subject: [PATCH] [Minor] Replace all occurrences of None: Option[X] with
 Option.empty[X]

---
 .../org/apache/spark/sql/catalyst/trees/TreeNode.scala        |  4 ++--
 .../scala/org/apache/spark/sql/DataFrameWriter.scala          |  2 +-
 .../sql/execution/command/createDataSourceTables.scala        |  2 +-
 .../spark/sql/execution/exchange/ShuffleExchange.scala        |  2 +-
 .../org/apache/spark/sql/hive/orc/OrcQuerySuite.scala         | 10 +++++-----
 .../streaming/receiver/ReceivedBlockHandler.scala             |  2 +-
 6 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
index f924efe6e6f44..3cc7a1a3cae83 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
@@ -105,7 +105,7 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product {
    */
  def find(f: BaseType => Boolean): Option[BaseType] = f(this) match {
    case true => Some(this)
-   case false => children.foldLeft(None: Option[BaseType]) { (l, r) => l.orElse(r.find(f)) }
+   case false => children.foldLeft(Option.empty[BaseType]) { (l, r) => l.orElse(r.find(f)) }
  }

  /**
@@ -165,7 +165,7 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product {
  def collectFirst[B](pf: PartialFunction[BaseType, B]): Option[B] = {
    val lifted = pf.lift
    lifted(this).orElse {
-     children.foldLeft(None: Option[B]) { (l, r) => l.orElse(r.collectFirst(pf)) }
+     children.foldLeft(Option.empty[B]) { (l, r) => l.orElse(r.collectFirst(pf)) }
    }
  }

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
index 1dd8818dedb2e..9b771683e3dd2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
@@ -432,7 +432,7 @@ final class DataFrameWriter private[sql](df: DataFrame) {
  private def insertInto(tableIdent: TableIdentifier): Unit = {
    assertNotBucketed("insertInto")
    assertNotStreaming("insertInto() can only be called on non-continuous queries")
-   val partitions = normalizedParCols.map(_.map(col => col -> (None: Option[String])).toMap)
+   val partitions = normalizedParCols.map(_.map(col => col -> (Option.empty[String])).toMap)
    val overwrite = mode == SaveMode.Overwrite

    // A partitioned relation's schema can be different from the input logicalPlan, since
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala
index 66753fa7f27bc..865e406ce27a6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala
@@ -169,7 +169,7 @@ case class CreateDataSourceTableAsSelectCommand(
      options
    }

-   var existingSchema = None: Option[StructType]
+   var existingSchema = Option.empty[StructType]
    if (sparkSession.sessionState.catalog.tableExists(tableIdent)) {
      // Check if we need to throw an exception or just return.
      mode match {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ShuffleExchange.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ShuffleExchange.scala
index e18b59f49b984..afe0fbea73bd9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ShuffleExchange.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ShuffleExchange.scala
@@ -129,7 +129,7 @@ case class ShuffleExchange(

object ShuffleExchange {
  def apply(newPartitioning: Partitioning, child: SparkPlan): ShuffleExchange = {
-   ShuffleExchange(newPartitioning, child, coordinator = None: Option[ExchangeCoordinator])
+   ShuffleExchange(newPartitioning, child, coordinator = Option.empty[ExchangeCoordinator])
  }

  /**
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
index 9771b2314a08d..e6c9c5d4d9cc5 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
@@ -147,11 +147,11 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {

  test("save and load case class RDD with `None`s as orc") {
    val data = (
-     None: Option[Int],
-     None: Option[Long],
-     None: Option[Float],
-     None: Option[Double],
-     None: Option[Boolean]
+     Option.empty[Int],
+     Option.empty[Long],
+     Option.empty[Float],
+     Option.empty[Double],
+     Option.empty[Boolean]
    ) :: Nil

    withOrcFile(data) { file =>
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala b/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala
index c4bc5cf3f6a58..80c07958b41f2 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala
@@ -170,7 +170,7 @@ private[streaming] class WriteAheadLogBasedBlockHandler(
   */
  def storeBlock(blockId: StreamBlockId, block: ReceivedBlock): ReceivedBlockStoreResult = {

-   var numRecords = None: Option[Long]
+   var numRecords = Option.empty[Long]
    // Serialize the block so that it can be inserted into both
    val serializedBlock = block match {
      case ArrayBufferBlock(arrayBuffer) =>
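
Note on the idiom (this note and the sketch below are commentary, not part of the patch): Option.empty[X] and (None: Option[X]) evaluate to the same value, so the patch is behavior-neutral. The explicit element type matters at inference-sensitive call sites such as the foldLeft seeds above: a bare None is typed Option[Nothing], so the fold's accumulator cannot unify with the Option[X] that orElse produces and the code fails to compile. A minimal standalone Scala sketch, with illustrative names that do not come from the patch:

import scala.util.Try

object OptionEmptyDemo {
  def main(args: Array[String]): Unit = {
    val inputs = List("x", "1", "2")

    // Does not compile: the bare None seed is typed Option[Nothing], so the
    // accumulator type cannot unify with acc.orElse(...), which is Option[Int].
    // val bad = inputs.foldLeft(None) { (acc, s) => acc.orElse(Try(s.toInt).toOption) }

    // Both seeds below carry the element type explicitly; they are equivalent,
    // and Option.empty[Int] avoids the type-ascription noise:
    val viaAscription = inputs.foldLeft(None: Option[Int]) { (acc, s) =>
      acc.orElse(Try(s.toInt).toOption)
    }
    val viaEmpty = inputs.foldLeft(Option.empty[Int]) { (acc, s) =>
      acc.orElse(Try(s.toInt).toOption)
    }

    assert(viaAscription == viaEmpty) // both are Some(1): the first parsable element wins
    println(viaEmpty)                 // Some(1)
  }
}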