diff --git a/core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala b/core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala
index 81aa44f1b210f..5a8c2914314c2 100644
--- a/core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala
+++ b/core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala
@@ -40,7 +40,7 @@ private[spark] object UIWorkloadGenerator {
     if (args.length < 3) {
       // scalastyle:off println
       println(
-        "usage: ./bin/spark-class org.apache.spark.ui.UIWorkloadGenerator " +
+        "Usage: ./bin/spark-class org.apache.spark.ui.UIWorkloadGenerator " +
         "[master] [FIFO|FAIR] [#job set (4 jobs per set)]")
       // scalastyle:on println
       System.exit(1)
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/util/NumericParserSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/util/NumericParserSuite.scala
index 8ac164f698055..16d7c3ab39b03 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/util/NumericParserSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/util/NumericParserSuite.scala
@@ -33,9 +33,7 @@ class NumericParserSuite extends SparkFunSuite {
     malformatted.foreach { s =>
       intercept[SparkException] {
         NumericParser.parse(s)
-        // scalastyle:off println
-        println(s"Didn't detect malformatted string $s.")
-        // scalastyle:on println
+        throw new RuntimeException(s"Didn't detect malformatted string $s.")
       }
     }
   }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
index eb8e7a080d765..e6081cb05bc2d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
@@ -24,7 +24,7 @@ import org.apache.spark.unsafe.types.UTF8String
 
 import scala.collection.mutable.HashSet
 
-import org.apache.spark.{AccumulatorParam, Accumulator}
+import org.apache.spark.{AccumulatorParam, Accumulator, Logging}
 import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.trees.TreeNodeRef
@@ -57,7 +57,7 @@ package object debug {
    * Augments [[DataFrame]]s with debug methods.
    */
   @DeveloperApi
-  implicit class DebugQuery(query: DataFrame) {
+  implicit class DebugQuery(query: DataFrame) extends Logging {
     def debug(): Unit = {
       val plan = query.queryExecution.executedPlan
       val visited = new collection.mutable.HashSet[TreeNodeRef]()
@@ -66,9 +66,7 @@ package object debug {
         visited += new TreeNodeRef(s)
         DebugNode(s)
       }
-      // scalastyle:off println
-      println(s"Results returned: ${debugPlan.execute().count()}")
-      // scalastyle:on println
+      logDebug(s"Results returned: ${debugPlan.execute().count()}")
       debugPlan.foreach {
         case d: DebugNode => d.dumpStats()
         case _ =>
@@ -84,15 +82,11 @@ package object debug {
         TypeCheck(s)
       }
       try {
-        // scalastyle:off println
-        println(s"Results returned: ${debugPlan.execute().count()}")
-        // scalastyle:on println
+        logDebug(s"Results returned: ${debugPlan.execute().count()}")
       } catch {
         case e: Exception =>
           def unwrap(e: Throwable): Throwable = if (e.getCause == null) e else unwrap(e.getCause)
-          // scalastyle:off println
-          println(s"Deepest Error: ${unwrap(e)}")
-          // scalastyle:on println
+          logDebug(s"Deepest Error: ${unwrap(e)}")
       }
     }
   }
@@ -125,15 +119,11 @@ package object debug {
     val columnStats: Array[ColumnMetrics] = Array.fill(child.output.size)(new ColumnMetrics())
 
     def dumpStats(): Unit = {
-      // scalastyle:off println
-      println(s"== ${child.simpleString} ==")
-      println(s"Tuples output: ${tupleCount.value}")
-      // scalastyle:on println
+      logDebug(s"== ${child.simpleString} ==")
+      logDebug(s"Tuples output: ${tupleCount.value}")
       child.output.zip(columnStats).foreach { case(attr, metric) =>
         val actualDataTypes = metric.elementTypes.value.mkString("{", ",", "}")
-        // scalastyle:off println
-        println(s"  ${attr.name} ${attr.dataType}: $actualDataTypes")
-        // scalastyle:on println
+        logDebug(s"  ${attr.name} ${attr.dataType}: $actualDataTypes")
       }
     }
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
index 96ee627602b8d..f66a17b20915f 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
@@ -40,7 +40,7 @@ import org.apache.spark.Logging
 import org.apache.spark.sql.hive.HiveContext
 import org.apache.spark.util.Utils
 
-private[hive] object SparkSQLCLIDriver {
+private[hive] object SparkSQLCLIDriver extends Logging {
   private var prompt = "spark-sql"
   private var continuedPrompt = "".padTo(prompt.length, ' ')
   private var transport: TSocket = _
@@ -158,14 +158,13 @@ private[hive] object SparkSQLCLIDriver {
       System.exit(cli.processLine(sessionState.execString))
     }
 
-    // scalastyle:off println
     try {
       if (sessionState.fileName != null) {
         System.exit(cli.processFile(sessionState.fileName))
       }
     } catch {
       case e: FileNotFoundException =>
-        System.err.println(s"Could not open input file for reading. (${e.getMessage})")
+        logError(s"Could not open input file for reading. (${e.getMessage})")
         System.exit(3)
     }
 
@@ -181,16 +180,15 @@ private[hive] object SparkSQLCLIDriver {
         val historyFile = historyDirectory + File.separator + ".hivehistory"
         reader.setHistory(new History(new File(historyFile)))
       } else {
-        System.err.println("WARNING: Directory for Hive history file: " + historyDirectory +
+        logWarning("WARNING: Directory for Hive history file: " + historyDirectory +
           " does not exist. History will not be available during this session.")
       }
     } catch {
       case e: Exception =>
-        System.err.println("WARNING: Encountered an error while trying to initialize Hive's " +
+        logWarning("WARNING: Encountered an error while trying to initialize Hive's " +
           "history file. History will not be available during this session.")
-        System.err.println(e.getMessage)
+        logWarning(e.getMessage)
     }
-    // scalastyle:on println
 
     val clientTransportTSocketField = classOf[CliSessionState].getDeclaredField("transport")
     clientTransportTSocketField.setAccessible(true)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
index d528d69583aca..7fc517b646b20 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
@@ -28,6 +28,7 @@ import org.apache.hadoop.hive.ql.parse._
 import org.apache.hadoop.hive.ql.plan.PlanUtils
 import org.apache.hadoop.hive.ql.session.SessionState
 
+import org.apache.spark.Logging
 import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.catalyst.analysis._
 import org.apache.spark.sql.catalyst.expressions._
@@ -73,7 +74,7 @@ private[hive] case class CreateTableAsSelect(
 }
 
 /** Provides a mapping from HiveQL statements to catalyst logical plans and expression trees. */
-private[hive] object HiveQl {
+private[hive] object HiveQl extends Logging {
   protected val nativeCommands = Seq(
     "TOK_ALTERDATABASE_OWNER",
     "TOK_ALTERDATABASE_PROPERTIES",
@@ -186,9 +187,7 @@ private[hive] object HiveQl {
         .map(ast => Option(ast).map(_.transform(rule)).orNull))
     } catch {
       case e: Exception =>
-        // scalastyle:off println
-        println(dumpTree(n))
-        // scalastyle:on println
+        logError(dumpTree(n).toString)
         throw e
     }
   }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index 8a215d455cc6f..508695919e9a7 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -86,10 +86,6 @@ class InsertIntoHiveTableSuite extends QueryTest with BeforeAndAfter {
     val message = intercept[QueryExecutionException] {
       sql("CREATE TABLE doubleCreateAndInsertTest (key int, value string)")
     }.getMessage
-
-    // scalastyle:off println
-    println("message!!!!" + message)
-    // scalastyle:on println
   }
 
   test("Double create does not fail when allowExisting = true") {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
index 6ff7e29069dd2..4765fa9e50e95 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
@@ -253,9 +253,6 @@ class PairUDF extends GenericUDF {
   )
 
   override def evaluate(args: Array[DeferredObject]): AnyRef = {
-    // scalastyle:off println
-    println("Type = %s".format(args(0).getClass.getName))
-    // scalastyle:on println
     Integer.valueOf(args(0).get.asInstanceOf[TestPair].entry._2)
   }