Commit 10724b6

Changing some printlns to logs in tests
jonalter committed Jul 7, 2015
1 parent eeec1e7 commit 10724b6
Showing 10 changed files with 36 additions and 61 deletions.
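
The pattern is the same in every file below: each test suite mixes in Spark's Logging trait, replaces bare println calls with logInfo or logError, and drops the // scalastyle:off println / // scalastyle:on println pragmas that had been suppressing the style check. A minimal sketch of that pattern, using a hypothetical ExampleSuite rather than any file in this commit (SparkFunSuite and org.apache.spark.Logging as they existed in Spark 1.x):

    import org.apache.spark.{Logging, SparkFunSuite}
    import org.apache.spark.util.Utils

    // Hypothetical suite illustrating the change; mixing in Logging provides
    // logInfo/logWarning/logError backed by the test's logger, so no
    // scalastyle suppression of println is needed.
    class ExampleSuite extends SparkFunSuite with Logging {
      test("logs instead of printing") {
        val dir = Utils.createTempDir()
        logInfo(s"Using temp directory ${dir.toString}")   // was: println(...)
        try {
          assert(dir.exists())
        } catch {
          case e: Throwable =>
            logError(s"FAILED while checking $dir")        // was: println(...)
            throw e
        }
      }
    }
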
core/src/test/scala/org/apache/spark/ThreadingSuite.scala (3 additions, 4 deletions)
@@ -21,6 +21,7 @@ import java.util.concurrent.{TimeUnit, Semaphore}
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.atomic.AtomicInteger

+ import org.apache.spark.Logging
import org.apache.spark.scheduler._

/**
@@ -36,7 +37,7 @@ object ThreadingSuiteState {
}
}

- class ThreadingSuite extends SparkFunSuite with LocalSparkContext {
+ class ThreadingSuite extends SparkFunSuite with LocalSparkContext with Logging{

test("accessing SparkContext form a different thread") {
sc = new SparkContext("local", "test")
@@ -130,10 +131,8 @@ class ThreadingSuite extends SparkFunSuite with LocalSparkContext {
Thread.sleep(100)
}
if (running.get() != 4) {
- // scalastyle:off println
- println("Waited 1 second without seeing runningThreads = 4 (it was " +
+ logInfo("Waited 1 second without seeing runningThreads = 4 (it was " +
running.get() + "); failing test")
- // scalastyle:on println
ThreadingSuiteState.failed.set(true)
}
number

@@ -27,7 +27,7 @@ import org.scalatest.BeforeAndAfterAll

import org.apache.hadoop.io.Text

- import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}
+ import org.apache.spark.{Logging, SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.util.Utils
import org.apache.hadoop.io.compress.{DefaultCodec, CompressionCodecFactory, GzipCodec}

@@ -36,7 +36,7 @@ import org.apache.hadoop.io.compress.{DefaultCodec, CompressionCodecFactory, Gzi
* [[org.apache.spark.input.WholeTextFileRecordReader WholeTextFileRecordReader]]. A temporary
* directory is created as fake input. Temporal storage would be deleted in the end.
*/
- class WholeTextFileRecordReaderSuite extends SparkFunSuite with BeforeAndAfterAll {
+ class WholeTextFileRecordReaderSuite extends SparkFunSuite with BeforeAndAfterAll with Logging {
private var sc: SparkContext = _
private var factory: CompressionCodecFactory = _

@@ -76,7 +76,6 @@ class WholeTextFileRecordReaderSuite extends SparkFunSuite with BeforeAndAfterAl
out.close()
}

- // scalastyle:off println
/**
* This code will test the behaviors of WholeTextFileRecordReader based on local disk. There are
* three aspects to check:
@@ -86,7 +85,7 @@ class WholeTextFileRecordReaderSuite extends SparkFunSuite with BeforeAndAfterAl
*/
test("Correctness of WholeTextFileRecordReader.") {
val dir = Utils.createTempDir()
println(s"Local disk address is ${dir.toString}.")
logInfo(s"Local disk address is ${dir.toString}.")

WholeTextFileRecordReaderSuite.files.foreach { case (filename, contents) =>
createNativeFile(dir, filename, contents, false)
@@ -110,7 +109,7 @@ class WholeTextFileRecordReaderSuite extends SparkFunSuite with BeforeAndAfterAl

test("Correctness of WholeTextFileRecordReader with GzipCodec.") {
val dir = Utils.createTempDir()
println(s"Local disk address is ${dir.toString}.")
logInfo(s"Local disk address is ${dir.toString}.")

WholeTextFileRecordReaderSuite.files.foreach { case (filename, contents) =>
createNativeFile(dir, filename, contents, true)
@@ -132,7 +131,6 @@ class WholeTextFileRecordReaderSuite extends SparkFunSuite with BeforeAndAfterAl

Utils.deleteRecursively(dir)
}
- // scalastyle:on println
}

/**

@@ -20,10 +20,10 @@ package org.apache.spark.util.collection
import java.lang.{Float => JFloat, Integer => JInteger}
import java.util.{Arrays, Comparator}

- import org.apache.spark.SparkFunSuite
+ import org.apache.spark.{Logging, SparkFunSuite}
import org.apache.spark.util.random.XORShiftRandom

- class SorterSuite extends SparkFunSuite {
+ class SorterSuite extends SparkFunSuite with Logging {

test("equivalent to Arrays.sort") {
val rand = new XORShiftRandom(123)
@@ -73,9 +73,8 @@ class SorterSuite extends SparkFunSuite {

/** Runs an experiment several times. */
def runExperiment(name: String, skip: Boolean = false)(f: => Unit, prepare: () => Unit): Unit = {
- // scalastyle:off println
if (skip) {
- println(s"Skipped experiment $name.")
+ logInfo(s"Skipped experiment $name.")
return
}

@@ -87,12 +86,11 @@
while (i < 10) {
val time = org.apache.spark.util.Utils.timeIt(1)(f, Some(prepare))
next10 += time
println(s"$name: Took $time ms")
logInfo(s"$name: Took $time ms")
i += 1
}

println(s"$name: ($firstTry ms first try, ${next10 / 10} ms average)")
// scalastyle:on println
logInfo(s"$name: ($firstTry ms first try, ${next10 / 10} ms average)")
}

/**

@@ -111,9 +111,7 @@ class DirectKafkaStreamSuite
rdd
}.foreachRDD { rdd =>
for (o <- offsetRanges) {
- // scalastyle:off println
- println(s"${o.topic} ${o.partition} ${o.fromOffset} ${o.untilOffset}")
- // scalastyle:on println
+ logInfo(s"${o.topic} ${o.partition} ${o.fromOffset} ${o.untilOffset}")
}
val collected = rdd.mapPartitionsWithIndex { (i, iter) =>
// For each partition, get size of the range in the partition,

@@ -19,15 +19,15 @@ package org.apache.spark.ml.feature

import scala.beans.{BeanInfo, BeanProperty}

- import org.apache.spark.{SparkException, SparkFunSuite}
+ import org.apache.spark.{Logging, SparkException, SparkFunSuite}
import org.apache.spark.ml.attribute._
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.mllib.linalg.{SparseVector, Vector, Vectors}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.DataFrame

- class VectorIndexerSuite extends SparkFunSuite with MLlibTestSparkContext {
+ class VectorIndexerSuite extends SparkFunSuite with MLlibTestSparkContext with Logging {

import VectorIndexerSuite.FeatureData

@@ -113,15 +113,11 @@ class VectorIndexerSuite extends SparkFunSuite with MLlibTestSparkContext {
model.transform(sparsePoints1) // should work
intercept[SparkException] {
model.transform(densePoints2).collect()
- // scalastyle:off println
- println("Did not throw error when fit, transform were called on vectors of different lengths")
- // scalastyle:on println
+ logInfo("Did not throw error when fit, transform were called on vectors of different lengths")
}
intercept[SparkException] {
vectorIndexer.fit(badPoints)
- // scalastyle:off println
- println("Did not throw error when fitting vectors of different lengths in same RDD.")
- // scalastyle:on println
+ logInfo("Did not throw error when fitting vectors of different lengths in same RDD.")
}
}

@@ -200,9 +196,7 @@ class VectorIndexerSuite extends SparkFunSuite with MLlibTestSparkContext {
}
} catch {
case e: org.scalatest.exceptions.TestFailedException =>
- // scalastyle:off println
- println(errMsg)
- // scalastyle:on println
+ logError(errMsg)
throw e
}
}

@@ -21,10 +21,10 @@ import scala.util.Random

import breeze.linalg.{DenseMatrix => BDM, squaredDistance => breezeSquaredDistance}

- import org.apache.spark.{SparkException, SparkFunSuite}
+ import org.apache.spark.{Logging, SparkException, SparkFunSuite}
import org.apache.spark.mllib.util.TestingUtils._

- class VectorsSuite extends SparkFunSuite {
+ class VectorsSuite extends SparkFunSuite with Logging {

val arr = Array(0.1, 0.0, 0.3, 0.4)
val n = 4
@@ -142,9 +142,7 @@ class VectorsSuite extends SparkFunSuite {
malformatted.foreach { s =>
intercept[SparkException] {
Vectors.parse(s)
- // scalastyle:off println
- println(s"Didn't detect malformatted string $s.")
- // scalastyle:on println
+ logInfo(s"Didn't detect malformatted string $s.")
}
}
}

@@ -19,13 +19,13 @@ package org.apache.spark.mllib.stat

import breeze.linalg.{DenseMatrix => BDM, Matrix => BM}

- import org.apache.spark.SparkFunSuite
+ import org.apache.spark.{Logging, SparkFunSuite}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.correlation.{Correlations, PearsonCorrelation,
SpearmanCorrelation}
import org.apache.spark.mllib.util.MLlibTestSparkContext

- class CorrelationSuite extends SparkFunSuite with MLlibTestSparkContext {
+ class CorrelationSuite extends SparkFunSuite with MLlibTestSparkContext with Logging {

// test input data
val xData = Array(1.0, 0.0, -2.0)
@@ -146,9 +146,7 @@ class CorrelationSuite extends SparkFunSuite with MLlibTestSparkContext {
def matrixApproxEqual(A: BM[Double], B: BM[Double], threshold: Double = 1e-6): Boolean = {
for (i <- 0 until A.rows; j <- 0 until A.cols) {
if (!approxEqual(A(i, j), B(i, j), threshold)) {
- // scalastyle:off println
- println("i, j = " + i + ", " + j + " actual: " + A(i, j) + " expected:" + B(i, j))
- // scalastyle:on println
+ logInfo("i, j = " + i + ", " + j + " actual: " + A(i, j) + " expected:" + B(i, j))
return false
}
}

@@ -17,7 +17,7 @@

package org.apache.spark.mllib.tree

- import org.apache.spark.SparkFunSuite
+ import org.apache.spark.{Logging, SparkFunSuite}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.configuration.Algo._
import org.apache.spark.mllib.tree.configuration.{BoostingStrategy, Strategy}
@@ -31,7 +31,7 @@ import org.apache.spark.util.Utils
/**
* Test suite for [[GradientBoostedTrees]].
*/
- class GradientBoostedTreesSuite extends SparkFunSuite with MLlibTestSparkContext {
+ class GradientBoostedTreesSuite extends SparkFunSuite with MLlibTestSparkContext with Logging {

test("Regression with continuous features: SquaredError") {
GradientBoostedTreesSuite.testCombinations.foreach {
@@ -50,10 +50,8 @@ class GradientBoostedTreesSuite extends SparkFunSuite with MLlibTestSparkContext
EnsembleTestHelper.validateRegressor(gbt, GradientBoostedTreesSuite.data, 0.06)
} catch {
case e: java.lang.AssertionError =>
- // scalastyle:off println
- println(s"FAILED for numIterations=$numIterations, learningRate=$learningRate," +
+ logError(s"FAILED for numIterations=$numIterations, learningRate=$learningRate," +
s" subsamplingRate=$subsamplingRate")
- // scalastyle:on println
throw e
}

@@ -82,10 +80,8 @@ class GradientBoostedTreesSuite extends SparkFunSuite with MLlibTestSparkContext
EnsembleTestHelper.validateRegressor(gbt, GradientBoostedTreesSuite.data, 0.85, "mae")
} catch {
case e: java.lang.AssertionError =>
- // scalastyle:off println
- println(s"FAILED for numIterations=$numIterations, learningRate=$learningRate," +
+ logError(s"FAILED for numIterations=$numIterations, learningRate=$learningRate," +
s" subsamplingRate=$subsamplingRate")
- // scalastyle:on println
throw e
}

@@ -115,10 +111,8 @@ class GradientBoostedTreesSuite extends SparkFunSuite with MLlibTestSparkContext
EnsembleTestHelper.validateClassifier(gbt, GradientBoostedTreesSuite.data, 0.9)
} catch {
case e: java.lang.AssertionError =>
- // scalastyle:off println
- println(s"FAILED for numIterations=$numIterations, learningRate=$learningRate," +
+ logError(s"FAILED for numIterations=$numIterations, learningRate=$learningRate," +
s" subsamplingRate=$subsamplingRate")
- // scalastyle:on println
throw e
}


@@ -17,13 +17,13 @@

package org.apache.spark.sql.hive

- import org.apache.spark.SparkFunSuite
+ import org.apache.spark.{Logging, SparkFunSuite}
import org.apache.spark.sql.hive.test.TestHive

import org.apache.spark.sql.test.ExamplePointUDT
import org.apache.spark.sql.types.StructType

- class HiveMetastoreCatalogSuite extends SparkFunSuite {
+ class HiveMetastoreCatalogSuite extends SparkFunSuite with Logging {

test("struct field should accept underscore in sub-column name") {
val metastr = "struct<a: int, b_1: string, c: string>"
@@ -41,9 +41,7 @@ class HiveMetastoreCatalogSuite extends SparkFunSuite {
test("duplicated metastore relations") {
import TestHive.implicits._
val df = TestHive.sql("SELECT * FROM src")
- // scalastyle:off println
- println(df.queryExecution)
- // scalastyle:on println
+ logInfo(df.queryExecution.toString)
df.as('a).join(df.as('b), $"a.key" === $"b.key")
}
}

@@ -26,6 +26,7 @@ import org.scalatest.BeforeAndAfterAll
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapred.InvalidInputException

+ import org.apache.spark.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.hive.client.{HiveTable, ManagedTable}
import org.apache.spark.sql.hive.test.TestHive
@@ -40,7 +41,8 @@ import org.apache.spark.util.Utils
/**
* Tests for persisting tables created though the data sources API into the metastore.
*/
- class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with BeforeAndAfterAll {
+ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with BeforeAndAfterAll
+ with Logging {
override val sqlContext = TestHive

var jsonFilePath: String = _
@@ -415,9 +417,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with BeforeA
|)
""".stripMargin)

- // scalastyle:off println
- sql("DROP TABLE jsonTable").collect().foreach(println)
- // scalastyle:on println
+ sql("DROP TABLE jsonTable").collect().foreach(i => logInfo(i.toString))
}
}
