
Commit 83bec7f

Revert "Tag a random sample of tests in HiveCompatibilitySuite with ExtendedHiveTest."

This reverts commit b7d0507.
Marcelo Vanzin committed Aug 26, 2015
1 parent 85fed20 commit 83bec7f
Showing 6 changed files with 9 additions and 66 deletions.
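
Background for the diff below: the reverted commit replaced the class-level @ExtendedHiveTest annotation on HiveCompatibilitySuite with per-test ScalaTest tags, leaving only a random "smoke" sample of the Hive compatibility tests untagged by default. The following sketch is not Spark code (suite, test, and tag names are made up, and it uses the ScalaTest 2.x-era imports that match this diff), but it shows the two tagging styles involved: an individual test carrying a Tag versus an untagged one.

import org.scalatest.{FunSuite, Tag}

// A ScalaTest tag is a named marker; a runner can include tests by tag (-n)
// or exclude them (-l). The tag name below is illustrative, not Spark's.
object SlowHiveTest extends Tag("org.example.SlowHiveTest")

class TaggingSketchSuite extends FunSuite {
  // Per-test tagging, the style the reverted commit used through
  // createQueryTest's optional tag parameter: test(name, tag)(body).
  test("full compatibility case", SlowHiveTest) {
    assert(Seq(1, 2, 3).sum == 6)
  }

  // Untagged test: part of every run, the role played by the random smoke sample.
  test("smoke case") {
    assert("SELECT * FROM sales".startsWith("SELECT"))
  }
}
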
HashJoinCompatibilitySuite.scala
@@ -20,13 +20,11 @@ package org.apache.spark.sql.hive.execution
 import java.io.File
 
 import org.apache.spark.sql.SQLConf
-import org.apache.spark.sql.hive.ExtendedHiveTest
 import org.apache.spark.sql.hive.test.TestHive
 
 /**
  * Runs the test cases that are included in the hive distribution with hash joins.
  */
-@ExtendedHiveTest
 class HashJoinCompatibilitySuite extends HiveCompatibilitySuite {
   override def beforeAll() {
     super.beforeAll()
HiveCompatibilitySuite.scala
@@ -24,11 +24,13 @@ import org.apache.spark.sql.catalyst.rules.RuleExecutor
 import org.scalatest.BeforeAndAfter
 
 import org.apache.spark.sql.SQLConf
+import org.apache.spark.sql.hive.ExtendedHiveTest
 import org.apache.spark.sql.hive.test.TestHive
 
 /**
  * Runs the test cases that are included in the hive distribution.
  */
+@ExtendedHiveTest
 class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
   // TODO: bundle in jar files... get from classpath
   private lazy val hiveQueryDir = TestHive.getHiveFile(
sql/hive/src/test/scala/org/apache/spark/sql/hive/TestTags.scala (26 changes: 0 additions & 26 deletions)

This file was deleted.
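
The deleted file's contents are not shown in this view. Judging from the import the revert removes from HiveQueryFileTest (org.apache.spark.sql.hive.TestTags.ExtendedHiveTest), it presumably held a small holder object exposing a ScalaTest Tag, roughly like this hypothetical reconstruction; the tag-name string is an assumption.

package org.apache.spark.sql.hive

import org.scalatest.Tag

// Hypothetical reconstruction of the deleted TestTags.scala: a holder object so
// suites can write `import org.apache.spark.sql.hive.TestTags.ExtendedHiveTest`.
object TestTags {
  // The string is the name a ScalaTest runner would use to include or exclude
  // tagged tests; the exact value used in Spark is assumed here.
  object ExtendedHiveTest extends Tag("org.apache.spark.sql.hive.ExtendedHiveTest")
}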

HiveComparisonTest.scala
@@ -21,7 +21,7 @@ import java.io._
 
 import scala.util.control.NonFatal
 
-import org.scalatest.{BeforeAndAfterAll, GivenWhenThen, Tag}
+import org.scalatest.{BeforeAndAfterAll, GivenWhenThen}
 
 import org.apache.spark.{Logging, SparkFunSuite}
 import org.apache.spark.sql.catalyst.planning.PhysicalOperation
@@ -209,11 +209,7 @@ abstract class HiveComparisonTest
   }
 
   val installHooksCommand = "(?i)SET.*hooks".r
-  def createQueryTest(
-      testCaseName: String,
-      sql: String,
-      tag: Option[Tag] = None,
-      reset: Boolean = true) {
+  def createQueryTest(testCaseName: String, sql: String, reset: Boolean = true) {
     // testCaseName must not contain ':', which is not allowed to appear in a filename of Windows
     assert(!testCaseName.contains(":"))
 
@@ -241,16 +237,7 @@
       return
     }
 
-    def createTest(name: String)(fn: => Unit): Unit = {
-      tag match {
-        case Some(tagValue) =>
-          test(name, tagValue)(fn)
-        case None =>
-          test(name)(fn)
-      }
-    }
-
-    createTest(testCaseName) {
+    test(testCaseName) {
       logDebug(s"=== HIVE TEST: $testCaseName ===")
 
       // Clear old output for this testcase.
HiveQueryFileTest.scala
@@ -19,10 +19,7 @@ package org.apache.spark.sql.hive.execution
 
 import java.io.File
 
-import scala.util.Random
-
 import org.apache.spark.sql.catalyst.util._
-import org.apache.spark.sql.hive.TestTags.ExtendedHiveTest
 
 /**
  * A framework for running the query tests that are listed as a set of text files.
@@ -54,36 +51,21 @@ abstract class HiveQueryFileTest extends HiveComparisonTest {
     Option(System.getProperty(whiteListProperty)).map(_.split(",").toSeq).getOrElse(whiteList)
 
   // Go through all the test cases and add them to scala test.
-  val testsToRun = testCases.sorted.flatMap {
+  testCases.sorted.foreach {
     case (testCaseName, testCaseFile) =>
       if (blackList.map(_.r.pattern.matcher(testCaseName).matches()).reduceLeft(_||_)) {
        logDebug(s"Blacklisted test skipped $testCaseName")
-        None
      } else if (realWhiteList.map(_.r.pattern.matcher(testCaseName).matches()).reduceLeft(_||_) ||
        runAll) {
        // Build a test case and submit it to scala test framework...
-        Some(testCaseName -> testCaseFile)
+        val queriesString = fileToString(testCaseFile)
+        createQueryTest(testCaseName, queriesString)
      } else {
        // Only output warnings for the built in whitelist as this clutters the output when the user
        // trying to execute a single test from the commandline.
        if (System.getProperty(whiteListProperty) == null && !runAll) {
          ignore(testCaseName) {}
        }
-        None
      }
  }
-
-  // Pick a random sample of tests to serve as a "smoke" test. This is used by automated tests when
-  // the sql/ code hasn't been changed, to avoid running the whole test suite for every PR that
-  // touches core code.
-  private val smokeCount = sys.props.getOrElse("spark.hive.smoke.count", "20").toInt
-  private val smokeSet = Random.shuffle(testsToRun).take(smokeCount)
-    .map { case (name, _) => name }.toSet
-
-  testsToRun.foreach { case (testCaseName, testCaseFile) =>
-    val queriesString = fileToString(testCaseFile)
-    val tag = if (!smokeSet.contains(testCaseName)) Some(ExtendedHiveTest) else None
-    createQueryTest(testCaseName, queriesString, tag = tag)
-  }
-
 }
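
The block removed above is the heart of the reverted change: shuffle the test list, leave a small random sample untagged as a smoke set, and tag everything else as an extended test. The standalone sketch below (not Spark code; the collection, property name, and counts are made up) illustrates that selection logic.

import scala.util.Random

object SmokeSampleSketch {
  def main(args: Array[String]): Unit = {
    // Stand-ins for the Hive compatibility test names.
    val allTests = (1 to 100).map(i => s"case_$i")

    // Sample size, overridable via a system property (the property name here is illustrative).
    val smokeCount = sys.props.getOrElse("smoke.count", "20").toInt

    // The untagged "smoke" subset; everything else would get the extended tag.
    val smokeSet = Random.shuffle(allTests).take(smokeCount).toSet

    allTests.foreach { name =>
      val label = if (smokeSet.contains(name)) "smoke" else "tagged as extended"
      println(s"$name -> $label")
    }
  }
}
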
HiveSerDeSuite.scala
@@ -38,7 +38,7 @@ class HiveSerDeSuite extends HiveComparisonTest with BeforeAndAfterAll {
   }
 
   // table sales is not a cache table, and will be clear after reset
-  createQueryTest("Read with RegexSerDe", "SELECT * FROM sales", reset = false)
+  createQueryTest("Read with RegexSerDe", "SELECT * FROM sales", false)
 
   createQueryTest(
     "Read and write with LazySimpleSerDe (tab separated)",
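
A note on the HiveSerDeSuite hunk above: the reverted signature put an optional tag parameter before reset, so a positional false no longer type-checked and the call site had to name the argument; the revert restores the positional form. The snippet below is a simplified sketch of that effect, not Spark's actual methods (the tag is modeled as a plain Option[String]).

object CallSiteSketch {
  // Shape of the signature introduced by the reverted commit: an optional tag
  // sits in the third position, before reset.
  def createQueryTestTagged(
      name: String,
      sql: String,
      tag: Option[String] = None,
      reset: Boolean = true): Unit = ()

  // Shape of the signature after the revert: reset is back in the third position.
  def createQueryTest(name: String, sql: String, reset: Boolean = true): Unit = ()

  def main(args: Array[String]): Unit = {
    // With the tag parameter present, `false` cannot fill the third slot
    // (it expects Option[String]), so the argument must be passed by name:
    createQueryTestTagged("Read with RegexSerDe", "SELECT * FROM sales", reset = false)

    // After the revert, the original positional call compiles again:
    createQueryTest("Read with RegexSerDe", "SELECT * FROM sales", false)
  }
}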
