[MINOR][SQL][TEST] Test case cleanups for recent PRs
## What changes were proposed in this pull request?
Revert the unneeded test case changes we made in SPARK-23000

Also fix the test suites whose local `afterAll()` does not call `super.afterAll()`; the `afterAll()` of `TestHiveSingleton` is what actually resets the test environment.
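
Below is a minimal sketch of the cleanup pattern applied to those suites (the suite name and the dropped function are illustrative placeholders, not taken from this diff): the suite-local cleanup runs inside `try`, and `super.afterAll()` runs in `finally` so the shared Hive test environment is reset even if the cleanup throws.

```scala
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.test.SQLTestUtils

// Hypothetical suite illustrating the try/finally afterAll pattern from this PR.
class ExampleHiveSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {

  protected override def afterAll(): Unit = {
    try {
      // Suite-local cleanup, e.g. dropping temporary functions or test tables.
      sql("DROP TEMPORARY FUNCTION IF EXISTS example_udf")
    } finally {
      // Always run TestHiveSingleton's afterAll() so the shared test
      // environment is reset even if the cleanup above fails.
      super.afterAll()
    }
  }
}
```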

## How was this patch tested?
N/A

Author: gatorsmile <gatorsmile@gmail.com>

Closes #20341 from gatorsmile/testRelated.

(cherry picked from commit 896e45a)
Signed-off-by: gatorsmile <gatorsmile@gmail.com>
gatorsmile committed Jan 22, 2018
1 parent d933fce commit 1069fad
Showing 7 changed files with 60 additions and 45 deletions.
@@ -21,6 +21,7 @@ import org.apache.spark.sql.catalyst.plans.{Inner, LeftOuter, RightOuter}
 import org.apache.spark.sql.catalyst.plans.logical.Join
 import org.apache.spark.sql.execution.joins.BroadcastHashJoinExec
 import org.apache.spark.sql.functions._
+import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.test.SharedSQLContext
 
 class DataFrameJoinSuite extends QueryTest with SharedSQLContext {
@@ -276,16 +277,14 @@ class DataFrameJoinSuite extends QueryTest with SharedSQLContext {
 
   test("SPARK-23087: don't throw Analysis Exception in CheckCartesianProduct when join condition " +
     "is false or null") {
-    val df = spark.range(10)
-    val dfNull = spark.range(10).select(lit(null).as("b"))
-    val planNull = df.join(dfNull, $"id" === $"b", "left").queryExecution.analyzed
-
-    spark.sessionState.executePlan(planNull).optimizedPlan
-
-    val dfOne = df.select(lit(1).as("a"))
-    val dfTwo = spark.range(10).select(lit(2).as("b"))
-    val planFalse = dfOne.join(dfTwo, $"a" === $"b", "left").queryExecution.analyzed
-
-    spark.sessionState.executePlan(planFalse).optimizedPlan
+    withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "false") {
+      val df = spark.range(10)
+      val dfNull = spark.range(10).select(lit(null).as("b"))
+      df.join(dfNull, $"id" === $"b", "left").queryExecution.optimizedPlan
+
+      val dfOne = df.select(lit(1).as("a"))
+      val dfTwo = spark.range(10).select(lit(2).as("b"))
+      dfOne.join(dfTwo, $"a" === $"b", "left").queryExecution.optimizedPlan
+    }
   }
 }
@@ -492,8 +492,7 @@ private[hive] class TestHiveSparkSession(
   protected val originalUDFs: JavaSet[String] = FunctionRegistry.getFunctionNames
 
   /**
-   * Resets the test instance by deleting any tables that have been created.
-   * TODO: also clear out UDFs, views, etc.
+   * Resets the test instance by deleting any table, view, temp view, and UDF that have been created
    */
   def reset() {
     try {
@@ -166,13 +166,13 @@ class DataSourceWithHiveMetastoreCatalogSuite
     ))
   ).foreach { case (provider, (inputFormat, outputFormat, serde)) =>
     test(s"Persist non-partitioned $provider relation into metastore as managed table") {
-      withTable("default.t") {
+      withTable("t") {
         withSQLConf(SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key -> "true") {
           testDF
             .write
             .mode(SaveMode.Overwrite)
             .format(provider)
-            .saveAsTable("default.t")
+            .saveAsTable("t")
         }
 
         val hiveTable = sessionState.catalog.getTableMetadata(TableIdentifier("t", Some("default")))
@@ -187,15 +187,14 @@ class DataSourceWithHiveMetastoreCatalogSuite
         assert(columns.map(_.name) === Seq("d1", "d2"))
         assert(columns.map(_.dataType) === Seq(DecimalType(10, 3), StringType))
 
-        checkAnswer(table("default.t"), testDF)
-        assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM default.t") ===
-          Seq("1.1\t1", "2.1\t2"))
+        checkAnswer(table("t"), testDF)
+        assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM t") === Seq("1.1\t1", "2.1\t2"))
       }
     }
 
     test(s"Persist non-partitioned $provider relation into metastore as external table") {
       withTempPath { dir =>
-        withTable("default.t") {
+        withTable("t") {
           val path = dir.getCanonicalFile
 
           withSQLConf(SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key -> "true") {
@@ -204,7 +203,7 @@
             .mode(SaveMode.Overwrite)
             .format(provider)
             .option("path", path.toString)
-            .saveAsTable("default.t")
+            .saveAsTable("t")
           }
 
           val hiveTable =
@@ -220,18 +219,18 @@
           assert(columns.map(_.name) === Seq("d1", "d2"))
           assert(columns.map(_.dataType) === Seq(DecimalType(10, 3), StringType))
 
-          checkAnswer(table("default.t"), testDF)
-          assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM default.t") ===
+          checkAnswer(table("t"), testDF)
+          assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM t") ===
             Seq("1.1\t1", "2.1\t2"))
         }
       }
     }
 
     test(s"Persist non-partitioned $provider relation into metastore as managed table using CTAS") {
       withTempPath { dir =>
-        withTable("default.t") {
+        withTable("t") {
           sql(
-            s"""CREATE TABLE default.t USING $provider
+            s"""CREATE TABLE t USING $provider
                |OPTIONS (path '${dir.toURI}')
                |AS SELECT 1 AS d1, "val_1" AS d2
              """.stripMargin)
@@ -249,9 +248,8 @@
           assert(columns.map(_.name) === Seq("d1", "d2"))
           assert(columns.map(_.dataType) === Seq(IntegerType, StringType))
 
-          checkAnswer(table("default.t"), Row(1, "val_1"))
-          assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM default.t") ===
-            Seq("1\tval_1"))
+          checkAnswer(table("t"), Row(1, "val_1"))
+          assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM t") === Seq("1\tval_1"))
         }
       }
     }
@@ -49,8 +49,12 @@ class HiveUDAFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
   }
 
   protected override def afterAll(): Unit = {
-    sql(s"DROP TEMPORARY FUNCTION IF EXISTS mock")
-    sql(s"DROP TEMPORARY FUNCTION IF EXISTS hive_max")
+    try {
+      sql(s"DROP TEMPORARY FUNCTION IF EXISTS mock")
+      sql(s"DROP TEMPORARY FUNCTION IF EXISTS hive_max")
+    } finally {
+      super.afterAll()
+    }
   }
 
   test("built-in Hive UDAF") {
@@ -74,7 +74,11 @@ class Hive_2_1_DDLSuite extends SparkFunSuite with TestHiveSingleton with Before
   }
 
   override def afterAll(): Unit = {
-    catalog = null
+    try {
+      catalog = null
+    } finally {
+      super.afterAll()
+    }
   }
 
   test("SPARK-21617: ALTER TABLE for non-compatible DataSource tables") {
@@ -47,7 +47,11 @@ class ObjectHashAggregateSuite
   }
 
   protected override def afterAll(): Unit = {
-    sql(s"DROP TEMPORARY FUNCTION IF EXISTS hive_max")
+    try {
+      sql(s"DROP TEMPORARY FUNCTION IF EXISTS hive_max")
+    } finally {
+      super.afterAll()
+    }
   }
 
   test("typed_count without grouping keys") {
@@ -180,15 +180,18 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest {
   }
 
   override def afterAll(): Unit = {
-    dropTables("partitioned_parquet",
-      "partitioned_parquet_with_key",
-      "partitioned_parquet_with_complextypes",
-      "partitioned_parquet_with_key_and_complextypes",
-      "normal_parquet",
-      "jt",
-      "jt_array",
-      "test_parquet")
-    super.afterAll()
+    try {
+      dropTables("partitioned_parquet",
+        "partitioned_parquet_with_key",
+        "partitioned_parquet_with_complextypes",
+        "partitioned_parquet_with_key_and_complextypes",
+        "normal_parquet",
+        "jt",
+        "jt_array",
+        "test_parquet")
+    } finally {
+      super.afterAll()
+    }
   }
 
   test(s"conversion is working") {
@@ -931,11 +934,15 @@ abstract class ParquetPartitioningTest extends QueryTest with SQLTestUtils with
   }
 
   override protected def afterAll(): Unit = {
-    partitionedTableDir.delete()
-    normalTableDir.delete()
-    partitionedTableDirWithKey.delete()
-    partitionedTableDirWithComplexTypes.delete()
-    partitionedTableDirWithKeyAndComplexTypes.delete()
+    try {
+      partitionedTableDir.delete()
+      normalTableDir.delete()
+      partitionedTableDirWithKey.delete()
+      partitionedTableDirWithComplexTypes.delete()
+      partitionedTableDirWithKeyAndComplexTypes.delete()
+    } finally {
+      super.afterAll()
+    }
   }
 
   /**
