[SPARK-33670][SQL] Verify the partition provider is Hive in v1 SHOW TABLE EXTENDED

### What changes were proposed in this pull request?
Invoke the check `DDLUtils.verifyPartitionProviderIsHive()` from V1 implementation of `SHOW TABLE EXTENDED` when partition specs are specified.

This PR is a kind of follow-up to #16373 and #15515.
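
For context, a condensed sketch of what the invoked check enforces is below. It paraphrases the behavior of `DDLUtils.verifyPartitionProviderIsHive` rather than quoting the Spark source verbatim, and it leans on the existing helper `DDLUtils.isDatasourceTable`; the exact error strings live in `DDLUtils`:
```scala
import org.apache.spark.sql.{AnalysisException, SparkSession}
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.execution.command.DDLUtils

// Paraphrased sketch: reject commands that need partition metadata when a
// datasource table does not track its partitions in the Hive metastore.
def verifyPartitionProviderIsHive(
    spark: SparkSession, table: CatalogTable, action: String): Unit = {
  val tableName = table.identifier.table
  if (!spark.sessionState.conf.manageFilesourcePartitions &&
      DDLUtils.isDatasourceTable(table)) {
    throw new AnalysisException(
      s"$action is not allowed on $tableName since filesource partition " +
        "management is disabled.")
  }
  if (!table.tracksPartitionsInCatalog && DDLUtils.isDatasourceTable(table)) {
    throw new AnalysisException(
      s"$action is not allowed on $tableName since its partition metadata " +
        "is not stored in the Hive metastore. To import this information " +
        s"into the metastore, run `msck repair table $tableName`.")
  }
}
```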

### Why are the changes needed?
To output a user-friendly error with a recommendation, such as:
**"... partition metadata is not stored in the Hive metastore. To import this information into the metastore, run `msck repair table tableName`"**
instead of silently returning an empty result.
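
As a concrete illustration, here is a minimal hypothetical reproduction (the table name `t` and its columns are invented; it assumes a Hive-enabled `SparkSession` named `spark`). A partitioned datasource table created while filesource partition management is disabled does not store its partition metadata in the Hive metastore:
```scala
// Hypothetical repro: create a partitioned datasource table while
// filesource partition management is off, so its partitions are not
// tracked in the Hive metastore.
spark.sql("SET spark.sql.hive.manageFilesourcePartitions=false")
spark.sql("CREATE TABLE t (a INT, b STRING) USING parquet PARTITIONED BY (a)")
spark.sql("INSERT INTO t PARTITION (a = 1) VALUES ('x')")

spark.sql("SET spark.sql.hive.manageFilesourcePartitions=true")
// Before this PR: silently returns an empty result.
// After this PR: AnalysisException recommending `msck repair table t`.
spark.sql("SHOW TABLE EXTENDED LIKE 't' PARTITION (a = 1)").show()
```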

### Does this PR introduce _any_ user-facing change?
Yes.

### How was this patch tested?
By running the affected test suites, in particular:
```
$ build/sbt -Phive-2.3 -Phive-thriftserver "hive/test:testOnly *PartitionProviderCompatibilitySuite"
```

Closes #30618 from MaxGekk/show-table-extended-verifyPartitionProviderIsHive.

Authored-by: Max Gekk <max.gekk@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
MaxGekk authored and HyukjinKwon committed Dec 7, 2020
1 parent e32de29 commit 29096a8
Showing 3 changed files with 29 additions and 6 deletions.
```diff
@@ -879,6 +879,9 @@ case class ShowTablesCommand(
       // Note: tableIdentifierPattern should be non-empty, otherwise a [[ParseException]]
       // should have been thrown by the sql parser.
       val table = catalog.getTableMetadata(TableIdentifier(tableIdentifierPattern.get, Some(db)))
+
+      DDLUtils.verifyPartitionProviderIsHive(sparkSession, table, "SHOW TABLE EXTENDED")
+
       val tableIdent = table.identifier
       val normalizedSpec = PartitioningUtils.normalizePartitionSpec(
         partitionSpec.get,
```
```diff
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.execution.command.v1
 
-import org.apache.spark.sql.{AnalysisException, Row}
+import org.apache.spark.sql.{AnalysisException, Row, SaveMode}
 import org.apache.spark.sql.connector.catalog.CatalogManager
 import org.apache.spark.sql.execution.command
 import org.apache.spark.sql.internal.SQLConf
```
```diff
@@ -111,4 +111,18 @@ trait ShowTablesSuiteBase extends command.ShowTablesSuiteBase {
   }
 }
 
-class ShowTablesSuite extends ShowTablesSuiteBase with SharedSparkSession
+class ShowTablesSuite extends ShowTablesSuiteBase with SharedSparkSession {
+  test("SPARK-33670: show partitions from a datasource table") {
+    import testImplicits._
+    withNamespace(s"$catalog.ns") {
+      sql(s"CREATE NAMESPACE $catalog.ns")
+      sql(s"USE $catalog.ns")
+      val t = "part_datasrc"
+      withTable(t) {
+        val df = (1 to 3).map(i => (i, s"val_$i", i * 2)).toDF("a", "b", "c")
+        df.write.partitionBy("a").format("parquet").mode(SaveMode.Overwrite).saveAsTable(t)
+        assert(sql(s"SHOW TABLE EXTENDED LIKE '$t' PARTITION(a = 1)").count() === 1)
+      }
+    }
+  }
+}
```
```diff
@@ -53,7 +53,8 @@ class PartitionProviderCompatibilitySuite
       s"ALTER TABLE $tableName PARTITION (partCol=1) SET LOCATION '/foo'",
       s"ALTER TABLE $tableName DROP PARTITION (partCol=1)",
       s"DESCRIBE $tableName PARTITION (partCol=1)",
-      s"SHOW PARTITIONS $tableName")
+      s"SHOW PARTITIONS $tableName",
+      s"SHOW TABLE EXTENDED LIKE '$tableName' PARTITION (partCol=1)")
 
     withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "true") {
       for (cmd <- unsupportedCommands) {
```
```diff
@@ -124,10 +125,15 @@ class PartitionProviderCompatibilitySuite
       }
       // disabled
       withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "false") {
-        val e = intercept[AnalysisException] {
-          spark.sql(s"show partitions test")
+        Seq(
+          "SHOW PARTITIONS test",
+          "SHOW TABLE EXTENDED LIKE 'test' PARTITION (partCol=1)"
+        ).foreach { showPartitions =>
+          val e = intercept[AnalysisException] {
+            spark.sql(showPartitions)
+          }
+          assert(e.getMessage.contains("filesource partition management is disabled"))
         }
-        assert(e.getMessage.contains("filesource partition management is disabled"))
         spark.sql("refresh table test")
         assert(spark.sql("select * from test").count() == 5)
       }
```
