diff --git a/common/utils/src/main/resources/error/error-conditions.json b/common/utils/src/main/resources/error/error-conditions.json index 2017f1fc331e..c672c8e493a8 100644 --- a/common/utils/src/main/resources/error/error-conditions.json +++ b/common/utils/src/main/resources/error/error-conditions.json @@ -2431,6 +2431,14 @@ ], "sqlState" : "XX000" }, + "INTERNAL_ERROR_HIVE_METASTORE_PARTITION_FILTER" : { + "message" : [ + "Failed to get partition metadata by filter from Hive metastore.", + "To work around this issue, set '<hiveMetastorePartitionPruningFallbackOnException>' to true. Note that this may result in degraded performance as Spark will fetch all partition metadata instead of filtering at the metastore level.", + "To report this issue, visit: https://issues.apache.org/jira/browse/SPARK" + ], + "sqlState" : "XX000" + }, "INTERNAL_ERROR_MEMORY" : { "message" : [ "<message>" ] }, @@ -9021,11 +9029,6 @@ "Partition filter cannot have both `\"` and `'` characters." ] }, - "_LEGACY_ERROR_TEMP_2193" : { - "message" : [ - "Caught Hive MetaException attempting to get partition metadata by filter from Hive. You can set the Spark configuration setting <hiveMetastorePartitionPruningFallbackOnException> to true to work around this problem, however this will result in degraded performance. Please report a bug: https://issues.apache.org/jira/browse/SPARK." - ] - }, "_LEGACY_ERROR_TEMP_2194" : { "message" : [ "Unsupported Hive Metastore version <version>. Please set <key> with a valid version." 
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala index 989ad8b0dc41..cb3c063f434e 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala @@ -1667,7 +1667,7 @@ private[sql] object QueryExecutionErrors extends QueryErrorsBase with ExecutionE def getPartitionMetadataByFilterError(e: Exception): SparkRuntimeException = { new SparkRuntimeException( - errorClass = "_LEGACY_ERROR_TEMP_2193", + errorClass = "INTERNAL_ERROR_HIVE_METASTORE_PARTITION_FILTER", messageParameters = Map( "hiveMetastorePartitionPruningFallbackOnException" -> SQLConf.HIVE_METASTORE_PARTITION_PRUNING_FALLBACK_ON_EXCEPTION.key), diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala index 5bb81873449c..aecd5ef54d37 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala @@ -573,7 +573,7 @@ abstract class ExternalCatalogSuite extends SparkFunSuite { // then be caught and converted to a RuntimeException with a descriptive message. 
case ex: RuntimeException if ex.getMessage.contains("MetaException") => throw new AnalysisException( - errorClass = "_LEGACY_ERROR_TEMP_2193", + errorClass = "INTERNAL_ERROR_HIVE_METASTORE_PARTITION_FILTER", messageParameters = Map( "hiveMetastorePartitionPruningFallbackOnException" -> SQLConf.HIVE_METASTORE_PARTITION_PRUNING_FALLBACK_ON_EXCEPTION.key)) diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HivePartitionFilteringSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HivePartitionFilteringSuite.scala index 63be4dc363f1..4bcf039ac56d 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HivePartitionFilteringSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HivePartitionFilteringSuite.scala @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.serde2.`lazy`.LazySimpleSerDe import org.apache.hadoop.mapred.TextInputFormat import org.scalatest.BeforeAndAfterAll +import org.apache.spark.SparkRuntimeException import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils.DEFAULT_PARTITION_NAME @@ -142,11 +143,12 @@ class HivePartitionFilteringSuite(version: String) test(s"getPartitionsByFilter should fail when $fallbackKey=false") { withSQLConf(fallbackKey -> "false") { - val e = intercept[RuntimeException]( + val e = intercept[SparkRuntimeException]( clientWithoutDirectSql.getPartitionsByFilter( clientWithoutDirectSql.getRawHiveTable("default", "test"), Seq(attr("ds") === 20170101))) - assert(e.getMessage.contains("Caught Hive MetaException")) + assert(e.getCondition == "INTERNAL_ERROR_HIVE_METASTORE_PARTITION_FILTER") + assert(e.getMessage.contains("Failed to get partition metadata by filter")) } }