From 45cb3b39acda876163bc49c821a46c31116fb69d Mon Sep 17 00:00:00 2001 From: Lars Volker Date: Tue, 6 Dec 2016 13:52:55 -0800 Subject: [PATCH] IMPALA-4403: Implement SHOW RANGE PARTITIONS for Kudu tables Change-Id: Idf5b2fdd02938a42fa59ec98884e4ac915dd1f65 Reviewed-on: http://gerrit.cloudera.org:8080/5390 Reviewed-by: Lars Volker Reviewed-by: Matthew Jacobs Tested-by: Internal Jenkins --- common/thrift/Frontend.thrift | 12 +++- fe/src/main/cup/sql-parser.cup | 18 +++-- .../impala/analysis/ShowPartitionsStmt.java | 55 ---------------- .../apache/impala/analysis/ShowStatsStmt.java | 50 ++++++++++++-- .../org/apache/impala/catalog/KuduTable.java | 29 +++++++++ .../org/apache/impala/service/Frontend.java | 9 ++- .../apache/impala/service/JniFrontend.java | 6 +- .../impala/analysis/AnalyzeDDLTest.java | 20 ++++++ .../apache/impala/analysis/ParserTest.java | 5 ++ .../queries/QueryTest/kudu_alter.test | 65 ++++++++++++++++++- .../queries/QueryTest/kudu_partition_ddl.test | 8 +++ tests/query_test/test_kudu.py | 33 ++++++++++ 12 files changed, 238 insertions(+), 72 deletions(-) delete mode 100644 fe/src/main/java/org/apache/impala/analysis/ShowPartitionsStmt.java diff --git a/common/thrift/Frontend.thrift b/common/thrift/Frontend.thrift index 135fa7b3f0c..8f8e3ac6579 100644 --- a/common/thrift/Frontend.thrift +++ b/common/thrift/Frontend.thrift @@ -177,9 +177,17 @@ struct TShowDbsParams { 1: optional string show_pattern } -// Parameters for SHOW TABLE/COLUMN STATS commands +// Used by SHOW STATS and SHOW PARTITIONS to control what information is returned. +enum TShowStatsOp { + TABLE_STATS, + COLUMN_STATS, + PARTITIONS, + RANGE_PARTITIONS +} + +// Parameters for SHOW TABLE/COLUMN STATS and SHOW PARTITIONS commands struct TShowStatsParams { - 1: required bool is_show_col_stats + 1: TShowStatsOp op 2: CatalogObjects.TTableName table_name } diff --git a/fe/src/main/cup/sql-parser.cup b/fe/src/main/cup/sql-parser.cup index b66483b0987..012da42c35f 100644 --- a/fe/src/main/cup/sql-parser.cup +++ b/fe/src/main/cup/sql-parser.cup @@ -49,6 +49,7 @@ import org.apache.impala.thrift.TDescribeOutputStyle; import org.apache.impala.thrift.TFunctionCategory; import org.apache.impala.thrift.THdfsFileFormat; import org.apache.impala.thrift.TPrivilegeLevel; +import org.apache.impala.thrift.TShowStatsOp; import org.apache.impala.thrift.TTablePropertyType; parser code {: @@ -308,8 +309,8 @@ nonterminal UseStmt use_stmt; nonterminal SetStmt set_stmt; nonterminal ShowTablesStmt show_tables_stmt; nonterminal ShowDbsStmt show_dbs_stmt; -nonterminal ShowPartitionsStmt show_partitions_stmt; -nonterminal ShowStatsStmt show_stats_stmt; +nonterminal ShowStatsStmt show_stats_stmt, show_partitions_stmt, + show_range_partitions_stmt; nonterminal String show_pattern; nonterminal ShowFilesStmt show_files_stmt; nonterminal DescribeDbStmt describe_db_stmt; @@ -552,6 +553,8 @@ stmt ::= {: RESULT = show_dbs; :} | show_partitions_stmt:show_partitions {: RESULT = show_partitions; :} + | show_range_partitions_stmt:show_range_partitions + {: RESULT = show_range_partitions; :} | show_stats_stmt:show_stats {: RESULT = show_stats; :} | show_functions_stmt:show_functions @@ -2084,14 +2087,19 @@ show_dbs_stmt ::= show_stats_stmt ::= KW_SHOW KW_TABLE KW_STATS table_name:table - {: RESULT = new ShowStatsStmt(table, false); :} + {: RESULT = new ShowStatsStmt(table, TShowStatsOp.TABLE_STATS); :} | KW_SHOW KW_COLUMN KW_STATS table_name:table - {: RESULT = new ShowStatsStmt(table, true); :} + {: RESULT = new ShowStatsStmt(table, 
TShowStatsOp.COLUMN_STATS); :} ; show_partitions_stmt ::= KW_SHOW KW_PARTITIONS table_name:table - {: RESULT = new ShowPartitionsStmt(table); :} + {: RESULT = new ShowStatsStmt(table, TShowStatsOp.PARTITIONS); :} + ; + +show_range_partitions_stmt ::= + KW_SHOW KW_RANGE KW_PARTITIONS table_name:table + {: RESULT = new ShowStatsStmt(table, TShowStatsOp.RANGE_PARTITIONS); :} ; show_functions_stmt ::= diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowPartitionsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowPartitionsStmt.java deleted file mode 100644 index 9ade66b693a..00000000000 --- a/fe/src/main/java/org/apache/impala/analysis/ShowPartitionsStmt.java +++ /dev/null @@ -1,55 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.impala.analysis; - -import org.apache.impala.catalog.HdfsTable; -import org.apache.impala.common.AnalysisException; -import com.google.common.base.Preconditions; - -/** - * Representation of a SHOW PARTITIONS statement for displaying - * partition information on a given table. - */ -public class ShowPartitionsStmt extends ShowStatsStmt { - - public ShowPartitionsStmt(TableName tableName) { - super(tableName, false); - } - - @Override - public String toSql() { - return getSqlPrefix() + " " + tableName_.toString(); - } - - @Override - protected String getSqlPrefix() { return "SHOW PARTITIONS"; } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException { - super.analyze(analyzer); - Preconditions.checkNotNull(table_); - if (!(table_ instanceof HdfsTable)) { - throw new AnalysisException(getSqlPrefix() + " must target an HDFS table: " + - table_.getFullName()); - } - if (table_.getNumClusteringCols() == 0) { - throw new AnalysisException(String.format( - "Table is not partitioned: %s", table_.getFullName())); - } - } -} diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java index 79095ea18ec..8801b0abbc2 100644 --- a/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java +++ b/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java @@ -18,25 +18,29 @@ package org.apache.impala.analysis; import org.apache.impala.authorization.Privilege; +import org.apache.impala.catalog.HdfsTable; +import org.apache.impala.catalog.KuduTable; import org.apache.impala.catalog.Table; import org.apache.impala.catalog.View; import org.apache.impala.common.AnalysisException; +import org.apache.impala.thrift.TShowStatsOp; import org.apache.impala.thrift.TShowStatsParams; +import com.google.common.base.Preconditions; /** * Representation of a SHOW TABLE/COLUMN STATS statement for * displaying column and table/partition statistics for a given table. 
*/ public class ShowStatsStmt extends StatementBase { - protected final boolean isShowColStats_; + protected final TShowStatsOp op_; protected final TableName tableName_; // Set during analysis. protected Table table_; - public ShowStatsStmt(TableName tableName, boolean isShowColStats) { + public ShowStatsStmt(TableName tableName, TShowStatsOp op) { + this.op_ = op; this.tableName_ = tableName; - this.isShowColStats_ = isShowColStats; } @Override @@ -45,21 +49,57 @@ public String toSql() { } protected String getSqlPrefix() { - return "SHOW " + ((isShowColStats_) ? "COLUMN" : "TABLE") + " STATS"; + if (op_ == TShowStatsOp.TABLE_STATS) { + return "SHOW TABLE STATS"; + } else if (op_ == TShowStatsOp.COLUMN_STATS) { + return "SHOW COLUMN STATS"; + } else if (op_ == TShowStatsOp.PARTITIONS) { + return "SHOW PARTITIONS"; + } else if (op_ == TShowStatsOp.RANGE_PARTITIONS) { + return "SHOW RANGE PARTITIONS"; + } else { + Preconditions.checkState(false); + return ""; + } } @Override public void analyze(Analyzer analyzer) throws AnalysisException { table_ = analyzer.getTable(tableName_, Privilege.VIEW_METADATA); + Preconditions.checkNotNull(table_); if (table_ instanceof View) { throw new AnalysisException(String.format( "%s not applicable to a view: %s", getSqlPrefix(), table_.getFullName())); } + if (table_ instanceof HdfsTable) { + if (table_.getNumClusteringCols() == 0 && op_ == TShowStatsOp.PARTITIONS) { + throw new AnalysisException("Table is not partitioned: " + table_.getFullName()); + } + if (op_ == TShowStatsOp.RANGE_PARTITIONS) { + throw new AnalysisException(getSqlPrefix() + " must target a Kudu table: " + + table_.getFullName()); + } + } else if (table_ instanceof KuduTable) { + KuduTable kuduTable = (KuduTable) table_; + if (op_ == TShowStatsOp.RANGE_PARTITIONS && + kuduTable.getRangePartitioningColNames().isEmpty()) { + throw new AnalysisException(getSqlPrefix() + " requested but table does not " + + "have range partitions: " + table_.getFullName()); + } + } else { + if (op_ == TShowStatsOp.RANGE_PARTITIONS) { + throw new AnalysisException(getSqlPrefix() + " must target a Kudu table: " + + table_.getFullName()); + } else if (op_ == TShowStatsOp.PARTITIONS) { + throw new AnalysisException(getSqlPrefix() + " must target an HDFS table: " + + table_.getFullName()); + } + } } public TShowStatsParams toThrift() { // Ensure the DB is set in the table_name field by using table and not tableName. 
- return new TShowStatsParams(isShowColStats_, + return new TShowStatsParams(op_, new TableName(table_.getDb().getName(), table_.getName()).toThrift()); } } diff --git a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java index ae2a6402311..8409b455481 100644 --- a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java +++ b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java @@ -413,4 +413,33 @@ public TResultSet getTableStats() throws ImpalaRuntimeException { } return result; } + + public TResultSet getRangePartitions() throws ImpalaRuntimeException { + TResultSet result = new TResultSet(); + TResultSetMetadata resultSchema = new TResultSetMetadata(); + result.setSchema(resultSchema); + + // Build column header + String header = "RANGE (" + Joiner.on(',').join(getRangePartitioningColNames()) + ")"; + resultSchema.addToColumns(new TColumn(header, Type.STRING.toThrift())); + try (KuduClient client = KuduUtil.createKuduClient(getKuduMasterHosts())) { + org.apache.kudu.client.KuduTable kuduTable = client.openTable(kuduTableName_); + // The Kudu table API will return the partitions in sorted order by value. + List partitions = kuduTable.getFormattedRangePartitions( + BackendConfig.INSTANCE.getKuduClientTimeoutMs()); + if (partitions.isEmpty()) { + TResultRowBuilder builder = new TResultRowBuilder(); + result.addToRows(builder.add("").get()); + return result; + } + for (String partition: partitions) { + TResultRowBuilder builder = new TResultRowBuilder(); + builder.add(partition); + result.addToRows(builder.get()); + } + } catch (Exception e) { + throw new ImpalaRuntimeException("Error accessing Kudu for table partitions.", e); + } + return result; + } } diff --git a/fe/src/main/java/org/apache/impala/service/Frontend.java b/fe/src/main/java/org/apache/impala/service/Frontend.java index c98ba49a785..9e7a16adda7 100644 --- a/fe/src/main/java/org/apache/impala/service/Frontend.java +++ b/fe/src/main/java/org/apache/impala/service/Frontend.java @@ -120,6 +120,7 @@ import org.apache.impala.thrift.TResultSet; import org.apache.impala.thrift.TResultSetMetadata; import org.apache.impala.thrift.TShowFilesParams; +import org.apache.impala.thrift.TShowStatsOp; import org.apache.impala.thrift.TStatus; import org.apache.impala.thrift.TStmtType; import org.apache.impala.thrift.TTableName; @@ -715,7 +716,7 @@ public TResultSet getColumnStats(String dbName, String tableName) /** * Generate result set and schema for a SHOW TABLE STATS command. 
*/ - public TResultSet getTableStats(String dbName, String tableName) + public TResultSet getTableStats(String dbName, String tableName, TShowStatsOp op) throws ImpalaException { Table table = impaladCatalog_.getTable(dbName, tableName); if (table instanceof HdfsTable) { @@ -725,7 +726,11 @@ public TResultSet getTableStats(String dbName, String tableName) } else if (table instanceof DataSourceTable) { return ((DataSourceTable) table).getTableStats(); } else if (table instanceof KuduTable) { - return ((KuduTable) table).getTableStats(); + if (op == TShowStatsOp.RANGE_PARTITIONS) { + return ((KuduTable) table).getRangePartitions(); + } else { + return ((KuduTable) table).getTableStats(); + } } else { throw new InternalException("Invalid table class: " + table.getClass()); } diff --git a/fe/src/main/java/org/apache/impala/service/JniFrontend.java b/fe/src/main/java/org/apache/impala/service/JniFrontend.java index fec35d78025..bff53424fe9 100644 --- a/fe/src/main/java/org/apache/impala/service/JniFrontend.java +++ b/fe/src/main/java/org/apache/impala/service/JniFrontend.java @@ -80,6 +80,7 @@ import org.apache.impala.thrift.TShowGrantRoleParams; import org.apache.impala.thrift.TShowRolesParams; import org.apache.impala.thrift.TShowRolesResult; +import org.apache.impala.thrift.TShowStatsOp; import org.apache.impala.thrift.TShowStatsParams; import org.apache.impala.thrift.TTableName; import org.apache.impala.thrift.TUniqueId; @@ -358,12 +359,13 @@ public byte[] getStats(byte[] thriftShowStatsParams) throws ImpalaException { JniUtil.deserializeThrift(protocolFactory_, params, thriftShowStatsParams); Preconditions.checkState(params.isSetTable_name()); TResultSet result; - if (params.isIs_show_col_stats()) { + + if (params.op == TShowStatsOp.COLUMN_STATS) { result = frontend_.getColumnStats(params.getTable_name().getDb_name(), params.getTable_name().getTable_name()); } else { result = frontend_.getTableStats(params.getTable_name().getDb_name(), - params.getTable_name().getTable_name()); + params.getTable_name().getTable_name(), params.op); } TSerializer serializer = new TSerializer(protocolFactory_); try { diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java index e62f797325a..935edc52b03 100644 --- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java +++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java @@ -3287,6 +3287,26 @@ public void TestShowPartitions() throws AnalysisException { "SHOW PARTITIONS must target an HDFS table: functional_hbase.alltypes"); } + @Test + public void TestShowRangePartitions() throws AnalysisException { + AnalyzesOk("show range partitions functional_kudu.dimtbl"); + AnalysisError("show range partitions baddb.alltypes", + "Database does not exist: baddb"); + AnalysisError("show range partitions functional.badtbl", + "Table does not exist: functional.badtbl"); + AnalysisError("show range partitions functional.alltypes", + "SHOW RANGE PARTITIONS must target a Kudu table: functional.alltypes"); + AnalysisError("show range partitions functional.alltypesnopart", + "SHOW RANGE PARTITIONS must target a Kudu table: functional.alltypes"); + AnalysisError("show range partitions functional_kudu.alltypes", + "SHOW RANGE PARTITIONS requested but table does not have range partitions: " + + "functional_kudu.alltypes"); + AnalysisError("show range partitions functional.view_view", + "SHOW RANGE PARTITIONS not applicable to a view: functional.view_view"); + 
AnalysisError("show range partitions functional_hbase.alltypes", + "SHOW RANGE PARTITIONS must target a Kudu table: functional_hbase.alltypes"); + } + @Test public void TestShowCreateFunction() throws AnalysisException { addTestFunction("TestFn", Lists.newArrayList(Type.INT, Type.INT), false); diff --git a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java index 565b2076ceb..180eabd0414 100644 --- a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java +++ b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java @@ -1744,6 +1744,11 @@ public void TestShow() { ParsesOk("SHOW PARTITIONS db.tbl"); ParsesOk("SHOW PARTITIONS `db`.`tbl`"); + // Show range partitions + ParsesOk("SHOW RANGE PARTITIONS tbl"); + ParsesOk("SHOW RANGE PARTITIONS db.tbl"); + ParsesOk("SHOW RANGE PARTITIONS `db`.`tbl`"); + // Show files of table ParsesOk("SHOW FILES IN tbl"); ParsesOk("SHOW FILES IN db.tbl"); diff --git a/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test b/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test index 8c523180c94..f959a423ec4 100644 --- a/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test +++ b/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test @@ -5,6 +5,12 @@ create table simple (id int primary key, name string, valf float, vali bigint) ---- RESULTS ==== ---- QUERY +# Hash partitions cannot be enumerated as range partitions +show range partitions simple +---- CATCH +AnalysisException: SHOW RANGE PARTITIONS requested but table does not have range partitions: $DATABASE.simple +==== +---- QUERY # Alter master address to a different location alter table simple set tblproperties ( 'kudu.master_addresses' = 'localhost' @@ -53,11 +59,24 @@ create table tbl_to_alter (id int primary key, name string null, vali bigint not ---- RESULTS ==== ---- QUERY +# Verify partition layout +show range partitions tbl_to_alter; +---- RESULTS +'2 <= VALUES < 11' +==== +---- QUERY # Add a range partition alter table tbl_to_alter add range partition 10 < values <= 20 ---- RESULTS ==== ---- QUERY +# Verify partition layout +show range partitions tbl_to_alter; +---- RESULTS +'2 <= VALUES < 11' +'11 <= VALUES < 21' +==== +---- QUERY # Insert a row to the new partition insert into tbl_to_alter values (15, 'name', 100) ---- RUNTIME_PROFILE @@ -76,6 +95,14 @@ alter table tbl_to_alter add range partition value = 100 ---- RESULTS ==== ---- QUERY +# Verify partition layout +show range partitions tbl_to_alter; +---- RESULTS +'2 <= VALUES < 11' +'11 <= VALUES < 21' +'VALUE = 100' +==== +---- QUERY # Insert a row to the new partition insert into tbl_to_alter values (100, 'name1', 1000) ---- RUNTIME_PROFILE @@ -95,6 +122,15 @@ alter table tbl_to_alter add range partition 1000 < values ---- RESULTS ==== ---- QUERY +# Verify partition layout +show range partitions tbl_to_alter; +---- RESULTS +'2 <= VALUES < 11' +'11 <= VALUES < 21' +'VALUE = 100' +'VALUES >= 1001' +==== +---- QUERY # Try to insert a partition that overlaps with an existing partition alter table tbl_to_alter add range partition 10 < values <= 30 ---- CATCH @@ -112,6 +148,14 @@ alter table tbl_to_alter drop range partition value = 100 ---- RESULTS ==== ---- QUERY +# Verify partition layout +show range partitions tbl_to_alter; +---- RESULTS +'2 <= VALUES < 11' +'11 <= VALUES < 21' +'VALUES >= 1001' +==== +---- QUERY # Select table rows after one partition was dropped select * from tbl_to_alter ---- RESULTS @@ 
-121,8 +165,15 @@ INT,STRING,BIGINT ==== ---- QUERY # Drop an existing range partition -alter table tbl_to_alter drop range partition 10 < values <= 20 +alter table tbl_to_alter drop range partition 11 <= values < 21 +---- RESULTS +==== +---- QUERY +# Verify partition layout +show range partitions tbl_to_alter; ---- RESULTS +'2 <= VALUES < 11' +'VALUES >= 1001' ==== ---- QUERY # Drop all the range partitions @@ -131,6 +182,12 @@ alter table tbl_to_alter drop range partition 1000 < values ---- RESULTS ==== ---- QUERY +# Verify partition layout +show range partitions tbl_to_alter; +---- RESULTS +'' +==== +---- QUERY # Retrieve the rows of a table after all the partitions got dropped select count(*), count(id) from tbl_to_alter where id = 1 and cast(sin(id) as boolean) = true @@ -154,6 +211,12 @@ alter table tbl_to_alter add columns (new_col1 int not null default 10, ---- RESULTS ==== ---- QUERY +# Verify partition layout +show range partitions tbl_to_alter; +---- RESULTS +'2 <= VALUES < 21' +==== +---- QUERY # Insert a row that has values for the new columns insert into tbl_to_alter values (2, 'test', 100, 1, 100) ---- RUNTIME_PROFILE diff --git a/testdata/workloads/functional-query/queries/QueryTest/kudu_partition_ddl.test b/testdata/workloads/functional-query/queries/QueryTest/kudu_partition_ddl.test index eefbc28b7ba..96cbc145874 100644 --- a/testdata/workloads/functional-query/queries/QueryTest/kudu_partition_ddl.test +++ b/testdata/workloads/functional-query/queries/QueryTest/kudu_partition_ddl.test @@ -91,6 +91,14 @@ create table range_part_multiple_cols (id int, name string, valf float, vali big ---- RESULTS ==== ---- QUERY +-- Test printing of multiple column range partitioning +show range partitions range_part_multiple_cols +---- RESULTS +'VALUE = (10, "martin")' +'VALUE = (20, "dimitris")' +'VALUE = (30, "matthew")' +==== +---- QUERY show table stats range_part_multiple_cols ---- LABELS # Rows,Start Key,Stop Key,Leader Replica,# Replicas diff --git a/tests/query_test/test_kudu.py b/tests/query_test/test_kudu.py index 7b373859279..9a5bab940ee 100644 --- a/tests/query_test/test_kudu.py +++ b/tests/query_test/test_kudu.py @@ -23,9 +23,11 @@ INT32, INT64, INT8, + SchemaBuilder, STRING, BINARY, UNIXTIME_MICROS) +from kudu.client import Partitioning import logging import pytest import textwrap @@ -103,6 +105,37 @@ def test_kudu_rename_table(self, cursor, kudu_client, unique_database): assert kudu_client.table_exists(new_kudu_tbl_name) assert not kudu_client.table_exists(kudu_tbl_name) + def test_kudu_show_unbounded_range_partition(self, cursor, kudu_client, + unique_database): + """Check that a single unbounded range partition gets printed correctly.""" + schema_builder = SchemaBuilder() + column_spec = schema_builder.add_column("id", INT64) + column_spec.nullable(False) + schema_builder.set_primary_keys(["id"]) + schema = schema_builder.build() + + name = unique_database + ".unbounded_range_table" + + try: + kudu_client.create_table(name, schema, + partitioning=Partitioning().set_range_partition_columns(["id"])) + kudu_table = kudu_client.table(name) + + impala_table_name = self.get_kudu_table_base_name(kudu_table.name) + props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name + cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name, + props)) + with self.drop_impala_table_after_context(cursor, impala_table_name): + cursor.execute("SHOW RANGE PARTITIONS %s" % impala_table_name) + assert cursor.description == [ + ('RANGE (id)', 'STRING', None, None, 
None, None, None)] + assert cursor.fetchall() == [('UNBOUNDED',)] + + finally: + if kudu_client.table_exists(name): + kudu_client.delete_table(name) + + class TestCreateExternalTable(KuduTestSuite): def test_implicit_table_props(self, cursor, kudu_client):
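
For a quick end-to-end view of the new statement, the sequence below is distilled from the kudu_alter.test changes above. It assumes tbl_to_alter already exists with a single range partition covering [2, 11), as that test creates it; the commented rows are the expected results in the test-file convention (one quoted string per range partition), so nothing here goes beyond what the patch itself exercises.

-- Initial layout: one result row per range partition, in the order
-- returned by the Kudu client.
SHOW RANGE PARTITIONS tbl_to_alter;
-- '2 <= VALUES < 11'

-- Adding a range partition is reflected immediately.
ALTER TABLE tbl_to_alter ADD RANGE PARTITION 10 < VALUES <= 20;
SHOW RANGE PARTITIONS tbl_to_alter;
-- '2 <= VALUES < 11'
-- '11 <= VALUES < 21'

-- Dropping it again restores the original layout.
ALTER TABLE tbl_to_alter DROP RANGE PARTITION 11 <= VALUES < 21;
SHOW RANGE PARTITIONS tbl_to_alter;
-- '2 <= VALUES < 11'

For multi-column range partitioning the result column is labelled RANGE (col1,col2) and each row prints a tuple, e.g. 'VALUE = (10, "martin")', as covered by kudu_partition_ddl.test; a table whose range partitions have all been dropped returns a single empty string, matching KuduTable.getRangePartitions().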
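The analysis-time guards added in ShowStatsStmt.analyze() can likewise be summarized from the SQL side. Both statements and both error messages below are taken from AnalyzeDDLTest.TestShowRangePartitions and the kudu_alter.test CATCH case; only the comment formatting is new.

-- A non-Kudu table is rejected at analysis time:
SHOW RANGE PARTITIONS functional.alltypes;
-- AnalysisException: SHOW RANGE PARTITIONS must target a Kudu table:
-- functional.alltypes

-- A Kudu table with no range component (e.g. hash partitioning only):
SHOW RANGE PARTITIONS functional_kudu.alltypes;
-- AnalysisException: SHOW RANGE PARTITIONS requested but table does not have
-- range partitions: functional_kudu.alltypes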