Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -155,14 +155,13 @@ private void testOptimization(String dataTableName, String dataTableFullName, St



String expected =
"CLIENT PARALLEL 1-WAY FULL SCAN OVER " + dataTableName + "\n" +
" SKIP-SCAN-JOIN TABLE 0\n" +
" CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + indexTableName + " \\['a'\\]\n" +
" SERVER FILTER BY FIRST KEY ONLY\n" +
" DYNAMIC SERVER FILTER BY \\(\"" + dataTableName + ".T_ID\", \"" + dataTableName + ".K1\", \"" + dataTableName + ".K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
String expected =
"CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + indexTableName + " ['a']\n" +
" SERVER MERGE [0.K3]\n" +
" SERVER FILTER BY FIRST KEY ONLY";
Comment on lines +158 to +161
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

do we not need NO_INDEX_SERVER_MERGE with INDEX("dt" "it") for this to be changed?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

With PHOENIX-6959 the behaviour is the same as 5.2, and these changes are taken from 5.2 branch.

I have added a single new test case for the NO_INDEX_SERVER_MERGE option below.

String actual = QueryUtil.getExplainPlan(rs);
assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));
assertTrue("Expected:\n" + expected + "\nbut got\n" + actual,
actual.equals(expected));

rs = conn1.createStatement().executeQuery(query);
assertTrue(rs.next());
Expand All @@ -180,14 +179,13 @@ private void testOptimization(String dataTableName, String dataTableFullName, St
query = "SELECT /*+ INDEX(" + dataTableName + " " + indexTableName + ")*/ * FROM " + dataTableName +" where v1='a'";
rs = conn1.createStatement().executeQuery("EXPLAIN "+ query);

expected =
"CLIENT PARALLEL 1-WAY FULL SCAN OVER " + dataTableName + "\n" +
" SKIP-SCAN-JOIN TABLE 0\n" +
" CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + indexTableName + " \\['a'\\]\n" +
" SERVER FILTER BY FIRST KEY ONLY\n" +
" DYNAMIC SERVER FILTER BY \\(\"" + dataTableName + ".T_ID\", \"" + dataTableName + ".K1\", \"" + dataTableName + ".K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)";
expected =
"CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + indexTableName + " ['a']\n" +
" SERVER MERGE [0.K3]\n" +
" SERVER FILTER BY FIRST KEY ONLY";
actual = QueryUtil.getExplainPlan(rs);
assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));
assertTrue("Expected:\n" + expected + "\nbut got\n" + actual,
actual.equals(expected));

rs = conn1.createStatement().executeQuery(query);
assertTrue(rs.next());
Expand All @@ -207,16 +205,15 @@ private void testOptimization(String dataTableName, String dataTableFullName, St
query = "SELECT /*+ INDEX(" + dataTableName + " " + indexTableName + ")*/ * FROM " + dataTableName +" where v1='a' limit 1";
rs = conn1.createStatement().executeQuery("EXPLAIN "+ query);

expected =
"CLIENT PARALLEL 1-WAY FULL SCAN OVER " + dataTableName + "\n" +
"CLIENT 1 ROW LIMIT\n" +
" SKIP-SCAN-JOIN TABLE 0\n" +
" CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + indexTableName + " \\['a'\\]\n" +
" SERVER FILTER BY FIRST KEY ONLY\n" +
" DYNAMIC SERVER FILTER BY \\(\"" + dataTableName + ".T_ID\", \"" + dataTableName + ".K1\", \"" + dataTableName + ".K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)\n" +
" JOIN-SCANNER 1 ROW LIMIT";
expected =
"CLIENT SERIAL 1-WAY RANGE SCAN OVER " + indexTableName + " ['a']\n" +
" SERVER MERGE [0.K3]\n" +
" SERVER FILTER BY FIRST KEY ONLY\n" +
" SERVER 1 ROW LIMIT\n" +
"CLIENT 1 ROW LIMIT";
actual = QueryUtil.getExplainPlan(rs);
assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));
assertTrue("Expected:\n" + expected + "\nbut got\n" + actual,
actual.equals(expected));

rs = conn1.createStatement().executeQuery(query);
assertTrue(rs.next());
Expand Down Expand Up @@ -257,7 +254,44 @@ private void testOptimization(String dataTableName, String dataTableFullName, St
assertEquals(4, rs.getInt("k3"));
assertEquals("z", rs.getString("V1"));
assertFalse(rs.next());


// Same as above, but with the NO_INDEX_SERVER_MERGE hint, which forces the SKIP-SCAN-JOIN plan
query = "SELECT /*+ INDEX(" + dataTableName + " " + indexTableName + "), NO_INDEX_SERVER_MERGE */ t_id, k1, k2, k3, V1 from " + dataTableFullName + " where v1<='z' and k3 > 1 order by V1,t_id";
rs = conn1.createStatement().executeQuery("EXPLAIN " + query);

expected =
"CLIENT PARALLEL 1-WAY FULL SCAN OVER " + dataTableName + "\n"
+ " SERVER FILTER BY K3 > 1\n"
+ " SERVER SORTED BY \\[" + dataTableName + "\\.V1, " + dataTableName + "\\.T_ID\\]\n"
+ "CLIENT MERGE SORT\n"
+ " SKIP-SCAN-JOIN TABLE 0\n"
+ " CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + indexTableName + " \\[\\*\\] - \\['z'\\]\n"
+ " SERVER FILTER BY FIRST KEY ONLY\n"
+ " DYNAMIC SERVER FILTER BY \\(\"" + dataTableName + "\\.T_ID\", \"" + dataTableName + "\\.K1\", \"" + dataTableName + "\\.K2\"\\) IN \\(\\(\\$\\d+\\.\\$\\d+, \\$\\d+\\.\\$\\d+, \\$\\d+\\.\\$\\d+\\)\\)";
actual = QueryUtil.getExplainPlan(rs);
assertTrue("Expected:\n" + expected + "\nbut got\n" + actual, Pattern.matches(expected, actual));

rs = conn1.createStatement().executeQuery(query);
assertTrue(rs.next());
assertEquals("f", rs.getString("t_id"));
assertEquals(1, rs.getInt("k1"));
assertEquals(2, rs.getInt("k2"));
assertEquals(3, rs.getInt("k3"));
assertEquals("a", rs.getString("V1"));
assertTrue(rs.next());
assertEquals("j", rs.getString("t_id"));
assertEquals(2, rs.getInt("k1"));
assertEquals(4, rs.getInt("k2"));
assertEquals(2, rs.getInt("k3"));
assertEquals("a", rs.getString("V1"));
assertTrue(rs.next());
assertEquals("b", rs.getString("t_id"));
assertEquals(1, rs.getInt("k1"));
assertEquals(2, rs.getInt("k2"));
assertEquals(4, rs.getInt("k3"));
assertEquals("z", rs.getString("V1"));
assertFalse(rs.next());

query = "SELECT /*+ INDEX(" + dataTableName + " " + indexTableName + ")*/ t_id, V1, k3 from " + dataTableFullName + " where v1 <='z' group by v1,t_id, k3";
rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
expected =
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -208,7 +208,7 @@ private static void projectAllIndexColumns(StatementContext context, TableRef ta
int tableOffset = dataTable.getBucketNum() == null ? 0 : 1;
int minTablePKOffset = getMinPKOffset(dataTable, tenantId);
int minIndexPKOffset = getMinPKOffset(index, tenantId);
if (index.getIndexType() != IndexType.LOCAL) {
if (index.getIndexType() != IndexType.LOCAL && !isHintedGlobalIndex(tableRef)) {
if (index.getColumns().size()-minIndexPKOffset != dataTable.getColumns().size()-minTablePKOffset) {
// We'll end up not using this by the optimizer, so just throw
String schemaNameStr = dataTable.getSchemaName()==null?null:dataTable.getSchemaName().getString();
Expand Down Expand Up @@ -246,7 +246,7 @@ private static void projectAllIndexColumns(StatementContext context, TableRef ta
}
String colName = tableColumn.getName().getString();
String tableAlias = tableRef.getTableAlias();
if (resolveColumn) {
if (resolveColumn && !(ref instanceof IndexDataColumnRef)) {
try {
if (tableAlias != null) {
ref = resolver.resolveColumn(null, tableAlias, indexColName);
Expand Down Expand Up @@ -505,7 +505,15 @@ public static RowProjector compile(StatementContext context, SelectStatement sta
} else {
isProjectEmptyKeyValue = where == null || LiteralExpression.isTrue(where) || where.requiresFinalEvaluation();
for (byte[] family : projectedFamilies) {
projectColumnFamily(table, scan, family);
try {
if (table.getColumnFamily(family) != null) {
projectColumnFamily(table, scan, family);
}
} catch (ColumnFamilyNotFoundException e) {
if (!(tableRef.getTable().getIndexType() == IndexType.LOCAL || isHintedGlobalIndex(tableRef))) {
throw e;
}
Comment on lines +513 to +515
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

for else case, would a DEBUG log be helpful?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is again synced from 5.2, which does not have a debug statement there.
This applies to all server merged columns, so it would generate a log line for each merged column, which sounds excessive.

}
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -341,9 +341,22 @@ private QueryPlan addPlan(PhoenixStatement statement, SelectStatement select, PT
if (indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE
|| (indexState == PIndexState.PENDING_DISABLE && isUnderPendingDisableThreshold(indexTableRef.getCurrentTime(), indexTable.getIndexDisableTimestamp()))) {
try {
if (select.getHint().hasHint(HintNode.Hint.NO_INDEX_SERVER_MERGE)) {
String schemaNameStr = index.getSchemaName() == null ? null
: index.getSchemaName().getString();
String tableNameStr = index.getTableName() == null ? null
: index.getTableName().getString();
throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, "*");
}
// translate nodes that match expressions that are indexed to the associated column parse node
indexSelect = ParseNodeRewriter.rewrite(indexSelect, new IndexExpressionParseNodeRewriter(index, null, statement.getConnection(), indexSelect.getUdfParseNodes()));
QueryCompiler compiler = new QueryCompiler(statement, indexSelect, resolver, targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager(), isProjected, true, dataPlans);
SelectStatement rewrittenIndexSelect =
ParseNodeRewriter.rewrite(indexSelect,
new IndexExpressionParseNodeRewriter(index, null,
statement.getConnection(), indexSelect.getUdfParseNodes()));
QueryCompiler compiler =
new QueryCompiler(statement, rewrittenIndexSelect, resolver, targetColumns,
parallelIteratorFactory, dataPlan.getContext().getSequenceManager(),
isProjected, true, dataPlans);

QueryPlan plan = compiler.compile();

Expand All @@ -357,8 +370,10 @@ private QueryPlan addPlan(PhoenixStatement statement, SelectStatement select, PT
if (plan.getProjector().getColumnCount() == nColumns) {
return plan;
} else if (index.getIndexType() == IndexType.GLOBAL) {
String schemaNameStr = index.getSchemaName()==null?null:index.getSchemaName().getString();
String tableNameStr = index.getTableName()==null?null:index.getTableName().getString();
String schemaNameStr = index.getSchemaName() == null ? null
: index.getSchemaName().getString();
String tableNameStr = index.getTableName() == null ? null
: index.getTableName().getString();
throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, "*");
}
}
Expand All @@ -369,6 +384,10 @@ private QueryPlan addPlan(PhoenixStatement statement, SelectStatement select, PT
* otherwise we just don't use this index (as opposed to trying to join back from
* the index table to the data table.
*/
// Reset the state changes from the attempt above
indexTableRef.setHinted(false);
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

without this reset, the compiler might still end up using uncovered index right?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We'd use the skip-scan-join plan with the projector for the server merge plan, which doesn't work.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

For example we saw

Error: ERROR 504 (42703): Undefined column. columnName=BILLING_ORDER.0:PERIOD_END (state=42703,code=504)
org.apache.phoenix.schema.ColumnNotFoundException: ERROR 504 (42703): Undefined column. columnName=BILLING_ORDER.0:PERIOD_END

which would refer to the projected data table in the server merge plan, but the actual Table is unprojected data table in the skip-join-scan, which doesn't have a column of that name.

Resetting those flags makes sure that we generate and use the correct projector.

dataPlan.getContext().setUncoveredIndex(false);

SelectStatement dataSelect = (SelectStatement)dataPlan.getStatement();
ParseNode where = dataSelect.getWhere();
if (isHinted && where != null) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,10 @@ public enum Hint {
* Issue https://issues.apache.org/jira/browse/PHOENIX-4751.
*/
HASH_AGGREGATE,
/**
* Do not use server merge for hinted uncovered indexes
*/
NO_INDEX_SERVER_MERGE
};

private final Map<Hint,String> hints;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7022,4 +7022,40 @@ public void testUncoveredPhoenix6969() throws Exception {
}
}

@Test
public void testUncoveredPhoenix6984() throws Exception {
    // Regression test for PHOENIX-6984: with the NO_INDEX_SERVER_MERGE hint, a
    // hinted uncovered global index must fall back to the skip-scan-join plan
    // (index scan joined back to the data table) instead of a server-merge plan.
    try (Connection conn = DriverManager.getConnection(getUrl());
    Statement stmt = conn.createStatement()) {
        // Data table: 4-column composite PK plus two TIMESTAMP columns that are
        // NOT covered by the index created below.
        stmt.execute("CREATE TABLE D (\n" + "K1 CHAR(6) NOT NULL,\n"
        + "K2 VARCHAR(22) NOT NULL,\n"
        + "K3 CHAR(2) NOT NULL,\n"
        + "K4 VARCHAR(36) NOT NULL,\n"
        + "V1 TIMESTAMP,\n"
        + "V2 TIMESTAMP,\n"
        + "CONSTRAINT PK_BILLING_ORDER PRIMARY KEY (K1,K2,K3,K4))");

        // Index on PK columns only (reordered); V1/V2 are uncovered, so the
        // filter on them must be evaluated on the data table.
        stmt.execute("CREATE INDEX I ON D(K2, K1, K3, K4)");
        // Hints: force index I, but forbid server-side merge of uncovered columns.
        String query =
                "SELECT /*+ INDEX(D I), NO_INDEX_SERVER_MERGE */ * "
                        + "FROM D "
                        + "WHERE K2 = 'XXX' AND "
                        + "V2 >= TIMESTAMP '2023-05-31 23:59:59.000' AND "
                        + "V1 <= TIMESTAMP '2023-04-01 00:00:00.000' "
                        + "ORDER BY V2 asc";
        ResultSet rs = stmt.executeQuery("EXPLAIN " + query);
        String explainPlan = QueryUtil.getExplainPlan(rs);
        // Expect the skip-scan-join shape: a range scan over index I feeding a
        // dynamic server filter on the data table's PK — no SERVER MERGE step.
        assertEquals("CLIENT PARALLEL 1-WAY FULL SCAN OVER D\n"
                + "    SERVER FILTER BY (V2 >= TIMESTAMP '2023-05-31 23:59:59.000'"
                + " AND V1 <= TIMESTAMP '2023-04-01 00:00:00.000')\n"
                + "    SERVER SORTED BY [D.V2]\n"
                + "CLIENT MERGE SORT\n"
                + "    SKIP-SCAN-JOIN TABLE 0\n"
                + "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER I ['XXX']\n"
                + "            SERVER FILTER BY FIRST KEY ONLY\n"
                + "    DYNAMIC SERVER FILTER BY (\"D.K1\", \"D.K2\", \"D.K3\", \"D.K4\")"
                + " IN (($2.$4, $2.$5, $2.$6, $2.$7))",
            explainPlan);
    }
}

}