From f1ffcf6b76a0a764df5529a03ba8dc42112dc5a9 Mon Sep 17 00:00:00 2001 From: Mahdi Salarkia Date: Fri, 27 Jul 2018 22:27:46 -0700 Subject: [PATCH 01/14] PHOENIX-3547 Support a larger number of indices per table. Currently the number of indices per Phoenix table is bounded by a maximum of 65535 (java.lang.Short), which is a limitation for applications requiring an unlimited number of indices. This change makes any new table created in Phoenix support view index ids in the range of -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807 (java.lang.Long), which is undoubtedly big enough to cover this requirement. Any existing Phoenix table will continue to support only a maximum of 65535 indices. A new boolean column (USE_LONG_VIEW_INDEX BOOLEAN DEFAULT FALSE) is added to SYSTEM.CATALOG to specify each Phoenix table's support for a large number of indices. On each new Phoenix table creation the value for USE_LONG_VIEW_INDEX will be set to `true`, while this value will be false for any existing table. 
--- .../BaseTenantSpecificViewIndexIT.java | 4 +- .../apache/phoenix/end2end/BaseViewIT.java | 8 +- .../end2end/TenantSpecificViewIndexIT.java | 6 +- .../org/apache/phoenix/end2end/UpgradeIT.java | 2 +- .../org/apache/phoenix/end2end/ViewIT.java | 10 +- .../index/ChildViewsUseParentViewIndexIT.java | 4 +- .../index/GlobalIndexOptimizationIT.java | 2 +- .../phoenix/end2end/index/IndexUsageIT.java | 4 +- .../phoenix/end2end/index/LocalIndexIT.java | 2 +- .../end2end/index/MutableIndexFailureIT.java | 4 +- .../phoenix/end2end/index/ViewIndexIT.java | 6 +- .../IndexHalfStoreFileReaderGenerator.java | 3 +- .../phoenix/compile/CreateTableCompiler.java | 3 +- .../phoenix/compile/DeleteCompiler.java | 2 +- .../apache/phoenix/compile/FromCompiler.java | 2 +- .../apache/phoenix/compile/JoinCompiler.java | 2 +- .../compile/TupleProjectionCompiler.java | 4 +- .../apache/phoenix/compile/UnionCompiler.java | 2 +- .../phoenix/compile/UpsertCompiler.java | 2 +- .../phoenix/compile/WhereOptimizer.java | 2 +- .../coprocessor/MetaDataEndpointImpl.java | 63 ++- .../phoenix/coprocessor/MetaDataProtocol.java | 44 +- .../coprocessor/generated/MetaDataProtos.java | 471 ++++++++++++------ .../coprocessor/generated/PTableProtos.java | 157 ++++-- .../generated/ServerCachingProtos.java | 128 ++++- .../apache/phoenix/index/IndexMaintainer.java | 14 +- .../index/PhoenixIndexFailurePolicy.java | 2 +- .../apache/phoenix/iterate/ExplainTable.java | 12 +- .../phoenix/jdbc/PhoenixDatabaseMetaData.java | 2 + .../ConnectionlessQueryServicesImpl.java | 2 +- .../apache/phoenix/query/QueryConstants.java | 3 +- .../apache/phoenix/schema/DelegateTable.java | 8 +- .../apache/phoenix/schema/MetaDataClient.java | 21 +- .../org/apache/phoenix/schema/PTable.java | 3 +- .../org/apache/phoenix/schema/PTableImpl.java | 47 +- .../org/apache/phoenix/util/MetaDataUtil.java | 4 + .../TenantSpecificViewIndexCompileTest.java | 6 +- .../phoenix/execute/CorrelatePlanTest.java | 2 +- .../LiteralResultIteratorPlanTest.java 
| 2 +- .../src/main/MetaDataService.proto | 6 +- phoenix-protocol/src/main/PTable.proto | 3 +- .../src/main/ServerCachingService.proto | 1 + 42 files changed, 772 insertions(+), 303 deletions(-) diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java index 9bd689ef0aa..26e28608cb5 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java @@ -148,9 +148,9 @@ private void createAndVerifyIndex(Connection conn, String viewName, String table + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs)); } else { String expected = saltBuckets == null ? - "CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName + " [-32768,'" + tenantId + "','" + valuePrefix + "v2-1']\n" + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName + " [-9223372036854775808,'" + tenantId + "','" + valuePrefix + "v2-1']\n" + " SERVER FILTER BY FIRST KEY ONLY" : - "CLIENT PARALLEL 3-WAY RANGE SCAN OVER _IDX_" + tableName + " [0,-32768,'" + tenantId + "','" + valuePrefix + "v2-1'] - ["+(saltBuckets.intValue()-1)+",-32768,'" + tenantId + "','" + valuePrefix + "v2-1']\n" + "CLIENT PARALLEL 3-WAY RANGE SCAN OVER _IDX_" + tableName + " [0,-9223372036854775808,'" + tenantId + "','" + valuePrefix + "v2-1'] - ["+(saltBuckets.intValue()-1)+",-9223372036854775808,'" + tenantId + "','" + valuePrefix + "v2-1']\n" + " SERVER FILTER BY FIRST KEY ONLY\n" + "CLIENT MERGE SORT"; diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java index 53abf77ea1c..f081a3ba649 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java @@ -196,8 +196,8 @@ protected Pair 
testUpdatableViewIndex(Integer saltBuckets, boolean queryPlan); } else { assertEquals(saltBuckets == null - ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + viewIndexPhysicalName +" [" + Short.MIN_VALUE + ",51]" - : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [0," + Short.MIN_VALUE + ",51] - ["+(saltBuckets.intValue()-1)+"," + Short.MIN_VALUE + ",51]\nCLIENT MERGE SORT", + ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + viewIndexPhysicalName +" [" + Long.MIN_VALUE + ",51]" + : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [0," + Long.MIN_VALUE + ",51] - ["+(saltBuckets.intValue()-1)+"," + Long.MIN_VALUE + ",51]\nCLIENT MERGE SORT", queryPlan); } @@ -237,9 +237,9 @@ protected Pair testUpdatableViewIndex(Integer saltBuckets, boolean } else { physicalTableName = viewIndexPhysicalName; assertEquals(saltBuckets == null - ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + viewIndexPhysicalName +" [" + (Short.MIN_VALUE+1) + ",'foo']\n" + ? 
"CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + viewIndexPhysicalName +" [" + (Long.MIN_VALUE+1) + ",'foo']\n" + " SERVER FILTER BY FIRST KEY ONLY" - : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [0," + (Short.MIN_VALUE+1) + ",'foo'] - ["+(saltBuckets.intValue()-1)+"," + (Short.MIN_VALUE+1) + ",'foo']\n" + : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [0," + (Long.MIN_VALUE+1) + ",'foo'] - ["+(saltBuckets.intValue()-1)+"," + (Long.MIN_VALUE+1) + ",'foo']\n" + " SERVER FILTER BY FIRST KEY ONLY\n" + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs)); diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java index 31f356959a5..a3176937220 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java @@ -130,8 +130,8 @@ private void testMultiCFViewIndex(boolean localIndex, boolean isNamespaceEnabled String sequenceNameA = getViewIndexSequenceName(PNameFactory.newName(tableName), PNameFactory.newName(tenantId2), isNamespaceEnabled); String sequenceNameB = getViewIndexSequenceName(PNameFactory.newName(tableName), PNameFactory.newName(tenantId1), isNamespaceEnabled); String sequenceSchemaName = getViewIndexSequenceSchemaName(PNameFactory.newName(tableName), isNamespaceEnabled); - verifySequenceValue(isNamespaceEnabled? tenantId2 : null, sequenceNameA, sequenceSchemaName, -32767); - verifySequenceValue(isNamespaceEnabled? tenantId1 : null, sequenceNameB, sequenceSchemaName, -32767); + verifySequenceValue(isNamespaceEnabled? tenantId2 : null, sequenceNameA, sequenceSchemaName, -9223372036854775807L); + verifySequenceValue(isNamespaceEnabled? 
tenantId1 : null, sequenceNameB, sequenceSchemaName, -9223372036854775807L); Properties props = new Properties(); props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId2); @@ -207,7 +207,7 @@ private void createViewAndIndexesWithTenantId(String tableName, String viewName, assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + Bytes.toString(MetaDataUtil.getViewIndexPhysicalName( SchemaUtil.getPhysicalTableName(Bytes.toBytes(tableName), isNamespaceMapped).toBytes())) - + " [-32768,'" + tenantId + "','f']\n" + " SERVER FILTER BY FIRST KEY ONLY", + + " [-9223372036854775808,'" + tenantId + "','f']\n" + " SERVER FILTER BY FIRST KEY ONLY", QueryUtil.getExplainPlan(rs)); } diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java index 80c92ea4a9a..b0786e68044 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java @@ -192,7 +192,7 @@ public void testMapTableToNamespaceDuringUpgrade() + " IS NULL AND " + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + " = '" + newSchemaName + "' AND " + PhoenixDatabaseMetaData.SEQUENCE_NAME + "='" + newSequenceName + "'"); assertTrue(rs.next()); - assertEquals("-32765", rs.getString(1)); + assertEquals("-9223372036854775805", rs.getString(1)); rs = phxConn.createStatement().executeQuery("SELECT " + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + "," + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + "," + PhoenixDatabaseMetaData.CURRENT_VALUE + " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE + " WHERE " + PhoenixDatabaseMetaData.TENANT_ID diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java index 558b92e5ce4..1821e9e5b38 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java @@ 
-1191,9 +1191,9 @@ public static Pair testUpdatableViewIndex(String fullTableName, In queryPlan); } else { assertEquals(saltBuckets == null - ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [" + Short.MIN_VALUE + ",51]" + ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [" + Long.MIN_VALUE + ",51]" : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [0," - + Short.MIN_VALUE + ",51] - [" + (saltBuckets.intValue() - 1) + "," + Short.MIN_VALUE + + Long.MIN_VALUE + ",51] - [" + (saltBuckets.intValue() - 1) + "," + Long.MIN_VALUE + ",51]\nCLIENT MERGE SORT", queryPlan); } @@ -1235,10 +1235,10 @@ public static Pair testUpdatableViewIndex(String fullTableName, In assertEquals( saltBuckets == null ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [" - + (Short.MIN_VALUE + 1) + ",'foo']\n" + " SERVER FILTER BY FIRST KEY ONLY" + + (Long.MIN_VALUE + 1) + ",'foo']\n" + " SERVER FILTER BY FIRST KEY ONLY" : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER " + viewIndexPhysicalName - + " [0," + (Short.MIN_VALUE + 1) + ",'foo'] - [" + (saltBuckets.intValue() - 1) - + "," + (Short.MIN_VALUE + 1) + ",'foo']\n" + + " [0," + (Long.MIN_VALUE + 1) + ",'foo'] - [" + (saltBuckets.intValue() - 1) + + "," + (Long.MIN_VALUE + 1) + ",'foo']\n" + " SERVER FILTER BY FIRST KEY ONLY\n" + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs)); } diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ChildViewsUseParentViewIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ChildViewsUseParentViewIndexIT.java index f3ee2cb16d7..54528a940af 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ChildViewsUseParentViewIndexIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ChildViewsUseParentViewIndexIT.java @@ -161,7 +161,7 @@ private void assertQueryUsesIndex(final String baseTableName, final String viewN ResultSet rs = 
conn.prepareStatement("EXPLAIN " + sql).executeQuery(); String childViewScanKey = isChildView ? ",'Y'" : ""; assertEquals( - "CLIENT PARALLEL 1-WAY SKIP SCAN ON 3 KEYS OVER _IDX_" + baseTableName + " [-32768,'1'" + childViewScanKey + "] - [-32768,'3'" + childViewScanKey + "]\n" + + "CLIENT PARALLEL 1-WAY SKIP SCAN ON 3 KEYS OVER _IDX_" + baseTableName + " [-9223372036854775808,'1'" + childViewScanKey + "] - [-9223372036854775808,'3'" + childViewScanKey + "]\n" + " SERVER FILTER BY FIRST KEY ONLY", QueryUtil.getExplainPlan(rs)); @@ -264,7 +264,7 @@ private void assertQueryIndex(String viewName, String baseTableName, Connection " ORDER BY WO_ID, A_DATE DESC"; ResultSet rs = conn.prepareStatement("EXPLAIN " + sql).executeQuery(); assertEquals( - "CLIENT PARALLEL 1-WAY SKIP SCAN ON 5 RANGES OVER _IDX_" + baseTableName + " [-32768,'00Dxxxxxxxxxxx1','003xxxxxxxxxxx1',*] - [-32768,'00Dxxxxxxxxxxx1','003xxxxxxxxxxx5',~'2016-01-01 06:00:00.000']\n" + + "CLIENT PARALLEL 1-WAY SKIP SCAN ON 5 RANGES OVER _IDX_" + baseTableName + " [-9223372036854775808,'00Dxxxxxxxxxxx1','003xxxxxxxxxxx1',*] - [-9223372036854775808,'00Dxxxxxxxxxxx1','003xxxxxxxxxxx5',~'2016-01-01 06:00:00.000']\n" + " SERVER FILTER BY FIRST KEY ONLY", QueryUtil.getExplainPlan(rs)); diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java index 51e9de9e5f9..09003b05346 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java @@ -344,7 +344,7 @@ public void testGlobalIndexOptimizationOnSharedIndex() throws Exception { "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + dataTableName + "\n" + " SERVER FILTER BY V1 = 'a'\n" + " SKIP-SCAN-JOIN TABLE 0\n" + - " CLIENT PARALLEL 1-WAY SKIP SCAN ON 2 KEYS OVER _IDX_" + dataTableName + " \\[-32768,1\\] - 
\\[-32768,2\\]\n" + + " CLIENT PARALLEL 1-WAY SKIP SCAN ON 2 KEYS OVER _IDX_" + dataTableName + " \\[-9223372036854775808,1\\] - \\[-9223372036854775808,2\\]\n" + " SERVER FILTER BY FIRST KEY ONLY AND \"K2\" IN \\(3,4\\)\n" + " DYNAMIC SERVER FILTER BY \\(\"" + viewName + ".T_ID\", \"" + viewName + ".K1\", \"" + viewName + ".K2\"\\) IN \\(\\(\\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+, \\$\\d+.\\$\\d+\\)\\)"; assertTrue("Expected:\n" + expected + "\ndid not match\n" + actual, Pattern.matches(expected,actual)); diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexUsageIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexUsageIT.java index 14b569ad7a0..f114010c660 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexUsageIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexUsageIT.java @@ -496,7 +496,7 @@ private void helpTestUpdatableViewIndex(boolean local) throws Exception { assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + dataTableName + " [1,173]\n" + "CLIENT MERGE SORT", queryPlan); } else { - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + dataTableName + " [" + Short.MIN_VALUE + ",173]", queryPlan); + assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + dataTableName + " [" + Long.MIN_VALUE + ",173]", queryPlan); } rs = conn.createStatement().executeQuery(query); assertTrue(rs.next()); @@ -516,7 +516,7 @@ private void helpTestUpdatableViewIndex(boolean local) throws Exception { + ",'foo2_bar2']\n" + " SERVER FILTER BY FIRST KEY ONLY\n" + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs)); } else { - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + dataTableName + " [" + (Short.MIN_VALUE + 1) + ",'foo2_bar2']\n" + assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + dataTableName + " [" + (Long.MIN_VALUE + 1) + ",'foo2_bar2']\n" + " SERVER FILTER BY FIRST KEY ONLY", QueryUtil.getExplainPlan(rs)); } rs = 
conn.createStatement().executeQuery(query); diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java index 41616f2822c..5ae964553ab 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java @@ -201,7 +201,7 @@ public void testDropLocalIndexTable() throws Exception { Connection conn1 = getConnection(); Connection conn2 = getConnection(); conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName + " ON " + tableName + "(v1)"); - verifySequenceValue(null, sequenceName, sequenceSchemaName,-32767); + verifySequenceValue(null, sequenceName, sequenceSchemaName,-9223372036854775807L); conn2.createStatement().executeQuery("SELECT * FROM " + tableName).next(); conn1.createStatement().execute("DROP TABLE "+ tableName); verifySequenceNotExists(null, sequenceName, sequenceSchemaName); diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java index dbda4e8f82c..b5279be3f3f 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java @@ -588,9 +588,9 @@ public void preBatchMutate(ObserverContext c, Mini if (Bytes.toString(family).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { int regionStartKeyLen = c.getEnvironment().getRegionInfo().getStartKey().length; Cell firstCell = entry.getValue().get(0); - short indexId = MetaDataUtil.getViewIndexIdDataType().getCodec().decodeShort(firstCell.getRowArray(), firstCell.getRowOffset() + regionStartKeyLen, SortOrder.getDefault()); + long indexId = MetaDataUtil.getViewIndexIdDataType().getCodec().decodeLong(firstCell.getRowArray(), 
firstCell.getRowOffset() + regionStartKeyLen, SortOrder.getDefault()); // Only throw for first local index as the test may have multiple local indexes - if (indexId == Short.MIN_VALUE) { + if (indexId == Long.MIN_VALUE) { throwException = true; break; } diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java index 3ce6ae59809..11cfc5c930f 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java @@ -124,10 +124,10 @@ public void testDeleteViewIndexSequences() throws Exception { String sequenceSchemaName = getViewIndexSequenceSchemaName(PNameFactory.newName(fullTableName), isNamespaceMapped); String seqName = getViewIndexSequenceName(PNameFactory.newName(fullTableName), null, !isNamespaceMapped); String seqSchemaName = getViewIndexSequenceSchemaName(PNameFactory.newName(fullTableName), !isNamespaceMapped); - verifySequenceValue(null, sequenceName, sequenceSchemaName, -32767); - verifySequenceValue(null, sequenceName, sequenceSchemaName, -32767); + verifySequenceValue(null, sequenceName, sequenceSchemaName, -9223372036854775807L); + verifySequenceValue(null, sequenceName, sequenceSchemaName, -9223372036854775807L); conn1.createStatement().execute("CREATE INDEX " + indexName + "_2 ON " + fullViewName + " (v1)"); - verifySequenceValue(null, sequenceName, sequenceSchemaName, -32766); + verifySequenceValue(null, sequenceName, sequenceSchemaName, -9223372036854775806L); // Check other format of sequence is not there as Sequences format is different for views/indexes created on // table which are namespace mapped and which are not. 
verifySequenceNotExists(null, seqName, seqSchemaName); diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java index a72aa921ba3..6b863f7c881 100644 --- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java +++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java @@ -164,8 +164,7 @@ public StoreFileReader preStoreFileReaderOpen(ObserverContext emptyList(), false, Collections. emptyList(), null, null, false, false, - false, null, null, null, false, null, 0, 0L, SchemaUtil + false, null, null, null, null, false, null, 0, 0L, SchemaUtil .isNamespaceMappingEnabled(PTableType.SUBQUERY, connection.getQueryServices().getProps()), null, false, ImmutableStorageScheme.ONE_CELL_PER_COLUMN, QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, PTable.EncodedCQCounter.NULL_COUNTER, true); String alias = subselectNode.getAlias(); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java index 824d933d683..36bfc5fdc23 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java @@ -1275,7 +1275,7 @@ public static PTable joinProjectedTables(PTable left, PTable right, JoinType typ left.getType(), left.getIndexState(), left.getTimeStamp(), left.getSequenceNumber(), left.getPKName(), left.getBucketNum(), merged, left.getParentSchemaName(), left.getParentTableName(), left.getIndexes(), left.isImmutableRows(), Collections. 
emptyList(), null, null, PTable.DEFAULT_DISABLE_WAL, - left.isMultiTenant(), left.getStoreNulls(), left.getViewType(), left.getViewIndexId(), + left.isMultiTenant(), left.getStoreNulls(), left.getViewType(), left.getViewIndexType(), left.getViewIndexId(), left.getIndexType(), left.rowKeyOrderOptimizable(), left.getTransactionProvider(), left.getUpdateCacheFrequency(), left.getIndexDisableTimestamp(), left.isNamespaceMapped(), left.getAutoPartitionSeqName(), left.isAppendOnlySchema(), ONE_CELL_PER_COLUMN, NON_ENCODED_QUALIFIERS, PTable.EncodedCQCounter.NULL_COUNTER, left.useStatsForParallelization()); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java index 91be356c7e9..a2937bfb6a3 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java @@ -166,7 +166,7 @@ public static PTable createProjectedTable(SelectStatement select, StatementConte table.getPKName(), table.getBucketNum(), projectedColumns, table.getParentSchemaName(), table.getParentTableName(), table.getIndexes(), table.isImmutableRows(), Collections. 
emptyList(), table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), - table.getViewIndexId(), + table.getViewIndexType(), table.getViewIndexId(), table.getIndexType(), table.rowKeyOrderOptimizable(), table.getTransactionProvider(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter(), table.useStatsForParallelization()); } @@ -198,7 +198,7 @@ public static PTable createProjectedTable(TableRef tableRef, List sou table.getBucketNum(), projectedColumns, null, null, Collections. emptyList(), table.isImmutableRows(), Collections. emptyList(), null, null, table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), - table.getViewIndexId(), null, table.rowKeyOrderOptimizable(), table.getTransactionProvider(), + table.getViewIndexType(), table.getViewIndexId(), null, table.rowKeyOrderOptimizable(), table.getTransactionProvider(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getImmutableStorageScheme(), table.getEncodingScheme(), cqCounter, table.useStatsForParallelization()); } diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java index c94634bb822..6327ec6310d 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java @@ -100,7 +100,7 @@ public static TableRef contructSchemaTable(PhoenixStatement statement, List TABLE_KV_COLUMNS = Arrays.asList( EMPTY_KEYVALUE_KV, @@ -350,6 +355,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol 
implements RegionCopr MULTI_TENANT_KV, VIEW_TYPE_KV, VIEW_INDEX_ID_KV, + USE_LONG_VIEW_INDEX_ID_KV, INDEX_TYPE_KV, INDEX_DISABLE_TIMESTAMP_KV, STORE_NULLS_KV, @@ -382,6 +388,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr private static final int DISABLE_WAL_INDEX = TABLE_KV_COLUMNS.indexOf(DISABLE_WAL_KV); private static final int MULTI_TENANT_INDEX = TABLE_KV_COLUMNS.indexOf(MULTI_TENANT_KV); private static final int VIEW_TYPE_INDEX = TABLE_KV_COLUMNS.indexOf(VIEW_TYPE_KV); + private static final int USE_LONG_VIEW_INDEX = TABLE_KV_COLUMNS.indexOf(USE_LONG_VIEW_INDEX_ID_KV); private static final int VIEW_INDEX_ID_INDEX = TABLE_KV_COLUMNS.indexOf(VIEW_INDEX_ID_KV); private static final int INDEX_TYPE_INDEX = TABLE_KV_COLUMNS.indexOf(INDEX_TYPE_KV); private static final int STORE_NULLS_INDEX = TABLE_KV_COLUMNS.indexOf(STORE_NULLS_KV); @@ -1349,8 +1356,8 @@ private PTable getTable(RegionScanner scanner, long clientTimeStamp, long tableT } Cell viewTypeKv = tableKeyValues[VIEW_TYPE_INDEX]; ViewType viewType = viewTypeKv == null ? null : ViewType.fromSerializedValue(viewTypeKv.getValueArray()[viewTypeKv.getValueOffset()]); - Cell viewIndexIdKv = tableKeyValues[VIEW_INDEX_ID_INDEX]; - Short viewIndexId = viewIndexIdKv == null ? null : (Short)MetaDataUtil.getViewIndexIdDataType().getCodec().decodeShort(viewIndexIdKv.getValueArray(), viewIndexIdKv.getValueOffset(), SortOrder.getDefault()); + PDataType viewIndexType = getViewIndexType(tableKeyValues); + Long viewIndexId = getViewIndexId(tableKeyValues, viewIndexType); Cell indexTypeKv = tableKeyValues[INDEX_TYPE_INDEX]; IndexType indexType = indexTypeKv == null ? 
null : IndexType.fromSerializedValue(indexTypeKv.getValueArray()[indexTypeKv.getValueOffset()]); Cell baseColumnCountKv = tableKeyValues[BASE_COLUMN_COUNT_INDEX]; @@ -1431,11 +1438,42 @@ private PTable getTable(RegionScanner scanner, long clientTimeStamp, long tableT // server while holding this lock is a bad idea and likely to cause contention. return PTableImpl.makePTable(tenantId, schemaName, tableName, tableType, indexState, timeStamp, tableSeqNum, pkName, saltBucketNum, columns, parentSchemaName, parentTableName, indexes, isImmutableRows, physicalTables, defaultFamilyName, - viewStatement, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, + viewStatement, disableWAL, multiTenant, storeNulls, viewType, viewIndexType, viewIndexId, indexType, rowKeyOrderOptimizable, transactionProvider, updateCacheFrequency, baseColumnCount, indexDisableTimestamp, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema, storageScheme, encodingScheme, cqCounter, useStatsForParallelization); } + private Long getViewIndexId(Cell[] tableKeyValues, PDataType viewIndexType) { + Cell viewIndexIdKv = tableKeyValues[VIEW_INDEX_ID_INDEX]; + return viewIndexIdKv == null ? null : + decodeViewIndexId(viewIndexIdKv, viewIndexType); + } + /** + * check the value for {@value USE_LONG_VIEW_INDEX} and if its present consider viewIndexId as long otherwise + * read as short and convert it to long + * + * @param tableKeyValues + * @param viewIndexType + * @return + */ + private Long decodeViewIndexId(Cell viewIndexIdKv, PDataType viewIndexType) { + boolean useLongViewIndex = MetaDataUtil.getViewIndexIdDataType().equals(viewIndexType); + return new Long( + useLongViewIndex + ? 
viewIndexType.getCodec().decodeLong(viewIndexIdKv.getValueArray(), + viewIndexIdKv.getValueOffset(), SortOrder.getDefault()) + : MetaDataUtil.getLegacyViewIndexIdDataType().getCodec().decodeShort(viewIndexIdKv.getValueArray(), + viewIndexIdKv.getValueOffset(), SortOrder.getDefault()) + ); + } + + private PDataType getViewIndexType(Cell[] tableKeyValues) { + Cell useLongViewIndexKv = tableKeyValues[USE_LONG_VIEW_INDEX]; + boolean useLongViewIndex = useLongViewIndexKv != null; + return useLongViewIndex ? + MetaDataUtil.getViewIndexIdDataType() + : MetaDataUtil.getLegacyViewIndexIdDataType(); + } private boolean isQualifierCounterKV(Cell kv) { int cmp = Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), @@ -2160,7 +2198,7 @@ public void createTable(RpcController controller, CreateTableRequest request, cell.getTimestamp(), bytes, 0, bytes.length, cell.getType()); cells.add(viewConstantCell); } - Short indexId = null; + Long indexId = null; if (request.hasAllocateIndexId() && request.getAllocateIndexId()) { String tenantIdStr = tenantIdBytes.length == 0 ? 
null : Bytes.toString(tenantIdBytes); try (PhoenixConnection connection = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class)) { @@ -2174,7 +2212,7 @@ public void createTable(RpcController controller, CreateTableRequest request, long sequenceTimestamp = HConstants.LATEST_TIMESTAMP; try { connection.getQueryServices().createSequence(key.getTenantId(), key.getSchemaName(), key.getSequenceName(), - Short.MIN_VALUE, 1, 1, Long.MIN_VALUE, Long.MAX_VALUE, false, sequenceTimestamp); + Long.MIN_VALUE, 1, 1, Long.MIN_VALUE, Long.MAX_VALUE, false, sequenceTimestamp); } catch (SequenceAlreadyExistsException e) { } long[] seqValues = new long[1]; @@ -2185,7 +2223,7 @@ public void createTable(RpcController controller, CreateTableRequest request, throw sqlExceptions[0]; } long seqValue = seqValues[0]; - if (seqValue > Short.MAX_VALUE) { + if (seqValue > Long.MAX_VALUE) { builder.setReturnCode(MetaDataProtos.MutationCode.TOO_MANY_INDEXES); builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); done.run(builder.build()); @@ -2208,7 +2246,7 @@ public void createTable(RpcController controller, CreateTableRequest request, VIEW_INDEX_ID_BYTES.length, cell.getTimestamp(), bytes, 0, bytes.length, cell.getType()); cells.add(indexIdCell); - indexId = (short) seqValue; + indexId = (long) seqValue; } } @@ -2285,6 +2323,7 @@ public void createTable(RpcController controller, CreateTableRequest request, builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND); if (indexId != null) { builder.setViewIndexId(indexId); + builder.setUseLongViewIndexId(true); } builder.setMutationTime(currentTimeStamp); done.run(builder.build()); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java index 874a382bea5..611c466b328 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java +++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java @@ -38,11 +38,13 @@ import org.apache.phoenix.schema.PNameFactory; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableImpl; +import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.util.ByteUtil; import com.google.common.base.Function; import com.google.common.collect.Lists; import com.google.protobuf.ByteString; +import org.apache.phoenix.util.MetaDataUtil; /** * @@ -175,7 +177,8 @@ public static class SharedTableState { private PName tableName; private List columns; private List physicalNames; - private Short viewIndexId; + private PDataType viewIndexType; + private Long viewIndexId; public SharedTableState(PTable table) { this.tenantId = table.getTenantId(); @@ -183,6 +186,7 @@ public SharedTableState(PTable table) { this.tableName = table.getTableName(); this.columns = table.getColumns(); this.physicalNames = table.getPhysicalNames(); + this.viewIndexType = table.getViewIndexType(); this.viewIndexId = table.getViewIndexId(); } @@ -205,7 +209,10 @@ public PName apply(ByteString physicalName) { return PNameFactory.newName(physicalName.toByteArray()); } }); - this.viewIndexId = (short)sharedTable.getViewIndexId(); + this.viewIndexId = sharedTable.getViewIndexId(); + this.viewIndexType = sharedTable.hasUseLongViewIndexId() + ? 
MetaDataUtil.getViewIndexIdDataType() + : MetaDataUtil.getLegacyViewIndexIdDataType(); } public PName getTenantId() { @@ -228,10 +235,13 @@ public List getPhysicalNames() { return physicalNames; } - public Short getViewIndexId() { + public Long getViewIndexId() { return viewIndexId; } - + + public PDataType getViewIndexType() { + return viewIndexType; + } } public static class MetaDataMutationResult { @@ -244,8 +254,8 @@ public static class MetaDataMutationResult { private byte[] familyName; private boolean wasUpdated; private PSchema schema; - private Short viewIndexId; - + private Long viewIndexId; + private PDataType viewIndexType; private List functions = new ArrayList(1); private long autoPartitionNum; @@ -290,9 +300,10 @@ public MetaDataMutationResult(MutationCode returnCode, long currentTime, PTable this.tableNamesToDelete = tableNamesToDelete; } - public MetaDataMutationResult(MutationCode returnCode, int currentTime, PTable table, int viewIndexId) { + public MetaDataMutationResult(MutationCode returnCode, int currentTime, PTable table, long viewIndexId, PDataType viewIndexType ) { this(returnCode, currentTime, table, Collections. 
emptyList()); - this.viewIndexId = (short)viewIndexId; + this.viewIndexId = viewIndexId; + this.viewIndexType = viewIndexType; } public MetaDataMutationResult(MutationCode returnCode, long currentTime, PTable table, List tableNamesToDelete, List sharedTablesToDelete) { @@ -348,10 +359,14 @@ public long getAutoPartitionNum() { return autoPartitionNum; } - public Short getViewIndexId() { + public Long getViewIndexId() { return viewIndexId; } + public PDataType getViewIndexType() { + return viewIndexType; + } + public static MetaDataMutationResult constructFromProto(MetaDataResponse proto) { MetaDataMutationResult result = new MetaDataMutationResult(); result.returnCode = MutationCode.values()[proto.getReturnCode().ordinal()]; @@ -393,9 +408,12 @@ public static MetaDataMutationResult constructFromProto(MetaDataResponse proto) if (proto.hasAutoPartitionNum()) { result.autoPartitionNum = proto.getAutoPartitionNum(); } - if (proto.hasViewIndexId()) { - result.viewIndexId = (short)proto.getViewIndexId(); - } + if (proto.hasViewIndexId()) { + result.viewIndexId = proto.getViewIndexId(); + } + result.viewIndexType = proto.hasUseLongViewIndexId() + ? 
MetaDataUtil.getViewIndexIdDataType() + : MetaDataUtil.getLegacyViewIndexIdDataType(); return result; } @@ -436,6 +454,7 @@ public static MetaDataResponse toProto(MetaDataMutationResult result) { sharedTableStateBuilder.setSchemaName(ByteStringer.wrap(sharedTableState.getSchemaName().getBytes())); sharedTableStateBuilder.setTableName(ByteStringer.wrap(sharedTableState.getTableName().getBytes())); sharedTableStateBuilder.setViewIndexId(sharedTableState.getViewIndexId()); + sharedTableStateBuilder.setUseLongViewIndexId(MetaDataUtil.getViewIndexIdDataType().equals(sharedTableState.viewIndexType)); builder.addSharedTablesToDelete(sharedTableStateBuilder.build()); } } @@ -445,6 +464,7 @@ public static MetaDataResponse toProto(MetaDataMutationResult result) { builder.setAutoPartitionNum(result.getAutoPartitionNum()); if (result.getViewIndexId() != null) { builder.setViewIndexId(result.getViewIndexId()); + builder.setUseLongViewIndexId(MetaDataUtil.getViewIndexIdDataType().equals(result.getViewIndexType())); } } return builder.build(); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java index 744dc7ec16d..fd54ce8c37e 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java @@ -369,15 +369,25 @@ org.apache.phoenix.coprocessor.generated.PTableProtos.PColumnOrBuilder getColumn */ com.google.protobuf.ByteString getPhysicalNames(int index); - // required int32 viewIndexId = 6; + // required int64 viewIndexId = 6; /** - * required int32 viewIndexId = 6; + * required int64 viewIndexId = 6; */ boolean hasViewIndexId(); /** - * required int32 viewIndexId = 6; + * required int64 viewIndexId = 6; */ - int getViewIndexId(); + long getViewIndexId(); + + // optional bool useLongViewIndexId = 7; + /** + * optional 
bool useLongViewIndexId = 7; + */ + boolean hasUseLongViewIndexId(); + /** + * optional bool useLongViewIndexId = 7; + */ + boolean getUseLongViewIndexId(); } /** * Protobuf type {@code SharedTableState} @@ -463,7 +473,12 @@ private SharedTableState( } case 48: { bitField0_ |= 0x00000008; - viewIndexId_ = input.readInt32(); + viewIndexId_ = input.readInt64(); + break; + } + case 56: { + bitField0_ |= 0x00000010; + useLongViewIndexId_ = input.readBool(); break; } } @@ -619,29 +634,46 @@ public com.google.protobuf.ByteString getPhysicalNames(int index) { return physicalNames_.get(index); } - // required int32 viewIndexId = 6; + // required int64 viewIndexId = 6; public static final int VIEWINDEXID_FIELD_NUMBER = 6; - private int viewIndexId_; + private long viewIndexId_; /** - * required int32 viewIndexId = 6; + * required int64 viewIndexId = 6; */ public boolean hasViewIndexId() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** - * required int32 viewIndexId = 6; + * required int64 viewIndexId = 6; */ - public int getViewIndexId() { + public long getViewIndexId() { return viewIndexId_; } + // optional bool useLongViewIndexId = 7; + public static final int USELONGVIEWINDEXID_FIELD_NUMBER = 7; + private boolean useLongViewIndexId_; + /** + * optional bool useLongViewIndexId = 7; + */ + public boolean hasUseLongViewIndexId() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool useLongViewIndexId = 7; + */ + public boolean getUseLongViewIndexId() { + return useLongViewIndexId_; + } + private void initFields() { tenantId_ = com.google.protobuf.ByteString.EMPTY; schemaName_ = com.google.protobuf.ByteString.EMPTY; tableName_ = com.google.protobuf.ByteString.EMPTY; columns_ = java.util.Collections.emptyList(); physicalNames_ = java.util.Collections.emptyList(); - viewIndexId_ = 0; + viewIndexId_ = 0L; + useLongViewIndexId_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -689,7 +721,10 @@ 
public void writeTo(com.google.protobuf.CodedOutputStream output) output.writeBytes(5, physicalNames_.get(i)); } if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeInt32(6, viewIndexId_); + output.writeInt64(6, viewIndexId_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBool(7, useLongViewIndexId_); } getUnknownFields().writeTo(output); } @@ -727,7 +762,11 @@ public int getSerializedSize() { } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream - .computeInt32Size(6, viewIndexId_); + .computeInt64Size(6, viewIndexId_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(7, useLongViewIndexId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -776,6 +815,11 @@ public boolean equals(final java.lang.Object obj) { result = result && (getViewIndexId() == other.getViewIndexId()); } + result = result && (hasUseLongViewIndexId() == other.hasUseLongViewIndexId()); + if (hasUseLongViewIndexId()) { + result = result && (getUseLongViewIndexId() + == other.getUseLongViewIndexId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -811,7 +855,11 @@ public int hashCode() { } if (hasViewIndexId()) { hash = (37 * hash) + VIEWINDEXID_FIELD_NUMBER; - hash = (53 * hash) + getViewIndexId(); + hash = (53 * hash) + hashLong(getViewIndexId()); + } + if (hasUseLongViewIndexId()) { + hash = (37 * hash) + USELONGVIEWINDEXID_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getUseLongViewIndexId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; @@ -937,8 +985,10 @@ public Builder clear() { } physicalNames_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000010); - viewIndexId_ = 0; + viewIndexId_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); + useLongViewIndexId_ = false; + bitField0_ = (bitField0_ & ~0x00000040); 
return this; } @@ -997,6 +1047,10 @@ public org.apache.phoenix.coprocessor.generated.MetaDataProtos.SharedTableState to_bitField0_ |= 0x00000008; } result.viewIndexId_ = viewIndexId_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000010; + } + result.useLongViewIndexId_ = useLongViewIndexId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -1061,6 +1115,9 @@ public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.MetaDataProtos if (other.hasViewIndexId()) { setViewIndexId(other.getViewIndexId()); } + if (other.hasUseLongViewIndexId()) { + setUseLongViewIndexId(other.getUseLongViewIndexId()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -1526,35 +1583,68 @@ public Builder clearPhysicalNames() { return this; } - // required int32 viewIndexId = 6; - private int viewIndexId_ ; + // required int64 viewIndexId = 6; + private long viewIndexId_ ; /** - * required int32 viewIndexId = 6; + * required int64 viewIndexId = 6; */ public boolean hasViewIndexId() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** - * required int32 viewIndexId = 6; + * required int64 viewIndexId = 6; */ - public int getViewIndexId() { + public long getViewIndexId() { return viewIndexId_; } /** - * required int32 viewIndexId = 6; + * required int64 viewIndexId = 6; */ - public Builder setViewIndexId(int value) { + public Builder setViewIndexId(long value) { bitField0_ |= 0x00000020; viewIndexId_ = value; onChanged(); return this; } /** - * required int32 viewIndexId = 6; + * required int64 viewIndexId = 6; */ public Builder clearViewIndexId() { bitField0_ = (bitField0_ & ~0x00000020); - viewIndexId_ = 0; + viewIndexId_ = 0L; + onChanged(); + return this; + } + + // optional bool useLongViewIndexId = 7; + private boolean useLongViewIndexId_ ; + /** + * optional bool useLongViewIndexId = 7; + */ + public boolean hasUseLongViewIndexId() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * 
optional bool useLongViewIndexId = 7; + */ + public boolean getUseLongViewIndexId() { + return useLongViewIndexId_; + } + /** + * optional bool useLongViewIndexId = 7; + */ + public Builder setUseLongViewIndexId(boolean value) { + bitField0_ |= 0x00000040; + useLongViewIndexId_ = value; + onChanged(); + return this; + } + /** + * optional bool useLongViewIndexId = 7; + */ + public Builder clearUseLongViewIndexId() { + bitField0_ = (bitField0_ & ~0x00000040); + useLongViewIndexId_ = false; onChanged(); return this; } @@ -1725,15 +1815,25 @@ org.apache.phoenix.coprocessor.generated.MetaDataProtos.SharedTableStateOrBuilde */ long getAutoPartitionNum(); - // optional int32 viewIndexId = 12; + // optional int64 viewIndexId = 12; /** - * optional int32 viewIndexId = 12; + * optional int64 viewIndexId = 12; */ boolean hasViewIndexId(); /** - * optional int32 viewIndexId = 12; + * optional int64 viewIndexId = 12; */ - int getViewIndexId(); + long getViewIndexId(); + + // optional bool useLongViewIndexId = 13; + /** + * optional bool useLongViewIndexId = 13; + */ + boolean hasUseLongViewIndexId(); + /** + * optional bool useLongViewIndexId = 13; + */ + boolean getUseLongViewIndexId(); } /** * Protobuf type {@code MetaDataResponse} @@ -1874,7 +1974,12 @@ private MetaDataResponse( } case 96: { bitField0_ |= 0x00000100; - viewIndexId_ = input.readInt32(); + viewIndexId_ = input.readInt64(); + break; + } + case 104: { + bitField0_ |= 0x00000200; + useLongViewIndexId_ = input.readBool(); break; } } @@ -2161,22 +2266,38 @@ public long getAutoPartitionNum() { return autoPartitionNum_; } - // optional int32 viewIndexId = 12; + // optional int64 viewIndexId = 12; public static final int VIEWINDEXID_FIELD_NUMBER = 12; - private int viewIndexId_; + private long viewIndexId_; /** - * optional int32 viewIndexId = 12; + * optional int64 viewIndexId = 12; */ public boolean hasViewIndexId() { return ((bitField0_ & 0x00000100) == 0x00000100); } /** - * optional int32 viewIndexId = 12; + * 
optional int64 viewIndexId = 12; */ - public int getViewIndexId() { + public long getViewIndexId() { return viewIndexId_; } + // optional bool useLongViewIndexId = 13; + public static final int USELONGVIEWINDEXID_FIELD_NUMBER = 13; + private boolean useLongViewIndexId_; + /** + * optional bool useLongViewIndexId = 13; + */ + public boolean hasUseLongViewIndexId() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional bool useLongViewIndexId = 13; + */ + public boolean getUseLongViewIndexId() { + return useLongViewIndexId_; + } + private void initFields() { returnCode_ = org.apache.phoenix.coprocessor.generated.MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS; mutationTime_ = 0L; @@ -2189,7 +2310,8 @@ private void initFields() { sharedTablesToDelete_ = java.util.Collections.emptyList(); schema_ = org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema.getDefaultInstance(); autoPartitionNum_ = 0L; - viewIndexId_ = 0; + viewIndexId_ = 0L; + useLongViewIndexId_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -2261,7 +2383,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) output.writeInt64(11, autoPartitionNum_); } if (((bitField0_ & 0x00000100) == 0x00000100)) { - output.writeInt32(12, viewIndexId_); + output.writeInt64(12, viewIndexId_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeBool(13, useLongViewIndexId_); } getUnknownFields().writeTo(output); } @@ -2323,7 +2448,11 @@ public int getSerializedSize() { } if (((bitField0_ & 0x00000100) == 0x00000100)) { size += com.google.protobuf.CodedOutputStream - .computeInt32Size(12, viewIndexId_); + .computeInt64Size(12, viewIndexId_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(13, useLongViewIndexId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -2399,6 +2528,11 @@ public boolean 
equals(final java.lang.Object obj) { result = result && (getViewIndexId() == other.getViewIndexId()); } + result = result && (hasUseLongViewIndexId() == other.hasUseLongViewIndexId()); + if (hasUseLongViewIndexId()) { + result = result && (getUseLongViewIndexId() + == other.getUseLongViewIndexId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -2458,7 +2592,11 @@ public int hashCode() { } if (hasViewIndexId()) { hash = (37 * hash) + VIEWINDEXID_FIELD_NUMBER; - hash = (53 * hash) + getViewIndexId(); + hash = (53 * hash) + hashLong(getViewIndexId()); + } + if (hasUseLongViewIndexId()) { + hash = (37 * hash) + USELONGVIEWINDEXID_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getUseLongViewIndexId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; @@ -2611,8 +2749,10 @@ public Builder clear() { bitField0_ = (bitField0_ & ~0x00000200); autoPartitionNum_ = 0L; bitField0_ = (bitField0_ & ~0x00000400); - viewIndexId_ = 0; + viewIndexId_ = 0L; bitField0_ = (bitField0_ & ~0x00000800); + useLongViewIndexId_ = false; + bitField0_ = (bitField0_ & ~0x00001000); return this; } @@ -2708,6 +2848,10 @@ public org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse to_bitField0_ |= 0x00000100; } result.viewIndexId_ = viewIndexId_; + if (((from_bitField0_ & 0x00001000) == 0x00001000)) { + to_bitField0_ |= 0x00000200; + } + result.useLongViewIndexId_ = useLongViewIndexId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -2813,6 +2957,9 @@ public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.MetaDataProtos if (other.hasViewIndexId()) { setViewIndexId(other.getViewIndexId()); } + if (other.hasUseLongViewIndexId()) { + setUseLongViewIndexId(other.getUseLongViewIndexId()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -3860,35 +4007,68 @@ public Builder clearAutoPartitionNum() { return this; } - // optional int32 viewIndexId = 12; - 
private int viewIndexId_ ; + // optional int64 viewIndexId = 12; + private long viewIndexId_ ; /** - * optional int32 viewIndexId = 12; + * optional int64 viewIndexId = 12; */ public boolean hasViewIndexId() { return ((bitField0_ & 0x00000800) == 0x00000800); } /** - * optional int32 viewIndexId = 12; + * optional int64 viewIndexId = 12; */ - public int getViewIndexId() { + public long getViewIndexId() { return viewIndexId_; } /** - * optional int32 viewIndexId = 12; + * optional int64 viewIndexId = 12; */ - public Builder setViewIndexId(int value) { + public Builder setViewIndexId(long value) { bitField0_ |= 0x00000800; viewIndexId_ = value; onChanged(); return this; } /** - * optional int32 viewIndexId = 12; + * optional int64 viewIndexId = 12; */ public Builder clearViewIndexId() { bitField0_ = (bitField0_ & ~0x00000800); - viewIndexId_ = 0; + viewIndexId_ = 0L; + onChanged(); + return this; + } + + // optional bool useLongViewIndexId = 13; + private boolean useLongViewIndexId_ ; + /** + * optional bool useLongViewIndexId = 13; + */ + public boolean hasUseLongViewIndexId() { + return ((bitField0_ & 0x00001000) == 0x00001000); + } + /** + * optional bool useLongViewIndexId = 13; + */ + public boolean getUseLongViewIndexId() { + return useLongViewIndexId_; + } + /** + * optional bool useLongViewIndexId = 13; + */ + public Builder setUseLongViewIndexId(boolean value) { + bitField0_ |= 0x00001000; + useLongViewIndexId_ = value; + onChanged(); + return this; + } + /** + * optional bool useLongViewIndexId = 13; + */ + public Builder clearUseLongViewIndexId() { + bitField0_ = (bitField0_ & ~0x00001000); + useLongViewIndexId_ = false; onChanged(); return this; } @@ -17647,104 +17827,105 @@ public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCac static { java.lang.String[] descriptorData = { "\n\025MetaDataService.proto\032\014PTable.proto\032\017P" + - "Function.proto\032\rPSchema.proto\"\222\001\n\020Shared" + + 
"Function.proto\032\rPSchema.proto\"\256\001\n\020Shared" + "TableState\022\020\n\010tenantId\030\001 \001(\014\022\022\n\nschemaNa" + "me\030\002 \002(\014\022\021\n\ttableName\030\003 \002(\014\022\031\n\007columns\030\004" + " \003(\0132\010.PColumn\022\025\n\rphysicalNames\030\005 \003(\014\022\023\n" + - "\013viewIndexId\030\006 \002(\005\"\321\002\n\020MetaDataResponse\022" + - "!\n\nreturnCode\030\001 \001(\0162\r.MutationCode\022\024\n\014mu" + - "tationTime\030\002 \001(\003\022\026\n\005table\030\003 \001(\0132\007.PTable" + - "\022\026\n\016tablesToDelete\030\004 \003(\014\022\022\n\ncolumnName\030\005" + - " \001(\014\022\022\n\nfamilyName\030\006 \001(\014\022\024\n\014functionName", - "\030\007 \001(\014\022\034\n\010function\030\010 \003(\0132\n.PFunction\022/\n\024" + - "sharedTablesToDelete\030\t \003(\0132\021.SharedTable" + - "State\022\030\n\006schema\030\n \001(\0132\010.PSchema\022\030\n\020autoP" + - "artitionNum\030\013 \001(\003\022\023\n\013viewIndexId\030\014 \001(\005\"\364" + - "\001\n\017GetTableRequest\022\020\n\010tenantId\030\001 \002(\014\022\022\n\n" + - "schemaName\030\002 \002(\014\022\021\n\ttableName\030\003 \002(\014\022\026\n\016t" + - "ableTimestamp\030\004 \002(\003\022\027\n\017clientTimestamp\030\005" + - " \002(\003\022\025\n\rclientVersion\030\006 \001(\005\022\037\n\027skipAddin" + - "gParentColumns\030\007 \001(\010\022\031\n\021skipAddingIndexe" + - "s\030\010 \001(\010\022$\n\023lockedAncestorTable\030\t \001(\0132\007.P", - "Table\"\212\001\n\023GetFunctionsRequest\022\020\n\010tenantI" + - "d\030\001 \002(\014\022\025\n\rfunctionNames\030\002 \003(\014\022\032\n\022functi" + - "onTimestamps\030\003 \003(\003\022\027\n\017clientTimestamp\030\004 " + - "\002(\003\022\025\n\rclientVersion\030\005 \001(\005\"V\n\020GetSchemaR" + - "equest\022\022\n\nschemaName\030\001 \002(\t\022\027\n\017clientTime" + - "stamp\030\002 \002(\003\022\025\n\rclientVersion\030\003 \002(\005\"d\n\022Cr" + - 
"eateTableRequest\022\036\n\026tableMetadataMutatio" + - "ns\030\001 \003(\014\022\025\n\rclientVersion\030\002 \001(\005\022\027\n\017alloc" + - "ateIndexId\030\003 \001(\010\"r\n\025CreateFunctionReques" + - "t\022\036\n\026tableMetadataMutations\030\001 \003(\014\022\021\n\ttem", - "porary\030\002 \002(\010\022\017\n\007replace\030\003 \001(\010\022\025\n\rclientV" + - "ersion\030\004 \001(\005\"`\n\023CreateSchemaRequest\022\036\n\026t" + - "ableMetadataMutations\030\001 \003(\014\022\022\n\nschemaNam" + - "e\030\002 \002(\t\022\025\n\rclientVersion\030\003 \002(\005\"\216\001\n\020DropT" + - "ableRequest\022\036\n\026tableMetadataMutations\030\001 " + - "\003(\014\022\021\n\ttableType\030\002 \002(\t\022\017\n\007cascade\030\003 \001(\010\022" + - "\025\n\rclientVersion\030\004 \001(\005\022\037\n\027skipAddingPare" + - "ntColumns\030\005 \001(\010\"_\n\021DropSchemaRequest\022\037\n\027" + - "schemaMetadataMutations\030\001 \003(\014\022\022\n\nschemaN" + - "ame\030\002 \002(\t\022\025\n\rclientVersion\030\003 \002(\005\"I\n\020AddC", - "olumnRequest\022\036\n\026tableMetadataMutations\030\001" + - " \003(\014\022\025\n\rclientVersion\030\002 \001(\005\"J\n\021DropColum" + - "nRequest\022\036\n\026tableMetadataMutations\030\001 \003(\014" + - "\022\025\n\rclientVersion\030\002 \001(\005\"^\n\023DropFunctionR" + - "equest\022\036\n\026tableMetadataMutations\030\001 \003(\014\022\020" + - "\n\010ifExists\030\002 \001(\010\022\025\n\rclientVersion\030\003 \001(\005\"" + - "P\n\027UpdateIndexStateRequest\022\036\n\026tableMetad" + - "ataMutations\030\001 \003(\014\022\025\n\rclientVersion\030\002 \001(" + - "\005\"*\n\021ClearCacheRequest\022\025\n\rclientVersion\030" + - "\001 \001(\005\"*\n\022ClearCacheResponse\022\024\n\014unfreedBy", - "tes\030\001 \001(\003\"*\n\021GetVersionRequest\022\025\n\rclient" + - "Version\030\001 \001(\005\"E\n\022GetVersionResponse\022\017\n\007v" + - "ersion\030\001 \002(\003\022\036\n\026systemCatalogTimestamp\030\002" + - " 
\001(\003\"\205\001\n\032ClearTableFromCacheRequest\022\020\n\010t" + - "enantId\030\001 \002(\014\022\022\n\nschemaName\030\002 \002(\014\022\021\n\ttab" + - "leName\030\003 \002(\014\022\027\n\017clientTimestamp\030\004 \002(\003\022\025\n" + - "\rclientVersion\030\005 \001(\005\"\035\n\033ClearTableFromCa" + - "cheResponse*\271\005\n\014MutationCode\022\030\n\024TABLE_AL" + - "READY_EXISTS\020\000\022\023\n\017TABLE_NOT_FOUND\020\001\022\024\n\020C" + - "OLUMN_NOT_FOUND\020\002\022\031\n\025COLUMN_ALREADY_EXIS", - "TS\020\003\022\035\n\031CONCURRENT_TABLE_MUTATION\020\004\022\027\n\023T" + - "ABLE_NOT_IN_REGION\020\005\022\025\n\021NEWER_TABLE_FOUN" + - "D\020\006\022\034\n\030UNALLOWED_TABLE_MUTATION\020\007\022\021\n\rNO_" + - "PK_COLUMNS\020\010\022\032\n\026PARENT_TABLE_NOT_FOUND\020\t" + - "\022\033\n\027FUNCTION_ALREADY_EXISTS\020\n\022\026\n\022FUNCTIO" + - "N_NOT_FOUND\020\013\022\030\n\024NEWER_FUNCTION_FOUND\020\014\022" + - "\032\n\026FUNCTION_NOT_IN_REGION\020\r\022\031\n\025SCHEMA_AL" + - "READY_EXISTS\020\016\022\026\n\022NEWER_SCHEMA_FOUND\020\017\022\024" + - "\n\020SCHEMA_NOT_FOUND\020\020\022\030\n\024SCHEMA_NOT_IN_RE" + - "GION\020\021\022\032\n\026TABLES_EXIST_ON_SCHEMA\020\022\022\035\n\031UN", - "ALLOWED_SCHEMA_MUTATION\020\023\022%\n!AUTO_PARTIT" + - "ION_SEQUENCE_NOT_FOUND\020\024\022#\n\037CANNOT_COERC" + - "E_AUTO_PARTITION_ID\020\025\022\024\n\020TOO_MANY_INDEXE" + - "S\020\026\022\037\n\033UNABLE_TO_CREATE_CHILD_LINK\020\027\022!\n\035" + - "UNABLE_TO_UPDATE_PARENT_TABLE\020\0302\345\006\n\017Meta" + - "DataService\022/\n\010getTable\022\020.GetTableReques" + - "t\032\021.MetaDataResponse\0227\n\014getFunctions\022\024.G" + - "etFunctionsRequest\032\021.MetaDataResponse\0221\n" + - "\tgetSchema\022\021.GetSchemaRequest\032\021.MetaData" + - "Response\0225\n\013createTable\022\023.CreateTableReq", - "uest\032\021.MetaDataResponse\022;\n\016createFunctio" + - "n\022\026.CreateFunctionRequest\032\021.MetaDataResp" + - 
"onse\0227\n\014createSchema\022\024.CreateSchemaReque" + - "st\032\021.MetaDataResponse\0221\n\tdropTable\022\021.Dro" + - "pTableRequest\032\021.MetaDataResponse\0223\n\ndrop" + - "Schema\022\022.DropSchemaRequest\032\021.MetaDataRes" + - "ponse\0227\n\014dropFunction\022\024.DropFunctionRequ" + - "est\032\021.MetaDataResponse\0221\n\taddColumn\022\021.Ad" + - "dColumnRequest\032\021.MetaDataResponse\0223\n\ndro" + - "pColumn\022\022.DropColumnRequest\032\021.MetaDataRe", - "sponse\022?\n\020updateIndexState\022\030.UpdateIndex" + - "StateRequest\032\021.MetaDataResponse\0225\n\nclear" + - "Cache\022\022.ClearCacheRequest\032\023.ClearCacheRe" + - "sponse\0225\n\ngetVersion\022\022.GetVersionRequest" + - "\032\023.GetVersionResponse\022P\n\023clearTableFromC" + - "ache\022\033.ClearTableFromCacheRequest\032\034.Clea" + - "rTableFromCacheResponseBB\n(org.apache.ph" + - "oenix.coprocessor.generatedB\016MetaDataPro" + - "tosH\001\210\001\001\240\001\001" + "\013viewIndexId\030\006 \002(\003\022\032\n\022useLongViewIndexId" + + "\030\007 \001(\010\"\355\002\n\020MetaDataResponse\022!\n\nreturnCod" + + "e\030\001 \001(\0162\r.MutationCode\022\024\n\014mutationTime\030\002" + + " \001(\003\022\026\n\005table\030\003 \001(\0132\007.PTable\022\026\n\016tablesTo" + + "Delete\030\004 \003(\014\022\022\n\ncolumnName\030\005 \001(\014\022\022\n\nfami", + "lyName\030\006 \001(\014\022\024\n\014functionName\030\007 \001(\014\022\034\n\010fu" + + "nction\030\010 \003(\0132\n.PFunction\022/\n\024sharedTables" + + "ToDelete\030\t \003(\0132\021.SharedTableState\022\030\n\006sch" + + "ema\030\n \001(\0132\010.PSchema\022\030\n\020autoPartitionNum\030" + + "\013 \001(\003\022\023\n\013viewIndexId\030\014 \001(\003\022\032\n\022useLongVie" + + "wIndexId\030\r \001(\010\"\364\001\n\017GetTableRequest\022\020\n\010te" + + "nantId\030\001 \002(\014\022\022\n\nschemaName\030\002 \002(\014\022\021\n\ttabl" + + "eName\030\003 \002(\014\022\026\n\016tableTimestamp\030\004 
\002(\003\022\027\n\017c" + + "lientTimestamp\030\005 \002(\003\022\025\n\rclientVersion\030\006 " + + "\001(\005\022\037\n\027skipAddingParentColumns\030\007 \001(\010\022\031\n\021", + "skipAddingIndexes\030\010 \001(\010\022$\n\023lockedAncesto" + + "rTable\030\t \001(\0132\007.PTable\"\212\001\n\023GetFunctionsRe" + + "quest\022\020\n\010tenantId\030\001 \002(\014\022\025\n\rfunctionNames" + + "\030\002 \003(\014\022\032\n\022functionTimestamps\030\003 \003(\003\022\027\n\017cl" + + "ientTimestamp\030\004 \002(\003\022\025\n\rclientVersion\030\005 \001" + + "(\005\"V\n\020GetSchemaRequest\022\022\n\nschemaName\030\001 \002" + + "(\t\022\027\n\017clientTimestamp\030\002 \002(\003\022\025\n\rclientVer" + + "sion\030\003 \002(\005\"d\n\022CreateTableRequest\022\036\n\026tabl" + + "eMetadataMutations\030\001 \003(\014\022\025\n\rclientVersio" + + "n\030\002 \001(\005\022\027\n\017allocateIndexId\030\003 \001(\010\"r\n\025Crea", + "teFunctionRequest\022\036\n\026tableMetadataMutati" + + "ons\030\001 \003(\014\022\021\n\ttemporary\030\002 \002(\010\022\017\n\007replace\030" + + "\003 \001(\010\022\025\n\rclientVersion\030\004 \001(\005\"`\n\023CreateSc" + + "hemaRequest\022\036\n\026tableMetadataMutations\030\001 " + + "\003(\014\022\022\n\nschemaName\030\002 \002(\t\022\025\n\rclientVersion" + + "\030\003 \002(\005\"\216\001\n\020DropTableRequest\022\036\n\026tableMeta" + + "dataMutations\030\001 \003(\014\022\021\n\ttableType\030\002 \002(\t\022\017" + + "\n\007cascade\030\003 \001(\010\022\025\n\rclientVersion\030\004 \001(\005\022\037" + + "\n\027skipAddingParentColumns\030\005 \001(\010\"_\n\021DropS" + + "chemaRequest\022\037\n\027schemaMetadataMutations\030", + "\001 \003(\014\022\022\n\nschemaName\030\002 \002(\t\022\025\n\rclientVersi" + + "on\030\003 \002(\005\"I\n\020AddColumnRequest\022\036\n\026tableMet" + + "adataMutations\030\001 \003(\014\022\025\n\rclientVersion\030\002 " + + "\001(\005\"J\n\021DropColumnRequest\022\036\n\026tableMetadat" + + 
"aMutations\030\001 \003(\014\022\025\n\rclientVersion\030\002 \001(\005\"" + + "^\n\023DropFunctionRequest\022\036\n\026tableMetadataM" + + "utations\030\001 \003(\014\022\020\n\010ifExists\030\002 \001(\010\022\025\n\rclie" + + "ntVersion\030\003 \001(\005\"P\n\027UpdateIndexStateReque" + + "st\022\036\n\026tableMetadataMutations\030\001 \003(\014\022\025\n\rcl" + + "ientVersion\030\002 \001(\005\"*\n\021ClearCacheRequest\022\025", + "\n\rclientVersion\030\001 \001(\005\"*\n\022ClearCacheRespo" + + "nse\022\024\n\014unfreedBytes\030\001 \001(\003\"*\n\021GetVersionR" + + "equest\022\025\n\rclientVersion\030\001 \001(\005\"E\n\022GetVers" + + "ionResponse\022\017\n\007version\030\001 \002(\003\022\036\n\026systemCa" + + "talogTimestamp\030\002 \001(\003\"\205\001\n\032ClearTableFromC" + + "acheRequest\022\020\n\010tenantId\030\001 \002(\014\022\022\n\nschemaN" + + "ame\030\002 \002(\014\022\021\n\ttableName\030\003 \002(\014\022\027\n\017clientTi" + + "mestamp\030\004 \002(\003\022\025\n\rclientVersion\030\005 \001(\005\"\035\n\033" + + "ClearTableFromCacheResponse*\271\005\n\014Mutation" + + "Code\022\030\n\024TABLE_ALREADY_EXISTS\020\000\022\023\n\017TABLE_", + "NOT_FOUND\020\001\022\024\n\020COLUMN_NOT_FOUND\020\002\022\031\n\025COL" + + "UMN_ALREADY_EXISTS\020\003\022\035\n\031CONCURRENT_TABLE" + + "_MUTATION\020\004\022\027\n\023TABLE_NOT_IN_REGION\020\005\022\025\n\021" + + "NEWER_TABLE_FOUND\020\006\022\034\n\030UNALLOWED_TABLE_M" + + "UTATION\020\007\022\021\n\rNO_PK_COLUMNS\020\010\022\032\n\026PARENT_T" + + "ABLE_NOT_FOUND\020\t\022\033\n\027FUNCTION_ALREADY_EXI" + + "STS\020\n\022\026\n\022FUNCTION_NOT_FOUND\020\013\022\030\n\024NEWER_F" + + "UNCTION_FOUND\020\014\022\032\n\026FUNCTION_NOT_IN_REGIO" + + "N\020\r\022\031\n\025SCHEMA_ALREADY_EXISTS\020\016\022\026\n\022NEWER_" + + "SCHEMA_FOUND\020\017\022\024\n\020SCHEMA_NOT_FOUND\020\020\022\030\n\024", + "SCHEMA_NOT_IN_REGION\020\021\022\032\n\026TABLES_EXIST_O" + + 
"N_SCHEMA\020\022\022\035\n\031UNALLOWED_SCHEMA_MUTATION\020" + + "\023\022%\n!AUTO_PARTITION_SEQUENCE_NOT_FOUND\020\024" + + "\022#\n\037CANNOT_COERCE_AUTO_PARTITION_ID\020\025\022\024\n" + + "\020TOO_MANY_INDEXES\020\026\022\037\n\033UNABLE_TO_CREATE_" + + "CHILD_LINK\020\027\022!\n\035UNABLE_TO_UPDATE_PARENT_" + + "TABLE\020\0302\345\006\n\017MetaDataService\022/\n\010getTable\022" + + "\020.GetTableRequest\032\021.MetaDataResponse\0227\n\014" + + "getFunctions\022\024.GetFunctionsRequest\032\021.Met" + + "aDataResponse\0221\n\tgetSchema\022\021.GetSchemaRe", + "quest\032\021.MetaDataResponse\0225\n\013createTable\022" + + "\023.CreateTableRequest\032\021.MetaDataResponse\022" + + ";\n\016createFunction\022\026.CreateFunctionReques" + + "t\032\021.MetaDataResponse\0227\n\014createSchema\022\024.C" + + "reateSchemaRequest\032\021.MetaDataResponse\0221\n" + + "\tdropTable\022\021.DropTableRequest\032\021.MetaData" + + "Response\0223\n\ndropSchema\022\022.DropSchemaReque" + + "st\032\021.MetaDataResponse\0227\n\014dropFunction\022\024." 
+ + "DropFunctionRequest\032\021.MetaDataResponse\0221" + + "\n\taddColumn\022\021.AddColumnRequest\032\021.MetaDat", + "aResponse\0223\n\ndropColumn\022\022.DropColumnRequ" + + "est\032\021.MetaDataResponse\022?\n\020updateIndexSta" + + "te\022\030.UpdateIndexStateRequest\032\021.MetaDataR" + + "esponse\0225\n\nclearCache\022\022.ClearCacheReques" + + "t\032\023.ClearCacheResponse\0225\n\ngetVersion\022\022.G" + + "etVersionRequest\032\023.GetVersionResponse\022P\n" + + "\023clearTableFromCache\022\033.ClearTableFromCac" + + "heRequest\032\034.ClearTableFromCacheResponseB" + + "B\n(org.apache.phoenix.coprocessor.genera" + + "tedB\016MetaDataProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -17756,13 +17937,13 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_SharedTableState_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SharedTableState_descriptor, - new java.lang.String[] { "TenantId", "SchemaName", "TableName", "Columns", "PhysicalNames", "ViewIndexId", }); + new java.lang.String[] { "TenantId", "SchemaName", "TableName", "Columns", "PhysicalNames", "ViewIndexId", "UseLongViewIndexId", }); internal_static_MetaDataResponse_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_MetaDataResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MetaDataResponse_descriptor, - new java.lang.String[] { "ReturnCode", "MutationTime", "Table", "TablesToDelete", "ColumnName", "FamilyName", "FunctionName", "Function", "SharedTablesToDelete", "Schema", "AutoPartitionNum", "ViewIndexId", }); + new java.lang.String[] { "ReturnCode", "MutationTime", "Table", "TablesToDelete", "ColumnName", "FamilyName", "FunctionName", "Function", "SharedTablesToDelete", "Schema", 
"AutoPartitionNum", "ViewIndexId", "UseLongViewIndexId", }); internal_static_GetTableRequest_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_GetTableRequest_fieldAccessorTable = new diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java index ab61826755a..d8f5247b059 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java @@ -3495,15 +3495,15 @@ org.apache.phoenix.coprocessor.generated.PTableProtos.PTableOrBuilder getIndexes */ com.google.protobuf.ByteString getTenantId(); - // optional int32 viewIndexId = 21; + // optional int64 viewIndexId = 21; /** - * optional int32 viewIndexId = 21; + * optional int64 viewIndexId = 21; */ boolean hasViewIndexId(); /** - * optional int32 viewIndexId = 21; + * optional int64 viewIndexId = 21; */ - int getViewIndexId(); + long getViewIndexId(); // optional bytes indexType = 22; /** @@ -3694,6 +3694,16 @@ org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedCQCounterOrBuilder * optional int32 transactionProvider = 38; */ int getTransactionProvider(); + + // optional bool useLongViewIndexId = 39; + /** + * optional bool useLongViewIndexId = 39; + */ + boolean hasUseLongViewIndexId(); + /** + * optional bool useLongViewIndexId = 39; + */ + boolean getUseLongViewIndexId(); } /** * Protobuf type {@code PTable} @@ -3859,7 +3869,7 @@ private PTable( } case 168: { bitField0_ |= 0x00010000; - viewIndexId_ = input.readInt32(); + viewIndexId_ = input.readInt64(); break; } case 178: { @@ -3950,6 +3960,11 @@ private PTable( transactionProvider_ = input.readInt32(); break; } + case 312: { + bitField1_ |= 0x00000002; + useLongViewIndexId_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -4395,19 
+4410,19 @@ public com.google.protobuf.ByteString getTenantId() { return tenantId_; } - // optional int32 viewIndexId = 21; + // optional int64 viewIndexId = 21; public static final int VIEWINDEXID_FIELD_NUMBER = 21; - private int viewIndexId_; + private long viewIndexId_; /** - * optional int32 viewIndexId = 21; + * optional int64 viewIndexId = 21; */ public boolean hasViewIndexId() { return ((bitField0_ & 0x00010000) == 0x00010000); } /** - * optional int32 viewIndexId = 21; + * optional int64 viewIndexId = 21; */ - public int getViewIndexId() { + public long getViewIndexId() { return viewIndexId_; } @@ -4730,6 +4745,22 @@ public int getTransactionProvider() { return transactionProvider_; } + // optional bool useLongViewIndexId = 39; + public static final int USELONGVIEWINDEXID_FIELD_NUMBER = 39; + private boolean useLongViewIndexId_; + /** + * optional bool useLongViewIndexId = 39; + */ + public boolean hasUseLongViewIndexId() { + return ((bitField1_ & 0x00000002) == 0x00000002); + } + /** + * optional bool useLongViewIndexId = 39; + */ + public boolean getUseLongViewIndexId() { + return useLongViewIndexId_; + } + private void initFields() { schemaNameBytes_ = com.google.protobuf.ByteString.EMPTY; tableNameBytes_ = com.google.protobuf.ByteString.EMPTY; @@ -4750,7 +4781,7 @@ private void initFields() { viewStatement_ = com.google.protobuf.ByteString.EMPTY; physicalNames_ = java.util.Collections.emptyList(); tenantId_ = com.google.protobuf.ByteString.EMPTY; - viewIndexId_ = 0; + viewIndexId_ = 0L; indexType_ = com.google.protobuf.ByteString.EMPTY; statsTimeStamp_ = 0L; storeNulls_ = false; @@ -4768,6 +4799,7 @@ private void initFields() { encodedCQCounters_ = java.util.Collections.emptyList(); useStatsForParallelization_ = false; transactionProvider_ = 0; + useLongViewIndexId_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -4893,7 +4925,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) 
output.writeBytes(20, tenantId_); } if (((bitField0_ & 0x00010000) == 0x00010000)) { - output.writeInt32(21, viewIndexId_); + output.writeInt64(21, viewIndexId_); } if (((bitField0_ & 0x00020000) == 0x00020000)) { output.writeBytes(22, indexType_); @@ -4946,6 +4978,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (((bitField1_ & 0x00000001) == 0x00000001)) { output.writeInt32(38, transactionProvider_); } + if (((bitField1_ & 0x00000002) == 0x00000002)) { + output.writeBool(39, useLongViewIndexId_); + } getUnknownFields().writeTo(output); } @@ -5038,7 +5073,7 @@ public int getSerializedSize() { } if (((bitField0_ & 0x00010000) == 0x00010000)) { size += com.google.protobuf.CodedOutputStream - .computeInt32Size(21, viewIndexId_); + .computeInt64Size(21, viewIndexId_); } if (((bitField0_ & 0x00020000) == 0x00020000)) { size += com.google.protobuf.CodedOutputStream @@ -5108,6 +5143,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeInt32Size(38, transactionProvider_); } + if (((bitField1_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(39, useLongViewIndexId_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -5304,6 +5343,11 @@ public boolean equals(final java.lang.Object obj) { result = result && (getTransactionProvider() == other.getTransactionProvider()); } + result = result && (hasUseLongViewIndexId() == other.hasUseLongViewIndexId()); + if (hasUseLongViewIndexId()) { + result = result && (getUseLongViewIndexId() + == other.getUseLongViewIndexId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -5395,7 +5439,7 @@ public int hashCode() { } if (hasViewIndexId()) { hash = (37 * hash) + VIEWINDEXID_FIELD_NUMBER; - hash = (53 * hash) + getViewIndexId(); + hash = (53 * hash) + hashLong(getViewIndexId()); } if (hasIndexType()) { hash = (37 * hash) + 
INDEXTYPE_FIELD_NUMBER; @@ -5465,6 +5509,10 @@ public int hashCode() { hash = (37 * hash) + TRANSACTIONPROVIDER_FIELD_NUMBER; hash = (53 * hash) + getTransactionProvider(); } + if (hasUseLongViewIndexId()) { + hash = (37 * hash) + USELONGVIEWINDEXID_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getUseLongViewIndexId()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -5623,7 +5671,7 @@ public Builder clear() { bitField0_ = (bitField0_ & ~0x00020000); tenantId_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00040000); - viewIndexId_ = 0; + viewIndexId_ = 0L; bitField0_ = (bitField0_ & ~0x00080000); indexType_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00100000); @@ -5663,6 +5711,8 @@ public Builder clear() { bitField1_ = (bitField1_ & ~0x00000008); transactionProvider_ = 0; bitField1_ = (bitField1_ & ~0x00000010); + useLongViewIndexId_ = false; + bitField1_ = (bitField1_ & ~0x00000020); return this; } @@ -5857,6 +5907,10 @@ public org.apache.phoenix.coprocessor.generated.PTableProtos.PTable buildPartial to_bitField1_ |= 0x00000001; } result.transactionProvider_ = transactionProvider_; + if (((from_bitField1_ & 0x00000020) == 0x00000020)) { + to_bitField1_ |= 0x00000002; + } + result.useLongViewIndexId_ = useLongViewIndexId_; result.bitField0_ = to_bitField0_; result.bitField1_ = to_bitField1_; onBuilt(); @@ -6065,6 +6119,9 @@ public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.PTableProtos.P if (other.hasTransactionProvider()) { setTransactionProvider(other.getTransactionProvider()); } + if (other.hasUseLongViewIndexId()) { + setUseLongViewIndexId(other.getUseLongViewIndexId()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -7323,35 +7380,35 @@ public Builder clearTenantId() { return this; } - // optional int32 viewIndexId = 21; - private int viewIndexId_ ; + // optional int64 viewIndexId = 21; + private long viewIndexId_ ; 
/** - * optional int32 viewIndexId = 21; + * optional int64 viewIndexId = 21; */ public boolean hasViewIndexId() { return ((bitField0_ & 0x00080000) == 0x00080000); } /** - * optional int32 viewIndexId = 21; + * optional int64 viewIndexId = 21; */ - public int getViewIndexId() { + public long getViewIndexId() { return viewIndexId_; } /** - * optional int32 viewIndexId = 21; + * optional int64 viewIndexId = 21; */ - public Builder setViewIndexId(int value) { + public Builder setViewIndexId(long value) { bitField0_ |= 0x00080000; viewIndexId_ = value; onChanged(); return this; } /** - * optional int32 viewIndexId = 21; + * optional int64 viewIndexId = 21; */ public Builder clearViewIndexId() { bitField0_ = (bitField0_ & ~0x00080000); - viewIndexId_ = 0; + viewIndexId_ = 0L; onChanged(); return this; } @@ -8177,6 +8234,39 @@ public Builder clearTransactionProvider() { return this; } + // optional bool useLongViewIndexId = 39; + private boolean useLongViewIndexId_ ; + /** + * optional bool useLongViewIndexId = 39; + */ + public boolean hasUseLongViewIndexId() { + return ((bitField1_ & 0x00000020) == 0x00000020); + } + /** + * optional bool useLongViewIndexId = 39; + */ + public boolean getUseLongViewIndexId() { + return useLongViewIndexId_; + } + /** + * optional bool useLongViewIndexId = 39; + */ + public Builder setUseLongViewIndexId(boolean value) { + bitField1_ |= 0x00000020; + useLongViewIndexId_ = value; + onChanged(); + return this; + } + /** + * optional bool useLongViewIndexId = 39; + */ + public Builder clearUseLongViewIndexId() { + bitField1_ = (bitField1_ & ~0x00000020); + useLongViewIndexId_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:PTable) } @@ -8846,7 +8936,7 @@ public Builder clearCounter() { "es\030\002 \003(\014\022\033\n\023guidePostsByteCount\030\003 \001(\003\022\025\n" + "\rkeyBytesCount\030\004 \001(\003\022\027\n\017guidePostsCount\030" + "\005 \001(\005\022!\n\013pGuidePosts\030\006 
\001(\0132\014.PGuidePosts" + - "\"\255\007\n\006PTable\022\027\n\017schemaNameBytes\030\001 \002(\014\022\026\n\016" + + "\"\311\007\n\006PTable\022\027\n\017schemaNameBytes\030\001 \002(\014\022\026\n\016" + "tableNameBytes\030\002 \002(\014\022\036\n\ttableType\030\003 \002(\0162" + "\013.PTableType\022\022\n\nindexState\030\004 \001(\t\022\026\n\016sequ" + "enceNumber\030\005 \002(\003\022\021\n\ttimeStamp\030\006 \002(\003\022\023\n\013p" + @@ -8857,7 +8947,7 @@ public Builder clearCounter() { "me\030\016 \001(\014\022\022\n\ndisableWAL\030\017 \002(\010\022\023\n\013multiTen" + "ant\030\020 \002(\010\022\020\n\010viewType\030\021 \001(\014\022\025\n\rviewState" + "ment\030\022 \001(\014\022\025\n\rphysicalNames\030\023 \003(\014\022\020\n\010ten" + - "antId\030\024 \001(\014\022\023\n\013viewIndexId\030\025 \001(\005\022\021\n\tinde" + + "antId\030\024 \001(\014\022\023\n\013viewIndexId\030\025 \001(\003\022\021\n\tinde" + "xType\030\026 \001(\014\022\026\n\016statsTimeStamp\030\027 \001(\003\022\022\n\ns" + "toreNulls\030\030 \001(\010\022\027\n\017baseColumnCount\030\031 \001(\005" + "\022\036\n\026rowKeyOrderOptimizable\030\032 \001(\010\022\025\n\rtran" + @@ -8869,12 +8959,13 @@ public Builder clearCounter() { "eme\030\" \001(\014\022\026\n\016encodingScheme\030# \001(\014\022,\n\021enc" + "odedCQCounters\030$ \003(\0132\021.EncodedCQCounter\022" + "\"\n\032useStatsForParallelization\030% \001(\010\022\033\n\023t" + - "ransactionProvider\030& \001(\005\"6\n\020EncodedCQCou" + - "nter\022\021\n\tcolFamily\030\001 \002(\t\022\017\n\007counter\030\002 \002(\005" + - "*A\n\nPTableType\022\n\n\006SYSTEM\020\000\022\010\n\004USER\020\001\022\010\n\004", - "VIEW\020\002\022\t\n\005INDEX\020\003\022\010\n\004JOIN\020\004B@\n(org.apach" + - "e.phoenix.coprocessor.generatedB\014PTableP" + - "rotosH\001\210\001\001\240\001\001" + "ransactionProvider\030& \001(\005\022\032\n\022useLongViewI" + + "ndexId\030\' \001(\010\"6\n\020EncodedCQCounter\022\021\n\tcolF" + + 
"amily\030\001 \002(\t\022\017\n\007counter\030\002 \002(\005*A\n\nPTableTy", + "pe\022\n\n\006SYSTEM\020\000\022\010\n\004USER\020\001\022\010\n\004VIEW\020\002\022\t\n\005IN" + + "DEX\020\003\022\010\n\004JOIN\020\004B@\n(org.apache.phoenix.co" + + "processor.generatedB\014PTableProtosH\001\210\001\001\240\001" + + "\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -8898,7 +8989,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_PTable_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_PTable_descriptor, - new java.lang.String[] { "SchemaNameBytes", "TableNameBytes", "TableType", "IndexState", "SequenceNumber", "TimeStamp", "PkNameBytes", "BucketNum", "Columns", "Indexes", "IsImmutableRows", "DataTableNameBytes", "DefaultFamilyName", "DisableWAL", "MultiTenant", "ViewType", "ViewStatement", "PhysicalNames", "TenantId", "ViewIndexId", "IndexType", "StatsTimeStamp", "StoreNulls", "BaseColumnCount", "RowKeyOrderOptimizable", "Transactional", "UpdateCacheFrequency", "IndexDisableTimestamp", "IsNamespaceMapped", "AutoParititonSeqName", "IsAppendOnlySchema", "ParentNameBytes", "StorageScheme", "EncodingScheme", "EncodedCQCounters", "UseStatsForParallelization", "TransactionProvider", }); + new java.lang.String[] { "SchemaNameBytes", "TableNameBytes", "TableType", "IndexState", "SequenceNumber", "TimeStamp", "PkNameBytes", "BucketNum", "Columns", "Indexes", "IsImmutableRows", "DataTableNameBytes", "DefaultFamilyName", "DisableWAL", "MultiTenant", "ViewType", "ViewStatement", "PhysicalNames", "TenantId", "ViewIndexId", "IndexType", "StatsTimeStamp", "StoreNulls", "BaseColumnCount", "RowKeyOrderOptimizable", "Transactional", "UpdateCacheFrequency", "IndexDisableTimestamp", "IsNamespaceMapped", "AutoParititonSeqName", "IsAppendOnlySchema", "ParentNameBytes", 
"StorageScheme", "EncodingScheme", "EncodedCQCounters", "UseStatsForParallelization", "TransactionProvider", "UseLongViewIndexId", }); internal_static_EncodedCQCounter_descriptor = getDescriptor().getMessageTypes().get(3); internal_static_EncodedCQCounter_fieldAccessorTable = new diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/ServerCachingProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/ServerCachingProtos.java index f1b03f84cf2..9e1ac1ffa7a 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/ServerCachingProtos.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/ServerCachingProtos.java @@ -2157,6 +2157,16 @@ org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ColumnInfoOrBuilder * required int32 immutableStorageScheme = 21; */ int getImmutableStorageScheme(); + + // optional bool useLongViewIndex = 22; + /** + * optional bool useLongViewIndex = 22; + */ + boolean hasUseLongViewIndex(); + /** + * optional bool useLongViewIndex = 22; + */ + boolean getUseLongViewIndex(); } /** * Protobuf type {@code IndexMaintainer} @@ -2350,6 +2360,11 @@ private IndexMaintainer( immutableStorageScheme_ = input.readInt32(); break; } + case 176: { + bitField0_ |= 0x00010000; + useLongViewIndex_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -2834,6 +2849,22 @@ public int getImmutableStorageScheme() { return immutableStorageScheme_; } + // optional bool useLongViewIndex = 22; + public static final int USELONGVIEWINDEX_FIELD_NUMBER = 22; + private boolean useLongViewIndex_; + /** + * optional bool useLongViewIndex = 22; + */ + public boolean hasUseLongViewIndex() { + return ((bitField0_ & 0x00010000) == 0x00010000); + } + /** + * optional bool useLongViewIndex = 22; + */ + public boolean getUseLongViewIndex() { + return useLongViewIndex_; + } + private void initFields() { saltBuckets_ = 0; 
isMultiTenant_ = false; @@ -2856,6 +2887,7 @@ private void initFields() { indexedColumnInfo_ = java.util.Collections.emptyList(); encodingScheme_ = 0; immutableStorageScheme_ = 0; + useLongViewIndex_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -3016,6 +3048,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (((bitField0_ & 0x00008000) == 0x00008000)) { output.writeInt32(21, immutableStorageScheme_); } + if (((bitField0_ & 0x00010000) == 0x00010000)) { + output.writeBool(22, useLongViewIndex_); + } getUnknownFields().writeTo(output); } @@ -3114,6 +3149,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeInt32Size(21, immutableStorageScheme_); } + if (((bitField0_ & 0x00010000) == 0x00010000)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(22, useLongViewIndex_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -3227,6 +3266,11 @@ public boolean equals(final java.lang.Object obj) { result = result && (getImmutableStorageScheme() == other.getImmutableStorageScheme()); } + result = result && (hasUseLongViewIndex() == other.hasUseLongViewIndex()); + if (hasUseLongViewIndex()) { + result = result && (getUseLongViewIndex() + == other.getUseLongViewIndex()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -3324,6 +3368,10 @@ public int hashCode() { hash = (37 * hash) + IMMUTABLESTORAGESCHEME_FIELD_NUMBER; hash = (53 * hash) + getImmutableStorageScheme(); } + if (hasUseLongViewIndex()) { + hash = (37 * hash) + USELONGVIEWINDEX_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getUseLongViewIndex()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -3500,6 +3548,8 @@ public Builder clear() { bitField0_ = (bitField0_ & ~0x00080000); immutableStorageScheme_ = 0; bitField0_ = (bitField0_ & ~0x00100000); 
+ useLongViewIndex_ = false; + bitField0_ = (bitField0_ & ~0x00200000); return this; } @@ -3637,6 +3687,10 @@ public org.apache.phoenix.coprocessor.generated.ServerCachingProtos.IndexMaintai to_bitField0_ |= 0x00008000; } result.immutableStorageScheme_ = immutableStorageScheme_; + if (((from_bitField0_ & 0x00200000) == 0x00200000)) { + to_bitField0_ |= 0x00010000; + } + result.useLongViewIndex_ = useLongViewIndex_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -3815,6 +3869,9 @@ public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.ServerCachingP if (other.hasImmutableStorageScheme()) { setImmutableStorageScheme(other.getImmutableStorageScheme()); } + if (other.hasUseLongViewIndex()) { + setUseLongViewIndex(other.getUseLongViewIndex()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -5579,6 +5636,39 @@ public Builder clearImmutableStorageScheme() { return this; } + // optional bool useLongViewIndex = 22; + private boolean useLongViewIndex_ ; + /** + * optional bool useLongViewIndex = 22; + */ + public boolean hasUseLongViewIndex() { + return ((bitField0_ & 0x00200000) == 0x00200000); + } + /** + * optional bool useLongViewIndex = 22; + */ + public boolean getUseLongViewIndex() { + return useLongViewIndex_; + } + /** + * optional bool useLongViewIndex = 22; + */ + public Builder setUseLongViewIndex(boolean value) { + bitField0_ |= 0x00200000; + useLongViewIndex_ = value; + onChanged(); + return this; + } + /** + * optional bool useLongViewIndex = 22; + */ + public Builder clearUseLongViewIndex() { + bitField0_ = (bitField0_ & ~0x00200000); + useLongViewIndex_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:IndexMaintainer) } @@ -8615,7 +8705,7 @@ public org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServer "ength\030\003 \002(\005\"4\n\017ColumnReference\022\016\n\006family" + "\030\001 \002(\014\022\021\n\tqualifier\030\002 
\002(\014\"4\n\nColumnInfo\022" + "\022\n\nfamilyName\030\001 \001(\t\022\022\n\ncolumnName\030\002 \002(\t\"" + - "\306\005\n\017IndexMaintainer\022\023\n\013saltBuckets\030\001 \002(\005" + + "\340\005\n\017IndexMaintainer\022\023\n\013saltBuckets\030\001 \002(\005" + "\022\025\n\risMultiTenant\030\002 \002(\010\022\023\n\013viewIndexId\030\003" + " \001(\014\022(\n\016indexedColumns\030\004 \003(\0132\020.ColumnRef" + "erence\022 \n\030indexedColumnTypeOrdinal\030\005 \003(\005", @@ -8632,23 +8722,23 @@ public org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServer "ed\030\020 \002(\010\022\033\n\023indexRowKeyByteSize\030\021 \002(\005\022\021\n" + "\timmutable\030\022 \002(\010\022&\n\021indexedColumnInfo\030\023 " + "\003(\0132\013.ColumnInfo\022\026\n\016encodingScheme\030\024 \002(\005" + - "\022\036\n\026immutableStorageScheme\030\025 \002(\005\"\334\001\n\025Add" + - "ServerCacheRequest\022\020\n\010tenantId\030\001 \001(\014\022\017\n\007" + - "cacheId\030\002 \002(\014\022)\n\010cachePtr\030\003 \002(\0132\027.Immuta" + - "bleBytesWritable\022)\n\014cacheFactory\030\004 \002(\0132\023" + - ".ServerCacheFactory\022\017\n\007txState\030\005 \001(\014\022\"\n\032" + - "hasProtoBufIndexMaintainer\030\006 \001(\010\022\025\n\rclie" + - "ntVersion\030\007 \001(\005\"(\n\026AddServerCacheRespons", - "e\022\016\n\006return\030\001 \002(\010\"=\n\030RemoveServerCacheRe" + - "quest\022\020\n\010tenantId\030\001 \001(\014\022\017\n\007cacheId\030\002 \002(\014" + - "\"+\n\031RemoveServerCacheResponse\022\016\n\006return\030" + - "\001 \002(\0102\245\001\n\024ServerCachingService\022A\n\016addSer" + - "verCache\022\026.AddServerCacheRequest\032\027.AddSe" + - "rverCacheResponse\022J\n\021removeServerCache\022\031" + - ".RemoveServerCacheRequest\032\032.RemoveServer" + - "CacheResponseBG\n(org.apache.phoenix.copr" + - "ocessor.generatedB\023ServerCachingProtosH\001" + - "\210\001\001\240\001\001" + "\022\036\n\026immutableStorageScheme\030\025 
\002(\005\022\030\n\020useL" + + "ongViewIndex\030\026 \001(\010\"\334\001\n\025AddServerCacheReq" + + "uest\022\020\n\010tenantId\030\001 \001(\014\022\017\n\007cacheId\030\002 \002(\014\022" + + ")\n\010cachePtr\030\003 \002(\0132\027.ImmutableBytesWritab" + + "le\022)\n\014cacheFactory\030\004 \002(\0132\023.ServerCacheFa" + + "ctory\022\017\n\007txState\030\005 \001(\014\022\"\n\032hasProtoBufInd" + + "exMaintainer\030\006 \001(\010\022\025\n\rclientVersion\030\007 \001(", + "\005\"(\n\026AddServerCacheResponse\022\016\n\006return\030\001 " + + "\002(\010\"=\n\030RemoveServerCacheRequest\022\020\n\010tenan" + + "tId\030\001 \001(\014\022\017\n\007cacheId\030\002 \002(\014\"+\n\031RemoveServ" + + "erCacheResponse\022\016\n\006return\030\001 \002(\0102\245\001\n\024Serv" + + "erCachingService\022A\n\016addServerCache\022\026.Add" + + "ServerCacheRequest\032\027.AddServerCacheRespo" + + "nse\022J\n\021removeServerCache\022\031.RemoveServerC" + + "acheRequest\032\032.RemoveServerCacheResponseB" + + "G\n(org.apache.phoenix.coprocessor.genera" + + "tedB\023ServerCachingProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -8678,7 +8768,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_IndexMaintainer_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IndexMaintainer_descriptor, - new java.lang.String[] { "SaltBuckets", "IsMultiTenant", "ViewIndexId", "IndexedColumns", "IndexedColumnTypeOrdinal", "DataTableColRefForCoveredColumns", "IndexTableColRefForCoveredColumns", "IsLocalIndex", "IndexTableName", "RowKeyOrderOptimizable", "DataTableEmptyKeyValueColFamily", "EmptyKeyValueColFamily", "IndexedExpressions", "RowKeyMetadata", "NumDataTableColFamilies", "IndexWalDisabled", "IndexRowKeyByteSize", "Immutable", "IndexedColumnInfo", 
"EncodingScheme", "ImmutableStorageScheme", }); + new java.lang.String[] { "SaltBuckets", "IsMultiTenant", "ViewIndexId", "IndexedColumns", "IndexedColumnTypeOrdinal", "DataTableColRefForCoveredColumns", "IndexTableColRefForCoveredColumns", "IsLocalIndex", "IndexTableName", "RowKeyOrderOptimizable", "DataTableEmptyKeyValueColFamily", "EmptyKeyValueColFamily", "IndexedExpressions", "RowKeyMetadata", "NumDataTableColFamilies", "IndexWalDisabled", "IndexRowKeyByteSize", "Immutable", "IndexedColumnInfo", "EncodingScheme", "ImmutableStorageScheme", "UseLongViewIndex", }); internal_static_AddServerCacheRequest_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_AddServerCacheRequest_fieldAccessorTable = new diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java index 2f20af290cf..25ce07a2da4 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java @@ -314,6 +314,7 @@ private static List deserialize(byte[] buf, int offset, int len } private byte[] viewIndexId; + private PDataType viewIndexType; private boolean isMultiTenant; // indexed expressions that are not present in the row key of the data table, the expression can also refer to a regular column private List indexedExpressions; @@ -371,7 +372,8 @@ private IndexMaintainer(final PTable dataTable, final PTable index, PhoenixConne this(dataTable.getRowKeySchema(), dataTable.getBucketNum() != null); this.rowKeyOrderOptimizable = index.rowKeyOrderOptimizable(); this.isMultiTenant = dataTable.isMultiTenant(); - this.viewIndexId = index.getViewIndexId() == null ? null : MetaDataUtil.getViewIndexIdDataType().toBytes(index.getViewIndexId()); + this.viewIndexId = index.getViewIndexId() == null ? 
null : index.getViewIndexType().toBytes(index.getViewIndexId()); + this.viewIndexType = index.getViewIndexType(); this.isLocalIndex = index.getIndexType() == IndexType.LOCAL; this.encodingScheme = index.getEncodingScheme(); @@ -823,7 +825,7 @@ public boolean isNullable() { @Override public PDataType getDataType() { - return MetaDataUtil.getViewIndexIdDataType(); + return viewIndexType; } @Override @@ -1223,7 +1225,9 @@ public void readFields(DataInput input) throws IOException { boolean hasViewIndexId = encodedIndexedColumnsAndViewId < 0; if (hasViewIndexId) { // Fixed length - viewIndexId = new byte[MetaDataUtil.getViewIndexIdDataType().getByteSize()]; + //Use leacy viewIndexIdType for clients older than 4.10 release + viewIndexId = new byte[MetaDataUtil.getLegacyViewIndexIdDataType().getByteSize()]; + viewIndexType = MetaDataUtil.getLegacyViewIndexIdDataType(); input.readFully(viewIndexId); } int nIndexedColumns = Math.abs(encodedIndexedColumnsAndViewId) - 1; @@ -1340,6 +1344,9 @@ public static IndexMaintainer fromProto(ServerCachingProtos.IndexMaintainer prot maintainer.nIndexSaltBuckets = proto.getSaltBuckets(); maintainer.isMultiTenant = proto.getIsMultiTenant(); maintainer.viewIndexId = proto.hasViewIndexId() ? proto.getViewIndexId().toByteArray() : null; + maintainer.viewIndexType = proto.hasUseLongViewIndex() + ? 
MetaDataUtil.getViewIndexIdDataType() + : MetaDataUtil.getLegacyViewIndexIdDataType(); List indexedColumnsList = proto.getIndexedColumnsList(); maintainer.indexedColumns = new HashSet(indexedColumnsList.size()); for (ServerCachingProtos.ColumnReference colRefFromProto : indexedColumnsList) { @@ -1459,6 +1466,7 @@ public static ServerCachingProtos.IndexMaintainer toProto(IndexMaintainer mainta builder.setIsMultiTenant(maintainer.isMultiTenant); if (maintainer.viewIndexId != null) { builder.setViewIndexId(ByteStringer.wrap(maintainer.viewIndexId)); + builder.setUseLongViewIndex(MetaDataUtil.getViewIndexIdDataType().equals(maintainer.viewIndexType)); } for (ColumnReference colRef : maintainer.indexedColumns) { ServerCachingProtos.ColumnReference.Builder cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder(); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java index c87ad985e99..92c941d0bd1 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java @@ -312,7 +312,7 @@ private Collection getLocalIndexNames(HTableInterfaceReference new HashMap(); for (PTable index : indexes) { if (localIndex == null) localIndex = index; - localIndexNames.put(new ImmutableBytesWritable(MetaDataUtil.getViewIndexIdDataType().toBytes( + localIndexNames.put(new ImmutableBytesWritable(index.getViewIndexType().toBytes( index.getViewIndexId())), index.getName().getString()); } if (localIndex == null) { diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java index 1a22f608603..6af5b5a5c95 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java +++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java @@ -47,6 +47,7 @@ import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PInteger; +import org.apache.phoenix.util.MetaDataUtil; import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.StringUtil; @@ -204,14 +205,19 @@ private void appendPKColumnValue(StringBuilder buf, byte[] range, Boolean isNull range = ptr.get(); } if (changeViewIndexId) { - Short s = (Short) type.toObject(range); - s = (short) (s + (-Short.MAX_VALUE)); - buf.append(s.toString()); + PDataType viewIndexDataType = tableRef.getTable().getViewIndexType(); + buf.append(getViewIndexValue(type, range, viewIndexDataType).toString()); } else { Format formatter = context.getConnection().getFormatter(type); buf.append(type.toStringLiteral(range, formatter)); } } + + private Long getViewIndexValue(PDataType type, byte[] range, PDataType viewIndexDataType){ + boolean useLongViewIndex = MetaDataUtil.getViewIndexIdDataType().equals(viewIndexDataType); + Object s = type.toObject(range); + return (useLongViewIndex ? (Long) s : (Short) s) - (useLongViewIndex ? 
Long.MAX_VALUE : Short.MAX_VALUE); + } private static class RowKeyValueIterator implements Iterator { private final RowKeySchema schema; diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java index 56d8698500b..7814a85bb70 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java @@ -222,6 +222,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData { public static final byte[] IS_VIEW_REFERENCED_BYTES = Bytes.toBytes(IS_VIEW_REFERENCED); public static final String VIEW_INDEX_ID = "VIEW_INDEX_ID"; public static final byte[] VIEW_INDEX_ID_BYTES = Bytes.toBytes(VIEW_INDEX_ID); + public static final String USE_LONG_VIEW_INDEX = "USE_LONG_VIEW_INDEX"; + public static final byte[] USE_LONG_VIEW_INDEX_BYTES = Bytes.toBytes(USE_LONG_VIEW_INDEX); public static final String BASE_COLUMN_COUNT = "BASE_COLUMN_COUNT"; public static final byte[] BASE_COLUMN_COUNT_BYTES = Bytes.toBytes(BASE_COLUMN_COUNT); public static final String IS_ROW_TIMESTAMP = "IS_ROW_TIMESTAMP"; diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java index 20ac09d5a77..8ac686f1b32 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java @@ -296,7 +296,7 @@ public MetaDataMutationResult createTable(List tableMetaData, byte[] p if (!allocateIndexId) { return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, 0, null); } else { - return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, 0, null, Short.MIN_VALUE); + return new 
MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, 0, null, Long.MIN_VALUE, MetaDataUtil.getViewIndexIdDataType()); } } diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java index 9f39e4592ef..2e3ca57e2b8 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java @@ -192,7 +192,8 @@ public enum JoinType {INNER, LEFT_OUTER} DISABLE_WAL + " BOOLEAN,\n" + MULTI_TENANT + " BOOLEAN,\n" + VIEW_TYPE + " UNSIGNED_TINYINT,\n" + - VIEW_INDEX_ID + " SMALLINT,\n" + + VIEW_INDEX_ID + " BIGINT,\n" + + USE_LONG_VIEW_INDEX + " BOOLEAN,\n" + // Column metadata (will be null for table row) DATA_TYPE + " INTEGER," + COLUMN_SIZE + " INTEGER," + diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java index d1b8f1ea20b..50bb722d76e 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java @@ -23,6 +23,7 @@ import org.apache.phoenix.hbase.index.util.KeyValueBuilder; import org.apache.phoenix.index.IndexMaintainer; import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.transaction.TransactionFactory; public class DelegateTable implements PTable { @@ -207,10 +208,15 @@ public String getViewStatement() { } @Override - public Short getViewIndexId() { + public Long getViewIndexId() { return delegate.getViewIndexId(); } + @Override + public PDataType getViewIndexType() { + return delegate.getViewIndexType(); + } + @Override public PTableKey getKey() { return delegate.getKey(); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java index cc9a896d1e0..3b9348c5a1f 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java @@ -90,6 +90,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.USE_LONG_VIEW_INDEX; import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT; import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY; import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE; @@ -301,8 +302,9 @@ public class MetaDataClient { GUIDE_POSTS_WIDTH + "," + IMMUTABLE_STORAGE_SCHEME + "," + ENCODING_SCHEME + "," + - USE_STATS_FOR_PARALLELIZATION + - ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + USE_STATS_FOR_PARALLELIZATION +"," + + USE_LONG_VIEW_INDEX + + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; private static final String CREATE_SCHEMA = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TABLE_SCHEM + "," + TABLE_NAME + ") VALUES (?,?)"; @@ -1061,7 +1063,7 @@ private PColumn newColumn(int position, ColumnDef def, PrimaryKeyConstraint pkCo } } - public MutationState createTable(CreateTableStatement statement, byte[][] splits, PTable parent, String viewStatement, ViewType viewType, byte[][] viewColumnConstants, BitSet isViewColumnReferenced) throws SQLException { + public MutationState createTable(CreateTableStatement statement, byte[][] splits, PTable parent, String viewStatement, ViewType viewType, PDataType viewIndexType, byte[][] viewColumnConstants, BitSet 
isViewColumnReferenced) throws SQLException { TableName tableName = statement.getTableName(); Map tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size()); Map commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size() + 1); @@ -1122,7 +1124,7 @@ public MutationState createTable(CreateTableStatement statement, byte[][] splits true, NamedTableNode.create(statement.getTableName()), statement.getTableType()); } } - table = createTableInternal(statement, splits, parent, viewStatement, viewType, viewColumnConstants, isViewColumnReferenced, false, null, null, tableProps, commonFamilyProps); + table = createTableInternal(statement, splits, parent, viewStatement, viewType, viewIndexType, viewColumnConstants, isViewColumnReferenced, false, null, null, tableProps, commonFamilyProps); if (table == null || table.getType() == PTableType.VIEW /*|| table.isTransactional()*/) { return new MutationState(0, 0, connection); @@ -1676,7 +1678,7 @@ public MutationState createIndex(CreateIndexStatement statement, byte[][] splits PrimaryKeyConstraint pk = FACTORY.primaryKey(null, allPkColumns); tableProps.put(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME, dataTable.getName().getString()); CreateTableStatement tableStatement = FACTORY.createTable(indexTableName, statement.getProps(), columnDefs, pk, statement.getSplitNodes(), PTableType.INDEX, statement.ifNotExists(), null, null, statement.getBindCount(), null); - table = createTableInternal(tableStatement, splits, dataTable, null, null, null, null, allocateIndexId, statement.getIndexType(), asyncCreatedDate, tableProps, commonFamilyProps); + table = createTableInternal(tableStatement, splits, dataTable, null, null, MetaDataUtil.getViewIndexIdDataType(),null, null, allocateIndexId, statement.getIndexType(), asyncCreatedDate, tableProps, commonFamilyProps); break; } catch (ConcurrentTableMutationException e) { // Can happen if parent data table changes while above is in progress if (numRetries<5) { @@ -1872,7 
+1874,7 @@ private static boolean checkAndValidateRowTimestampCol(ColumnDef colDef, Primary } private PTable createTableInternal(CreateTableStatement statement, byte[][] splits, - final PTable parent, String viewStatement, ViewType viewType, + final PTable parent, String viewStatement, ViewType viewType, PDataType viewIndexType, final byte[][] viewColumnConstants, final BitSet isViewColumnReferenced, boolean allocateIndexId, IndexType indexType, Date asyncCreatedDate, Map tableProps, @@ -2544,7 +2546,7 @@ else if (!SchemaUtil.isSystemTable(Bytes.toBytes(SchemaUtil.getTableName(schemaN Collections.emptyList(), isImmutableRows, Collections.emptyList(), defaultFamilyName == null ? null : PNameFactory.newName(defaultFamilyName), null, - Boolean.TRUE.equals(disableWAL), false, false, null, null, indexType, true, null, 0, 0L, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema, ONE_CELL_PER_COLUMN, NON_ENCODED_QUALIFIERS, PTable.EncodedCQCounter.NULL_COUNTER, true); + Boolean.TRUE.equals(disableWAL), false, false, null, null, null, indexType, true, null, 0, 0L, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema, ONE_CELL_PER_COLUMN, NON_ENCODED_QUALIFIERS, PTable.EncodedCQCounter.NULL_COUNTER, true); connection.addTable(table, MetaDataProtocol.MIN_TABLE_TIMESTAMP); } @@ -2709,6 +2711,7 @@ public boolean isViewReferenced() { } else { tableUpsert.setBoolean(28, useStatsForParallelizationProp); } + tableUpsert.setBoolean(29, true); tableUpsert.execute(); if (asyncCreatedDate != null) { @@ -2822,7 +2825,7 @@ public boolean isViewReferenced() { PTable.INITIAL_SEQ_NUM, pkName == null ? null : PNameFactory.newName(pkName), saltBucketNum, columns.values(), parent == null ? null : parent.getSchemaName(), parent == null ? null : parent.getTableName(), Collections.emptyList(), isImmutableRows, physicalNames, defaultFamilyName == null ? 
null : PNameFactory.newName(defaultFamilyName), viewStatement, Boolean.TRUE.equals(disableWAL), multiTenant, storeNulls, viewType, - result.getViewIndexId(), indexType, rowKeyOrderOptimizable, transactionProvider, updateCacheFrequency, 0L, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema, immutableStorageScheme, encodingScheme, cqCounterToBe, useStatsForParallelizationProp); + viewIndexType, result.getViewIndexId(), indexType, rowKeyOrderOptimizable, transactionProvider, updateCacheFrequency, 0L, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema, immutableStorageScheme, encodingScheme, cqCounterToBe, useStatsForParallelizationProp); result = new MetaDataMutationResult(code, result.getMutationTime(), table, true); addTableToCache(result); return table; @@ -3835,7 +3838,7 @@ else if (table.isAppendOnlySchema()) { PTableImpl viewIndexTable = new PTableImpl(sharedTableState.getTenantId(), sharedTableState.getSchemaName(), sharedTableState.getTableName(), ts, table.getColumnFamilies(), sharedTableState.getColumns(), - sharedTableState.getPhysicalNames(), sharedTableState.getViewIndexId(), + sharedTableState.getPhysicalNames(), sharedTableState.getViewIndexType(), sharedTableState.getViewIndexId(), table.isMultiTenant(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter(), table.useStatsForParallelization()); TableRef indexTableRef = new TableRef(viewIndexTable); PName indexTableTenantId = sharedTableState.getTenantId(); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java index bb81d7652ee..162317542c8 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java @@ -700,7 +700,8 @@ interface QualifierEncoderDecoder { ViewType getViewType(); String getViewStatement(); - Short getViewIndexId(); + Long getViewIndexId(); 
+ PDataType getViewIndexType(); PTableKey getKey(); IndexType getIndexType(); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java index eccb2893ad1..aabb3ecaf2a 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java @@ -73,6 +73,7 @@ import org.apache.phoenix.transaction.TransactionFactory; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.EncodedColumnsUtil; +import org.apache.phoenix.util.MetaDataUtil; import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.SizedUtil; @@ -138,7 +139,8 @@ public class PTableImpl implements PTable { private boolean storeNulls; private TransactionFactory.Provider transactionProvider; private ViewType viewType; - private Short viewIndexId; + private PDataType viewIndexType; + private Long viewIndexId; private int estimatedSize; private IndexType indexType; private int baseColumnCount; @@ -214,7 +216,7 @@ public PTableImpl(PName tenantId, String schemaName, String tableName, long time // For indexes stored in shared physical tables public PTableImpl(PName tenantId, PName schemaName, PName tableName, long timestamp, List families, - List columns, List physicalNames, Short viewIndexId, boolean multiTenant, boolean isNamespaceMpped, ImmutableStorageScheme storageScheme, QualifierEncodingScheme qualifierEncodingScheme, + List columns, List physicalNames,PDataType viewIndexType, Long viewIndexId, boolean multiTenant, boolean isNamespaceMpped, ImmutableStorageScheme storageScheme, QualifierEncodingScheme qualifierEncodingScheme, EncodedCQCounter encodedCQCounter, Boolean useStatsForParallelization) throws SQLException { this.pkColumns = this.allColumns = Collections.emptyList(); this.rowKeySchema = RowKeySchema.EMPTY_SCHEMA; @@ -228,7 +230,7 @@ public 
PTableImpl(PName tenantId, PName schemaName, PName tableName, long timest this.families = families; init(tenantId, this.schemaName, this.tableName, PTableType.INDEX, state, timeStamp, sequenceNumber, pkName, bucketNum, columns, this.schemaName, parentTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName, - null, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable, + null, disableWAL, multiTenant, storeNulls, viewType, viewIndexType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable, transactionProvider, updateCacheFrequency, indexDisableTimestamp, isNamespaceMpped, null, false, storageScheme, qualifierEncodingScheme, encodedCQCounter, useStatsForParallelization); } @@ -293,7 +295,8 @@ public static PTableImpl makePTable(PTable view, PTable baseTable, Collection columns, PName dataSchemaName, PName dataTableName, List indexes, boolean isImmutableRows, List physicalNames, PName defaultFamilyName, String viewExpression, - boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId, + boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, PDataType viewIndexType, Long viewIndexId, IndexType indexType, boolean rowKeyOrderOptimizable, TransactionFactory.Provider transactionProvider, long updateCacheFrequency, long indexDisableTimestamp, boolean isNamespaceMapped, String autoPartitionSeqName, boolean isAppendOnlySchema, ImmutableStorageScheme storageScheme, @@ -348,7 +351,7 @@ public static PTableImpl makePTable(PName tenantId, PName schemaName, PName tabl Boolean useStatsForParallelization) throws SQLException { return new PTableImpl(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName, bucketNum, columns, dataSchemaName, dataTableName, indexes, isImmutableRows, physicalNames, - defaultFamilyName, viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, + defaultFamilyName, 
viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexType, viewIndexId, indexType, QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT, rowKeyOrderOptimizable, transactionProvider, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped, autoPartitionSeqName, isAppendOnlySchema, storageScheme, qualifierEncodingScheme, encodedCQCounter, @@ -359,7 +362,7 @@ public static PTableImpl makePTable(PName tenantId, PName schemaName, PName tabl PIndexState state, long timeStamp, long sequenceNumber, PName pkName, Integer bucketNum, Collection columns, PName dataSchemaName, PName dataTableName, List indexes, boolean isImmutableRows, List physicalNames, PName defaultFamilyName, String viewExpression, - boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId, + boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, PDataType viewIndexType, Long viewIndexId, IndexType indexType, boolean rowKeyOrderOptimizable, TransactionFactory.Provider transactionProvider, long updateCacheFrequency, int baseColumnCount, long indexDisableTimestamp, boolean isNamespaceMapped, String autoPartitionSeqName, boolean isAppendOnlySchema, ImmutableStorageScheme storageScheme, @@ -367,7 +370,7 @@ public static PTableImpl makePTable(PName tenantId, PName schemaName, PName tabl Boolean useStatsForParallelization) throws SQLException { return new PTableImpl(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName, bucketNum, columns, dataSchemaName, dataTableName, indexes, isImmutableRows, physicalNames, - defaultFamilyName, viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, + defaultFamilyName, viewExpression, disableWAL, multiTenant, storeNulls, viewType,viewIndexType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable, transactionProvider, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped, autoPartitionSeqName, isAppendOnlySchema, storageScheme, 
qualifierEncodingScheme, encodedCQCounter, useStatsForParallelization); @@ -380,7 +383,7 @@ private PTableImpl(PTable table, boolean rowKeyOrderOptimizable, PIndexState sta init(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, table.getPKName(), table.getBucketNum(), columns, table.getParentSchemaName(), table.getParentTableName(), indexes, table.isImmutableRows(), table.getPhysicalNames(), defaultFamily, viewStatement, table.isWALDisabled(), - table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), + table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexType(), table.getViewIndexId(), table.getIndexType(), baseTableColumnCount, rowKeyOrderOptimizable, table.getTransactionProvider(), updateCacheFrequency, table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getImmutableStorageScheme(), @@ -391,13 +394,13 @@ private PTableImpl(PName tenantId, PName schemaName, PName tableName, PTableType long timeStamp, long sequenceNumber, PName pkName, Integer bucketNum, Collection columns, PName parentSchemaName, PName parentTableName, List indexes, boolean isImmutableRows, List physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL, boolean multiTenant, - boolean storeNulls, ViewType viewType, Short viewIndexId, IndexType indexType, + boolean storeNulls, ViewType viewType, PDataType viewIndexType, Long viewIndexId, IndexType indexType, int baseColumnCount, boolean rowKeyOrderOptimizable, TransactionFactory.Provider transactionProvider, long updateCacheFrequency, long indexDisableTimestamp, boolean isNamespaceMapped, String autoPartitionSeqName, boolean isAppendOnlySchema, ImmutableStorageScheme storageScheme, QualifierEncodingScheme qualifierEncodingScheme, EncodedCQCounter encodedCQCounter, Boolean useStatsForParallelization) throws SQLException { init(tenantId, schemaName, tableName, 
type, state, timeStamp, sequenceNumber, pkName, bucketNum, columns, parentSchemaName, parentTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName, - viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable, + viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable, transactionProvider, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped, autoPartitionSeqName, isAppendOnlySchema, storageScheme, qualifierEncodingScheme, encodedCQCounter, useStatsForParallelization); } @@ -431,7 +434,7 @@ public int getEstimatedSize() { private void init(PName tenantId, PName schemaName, PName tableName, PTableType type, PIndexState state, long timeStamp, long sequenceNumber, PName pkName, Integer bucketNum, Collection columns, PName parentSchemaName, PName parentTableName, List indexes, boolean isImmutableRows, List physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL, - boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId, + boolean multiTenant, boolean storeNulls, ViewType viewType,PDataType viewIndexType, Long viewIndexId, IndexType indexType , int baseColumnCount, boolean rowKeyOrderOptimizable, TransactionFactory.Provider transactionProvider, long updateCacheFrequency, long indexDisableTimestamp, boolean isNamespaceMapped, String autoPartitionSeqName, boolean isAppendOnlySchema, ImmutableStorageScheme storageScheme, QualifierEncodingScheme qualifierEncodingScheme, EncodedCQCounter encodedCQCounter, Boolean useStatsForParallelization) throws SQLException { @@ -462,6 +465,7 @@ private void init(PName tenantId, PName schemaName, PName tableName, PTableType this.multiTenant = multiTenant; this.storeNulls = storeNulls; this.viewType = viewType; + this.viewIndexType = viewIndexType; this.viewIndexId = viewIndexId; this.indexType = indexType; 
this.transactionProvider = transactionProvider; @@ -1201,10 +1205,15 @@ public boolean isWALDisabled() { } @Override - public Short getViewIndexId() { + public Long getViewIndexId() { return viewIndexId; } + @Override + public PDataType getViewIndexType() { + return viewIndexType; + } + @Override public PName getTenantId() { return tenantId; @@ -1233,10 +1242,13 @@ public static PTable createFromProto(PTableProtos.PTable table) { if (table.hasIndexState()) { indexState = PIndexState.fromSerializedValue(table.getIndexState()); } - Short viewIndexId = null; + Long viewIndexId = null; if(table.hasViewIndexId()){ - viewIndexId = (short)table.getViewIndexId(); + viewIndexId = (long)table.getViewIndexId(); } + PDataType viewIndexType = table.hasUseLongViewIndexId() + ? MetaDataUtil.getViewIndexIdDataType() + : MetaDataUtil.getLegacyViewIndexIdDataType(); IndexType indexType = IndexType.getDefault(); if(table.hasIndexType()){ indexType = IndexType.fromSerializedValue(table.getIndexType().toByteArray()[0]); @@ -1350,7 +1362,7 @@ public static PTable createFromProto(PTableProtos.PTable table) { result.init(tenantId, schemaName, tableName, tableType, indexState, timeStamp, sequenceNumber, pkName, (bucketNum == NO_SALTING) ? 
null : bucketNum, columns, parentSchemaName, parentTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName, viewStatement, disableWAL, - multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable, + multiTenant, storeNulls, viewType, viewIndexType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable, transactionProvider, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped, autoParititonSeqName, isAppendOnlySchema, storageScheme, qualifierEncodingScheme, encodedColumnQualifierCounter, useStatsForParallelization); return result; @@ -1373,7 +1385,8 @@ public static PTableProtos.PTable toProto(PTable table) { } if(table.getViewIndexId() != null) { builder.setViewIndexId(table.getViewIndexId()); - } + builder.setUseLongViewIndexId(MetaDataUtil.getViewIndexIdDataType().equals(table.getViewIndexType())); + } if(table.getIndexType() != null) { builder.setIndexType(ByteStringer.wrap(new byte[]{table.getIndexType().getSerializedValue()})); } diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java index 596c3eb2d28..ea45a8b75a5 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java @@ -573,6 +573,10 @@ public static SequenceKey getViewIndexSequenceKey(String tenantId, PName physica } public static PDataType getViewIndexIdDataType() { + return PLong.INSTANCE; + } + + public static PDataType getLegacyViewIndexIdDataType() { return PSmallint.INSTANCE; } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/TenantSpecificViewIndexCompileTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/TenantSpecificViewIndexCompileTest.java index 3ab6a1990c5..7b54cf1cc8a 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/TenantSpecificViewIndexCompileTest.java +++ 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/TenantSpecificViewIndexCompileTest.java @@ -50,7 +50,7 @@ public void testOrderByOptimizedOut() throws Exception { conn.createStatement().execute("CREATE INDEX i1 ON v(v2) INCLUDE(v1)"); ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT v1,v2 FROM v WHERE v2 > 'a' ORDER BY v2"); - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_T [-32768,'me','a'] - [-32768,'me',*]", + assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_T [-9223372036854775808,'me','a'] - [-9223372036854775808,'me',*]", QueryUtil.getExplainPlan(rs)); } @@ -194,7 +194,7 @@ public void testViewConstantsOptimizedOut() throws Exception { conn.createStatement().execute("CREATE INDEX i1 ON v(v2)"); ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT v2 FROM v WHERE v2 > 'a' and k2 = 'a' ORDER BY v2,k2"); - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_T [-32768,'me','a'] - [-32768,'me',*]\n" + + assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_T [-9223372036854775808,'me','a'] - [-9223372036854775808,'me',*]\n" + " SERVER FILTER BY FIRST KEY ONLY", QueryUtil.getExplainPlan(rs)); @@ -227,7 +227,7 @@ public void testViewConstantsOptimizedOutOnReadOnlyView() throws Exception { // Confirm that a read-only view on an updatable view still optimizes out the read-only parts of the updatable view ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT v2 FROM v2 WHERE v3 > 'a' and k2 = 'a' ORDER BY v3,k2"); - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_T [-32768,'me','a'] - [-32768,'me',*]", + assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_T [-9223372036854775808,'me','a'] - [-9223372036854775808,'me',*]", QueryUtil.getExplainPlan(rs)); } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java index 2e881b8392b..26caa0de00b 100644 --- 
a/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java @@ -261,7 +261,7 @@ private TableRef createProjectedTableFromLiterals(Object[] row) { PTable pTable = PTableImpl.makePTable(null, PName.EMPTY_NAME, PName.EMPTY_NAME, PTableType.SUBQUERY, null, MetaDataProtocol.MIN_TABLE_TIMESTAMP, PTable.INITIAL_SEQ_NUM, null, null, columns, null, null, Collections.emptyList(), - false, Collections.emptyList(), null, null, false, false, false, null, + false, Collections.emptyList(), null, null, false, false, false, null, null, null, null, true, null, 0, 0L, Boolean.FALSE, null, false, ImmutableStorageScheme.ONE_CELL_PER_COLUMN, QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, EncodedCQCounter.NULL_COUNTER, true); TableRef sourceTable = new TableRef(pTable); List sourceColumnRefs = Lists. newArrayList(); diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java index 6bf298ef4b8..f400d0bed24 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java @@ -184,7 +184,7 @@ private TableRef createProjectedTableFromLiterals(Object[] row) { PTable pTable = PTableImpl.makePTable(null, PName.EMPTY_NAME, PName.EMPTY_NAME, PTableType.SUBQUERY, null, MetaDataProtocol.MIN_TABLE_TIMESTAMP, PTable.INITIAL_SEQ_NUM, null, null, columns, null, null, Collections. emptyList(), false, Collections. 
emptyList(), null, null, false, false, - false, null, null, null, true, null, 0, 0L, false, null, false, ImmutableStorageScheme.ONE_CELL_PER_COLUMN, QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, EncodedCQCounter.NULL_COUNTER, true); + false, null, null, null, null, true, null, 0, 0L, false, null, false, ImmutableStorageScheme.ONE_CELL_PER_COLUMN, QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, EncodedCQCounter.NULL_COUNTER, true); TableRef sourceTable = new TableRef(pTable); List sourceColumnRefs = Lists. newArrayList(); for (PColumn column : sourceTable.getTable().getColumns()) { diff --git a/phoenix-protocol/src/main/MetaDataService.proto b/phoenix-protocol/src/main/MetaDataService.proto index 13d8f1aa8ba..d7bd3a6fc36 100644 --- a/phoenix-protocol/src/main/MetaDataService.proto +++ b/phoenix-protocol/src/main/MetaDataService.proto @@ -60,7 +60,8 @@ message SharedTableState { required bytes tableName = 3; repeated PColumn columns = 4; repeated bytes physicalNames = 5; - required int32 viewIndexId = 6; + required int64 viewIndexId = 6; + optional bool useLongViewIndexId = 7; } message MetaDataResponse { @@ -75,7 +76,8 @@ message MetaDataResponse { repeated SharedTableState sharedTablesToDelete = 9; optional PSchema schema = 10; optional int64 autoPartitionNum = 11; - optional int32 viewIndexId = 12; + optional int64 viewIndexId = 12; + optional bool useLongViewIndexId = 13; } message GetTableRequest { diff --git a/phoenix-protocol/src/main/PTable.proto b/phoenix-protocol/src/main/PTable.proto index bc868bcee68..15d2e33c152 100644 --- a/phoenix-protocol/src/main/PTable.proto +++ b/phoenix-protocol/src/main/PTable.proto @@ -85,7 +85,7 @@ message PTable { optional bytes viewStatement = 18; repeated bytes physicalNames = 19; optional bytes tenantId = 20; - optional int32 viewIndexId = 21; + optional int64 viewIndexId = 21; optional bytes indexType = 22; optional int64 statsTimeStamp = 23; optional bool storeNulls = 24; @@ -103,6 +103,7 @@ message PTable { repeated 
EncodedCQCounter encodedCQCounters = 36; optional bool useStatsForParallelization = 37; optional int32 transactionProvider = 38; + optional bool useLongViewIndexId = 39; } message EncodedCQCounter { diff --git a/phoenix-protocol/src/main/ServerCachingService.proto b/phoenix-protocol/src/main/ServerCachingService.proto index c059a1a4b51..7532a4b123d 100644 --- a/phoenix-protocol/src/main/ServerCachingService.proto +++ b/phoenix-protocol/src/main/ServerCachingService.proto @@ -62,6 +62,7 @@ message IndexMaintainer { repeated ColumnInfo indexedColumnInfo = 19; required int32 encodingScheme = 20; required int32 immutableStorageScheme = 21; + optional bool useLongViewIndex = 22; } message AddServerCacheRequest { From 394ccf8512fd4d8fcfb1ddbb5d4126861111ba9c Mon Sep 17 00:00:00 2001 From: Mahdi Salarkia Date: Fri, 27 Jul 2018 22:27:46 -0700 Subject: [PATCH 02/14] PHOENIX-3547 Supporting more number of indices per table. Currently the number of indices per Phoenix table is bound to maximum of 65535 (java.lang.Short) which is a limitation for applications requiring to have unlimited number of indices. This change will consider any new table created in Phoenix to support view index ids to be in the range of -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807 (java.lang.Long) which is undoubtedly big enough to cover this requirement. Any existing Phoenix table will still continue to support only maximum of 65535 of indices. A new boolean column (USE_LONG_VIEW_INDEX BOOLEAN DEFAULT FALSE) is added to SYSTEM.CATALOG to specify each Phoenix table's support for large number of indices. On each new Phoenix table creation the value for USE_LONG_VIEW_INDEX will be set to `true` while this value would be false for any existing table.
--- .../phoenix/coprocessor/MetaDataEndpointImpl.java | 10 ++-------- .../org/apache/phoenix/index/IndexMaintainer.java | 2 +- .../java/org/apache/phoenix/iterate/ExplainTable.java | 11 +++++------ .../phoenix/query/ConnectionQueryServicesImpl.java | 8 ++++++++ 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java index 848613a31e9..8b0409a8c7f 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java @@ -2223,12 +2223,6 @@ public void createTable(RpcController controller, CreateTableRequest request, throw sqlExceptions[0]; } long seqValue = seqValues[0]; - if (seqValue > Long.MAX_VALUE) { - builder.setReturnCode(MetaDataProtos.MutationCode.TOO_MANY_INDEXES); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } Put tableHeaderPut = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetadata); NavigableMap> familyCellMap = tableHeaderPut.getFamilyCellMap(); @@ -2246,7 +2240,7 @@ public void createTable(RpcController controller, CreateTableRequest request, VIEW_INDEX_ID_BYTES.length, cell.getTimestamp(), bytes, 0, bytes.length, cell.getType()); cells.add(indexIdCell); - indexId = (long) seqValue; + indexId = seqValue; } } @@ -4572,4 +4566,4 @@ private TableName getParentPhysicalTableName(PTable table) { table.getTableName(), table.isNamespaceMapped()) .getBytes()); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java index 25ce07a2da4..f5827081a46 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java +++ 
b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java @@ -1225,7 +1225,7 @@ public void readFields(DataInput input) throws IOException { boolean hasViewIndexId = encodedIndexedColumnsAndViewId < 0; if (hasViewIndexId) { // Fixed length - //Use leacy viewIndexIdType for clients older than 4.10 release + //Use legacy viewIndexIdType for clients older than 4.10 release viewIndexId = new byte[MetaDataUtil.getLegacyViewIndexIdDataType().getByteSize()]; viewIndexType = MetaDataUtil.getLegacyViewIndexIdDataType(); input.readFully(viewIndexId); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java index 6af5b5a5c95..2671044a78f 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java @@ -205,20 +205,19 @@ private void appendPKColumnValue(StringBuilder buf, byte[] range, Boolean isNull range = ptr.get(); } if (changeViewIndexId) { - PDataType viewIndexDataType = tableRef.getTable().getViewIndexType(); - buf.append(getViewIndexValue(type, range, viewIndexDataType).toString()); + buf.append(getViewIndexValue(type, range).toString()); } else { Format formatter = context.getConnection().getFormatter(type); buf.append(type.toStringLiteral(range, formatter)); } } - private Long getViewIndexValue(PDataType type, byte[] range, PDataType viewIndexDataType){ - boolean useLongViewIndex = MetaDataUtil.getViewIndexIdDataType().equals(viewIndexDataType); - Object s = type.toObject(range); + private Long getViewIndexValue(PDataType type, byte[] range) { + boolean useLongViewIndex = MetaDataUtil.getViewIndexIdDataType().equals(type); + Object s = type.toObject(range); return (useLongViewIndex ? (Long) s : (Short) s) - (useLongViewIndex ? 
Long.MAX_VALUE : Short.MAX_VALUE); } - + private static class RowKeyValueIterator implements Iterator { private final RowKeySchema schema; private ImmutableBytesWritable ptr = new ImmutableBytesWritable(); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java index 605a6359d41..863b4655f72 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java @@ -3037,6 +3037,14 @@ protected PhoenixConnection upgradeSystemCatalogIfRequired(PhoenixConnection met addViewIndexToParentLinks(metaConnection); moveChildLinks(metaConnection); } + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_5_1_0) { + metaConnection = addColumnsIfNotExists( + metaConnection, + PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_5_1_0, + PhoenixDatabaseMetaData.USE_LONG_VIEW_INDEX + " " + + PBoolean.INSTANCE.getSqlTypeName()); + } return metaConnection; } From 8b270b86d8779369829e65119227c6f194eae20f Mon Sep 17 00:00:00 2001 From: Thomas D'Silva Date: Sat, 28 Jul 2018 11:49:08 -0700 Subject: [PATCH 03/14] PHOENIX-4824 Update BRANCH_NAMES in dev/test-patch.properties --- dev/test-patch.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev/test-patch.properties b/dev/test-patch.properties index ea8d75dda94..b3dc46f7079 100644 --- a/dev/test-patch.properties +++ b/dev/test-patch.properties @@ -27,7 +27,7 @@ MAX_LINE_LENGTH=100 # All supported branches for testing with precommit build # be sure to consider branch name prefixes in the order, ie, 4.x should appear # before 4 since the latter is a prefix -BRANCH_NAMES="4.x-HBase-0.98 4.x-HBase-1.1 4.x-HBase-1.2 master" +BRANCH_NAMES="4.x-HBase-1.2 4.x-HBase-1.3 4.x-HBase-1.4 master" # All supported Hadoop 
versions that we want to test the compilation with From 1b2457066ba374efff8062406d9271394932c5a2 Mon Sep 17 00:00:00 2001 From: Ankit Singhal Date: Mon, 30 Jul 2018 13:51:43 -0700 Subject: [PATCH 04/14] PHOENIX-4825 Replace usage of HBase Base64 implementation with java.util.Base64 --- .../apache/phoenix/end2end/QueryMoreIT.java | 7 +++-- .../phoenix/mapreduce/CsvBulkImportUtil.java | 8 +++-- .../util/PhoenixConfigurationUtil.java | 7 +++-- .../phoenix/schema/types/PVarbinary.java | 4 +-- .../phoenix/util/csv/CsvUpsertExecutor.java | 4 +-- .../phoenix/util/json/JsonUpsertExecutor.java | 4 +-- .../util/AbstractUpsertExecutorTest.java | 12 ++++---- .../util/TenantIdByteConversionTest.java | 30 +++++++++++++++---- 8 files changed, 50 insertions(+), 26 deletions(-) diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java index 04272fa5df5..528fe7f85c7 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java @@ -31,12 +31,13 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; +import java.util.Base64; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; -import org.apache.hadoop.hbase.util.Base64; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.query.QueryServices; @@ -278,7 +279,7 @@ private String[] getRecordsOutofCursorTable(String tableOrViewName, boolean quer values[i] = rs.getObject(i + 1); } conn = getTenantSpecificConnection(tenantId); - pkIds.add(Base64.encodeBytes(PhoenixRuntime.encodeColumnValues(conn, tableOrViewName.toUpperCase(), values, columns))); + pkIds.add(Bytes.toString(Base64.getEncoder().encode(PhoenixRuntime.encodeColumnValues(conn, 
tableOrViewName.toUpperCase(), values, columns)))); } return pkIds.toArray(new String[pkIds.size()]); } @@ -296,7 +297,7 @@ private List doQueryMore(boolean queryAgainstTenantView, String tenantId PreparedStatement stmt = conn.prepareStatement(query); int bindCounter = 1; for (int i = 0; i < cursorIds.length; i++) { - Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, tableName.toUpperCase(), Base64.decode(cursorIds[i]), columns); + Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, tableName.toUpperCase(), Base64.getDecoder().decode(cursorIds[i]), columns); for (int j = 0; j < pkParts.length; j++) { stmt.setObject(bindCounter++, pkParts[j]); } diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java index ff9ff727116..bf5a538cc5d 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java @@ -17,9 +17,11 @@ */ package org.apache.phoenix.mapreduce; +import java.util.Base64; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.util.Base64; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.query.QueryServices; @@ -68,7 +70,7 @@ public static void configurePreUpsertProcessor(Configuration conf, @VisibleForTesting static void setChar(Configuration conf, String confKey, char charValue) { - conf.set(confKey, Base64.encodeBytes(Character.toString(charValue).getBytes())); + conf.set(confKey, Bytes.toString(Base64.getEncoder().encode(Character.toString(charValue).getBytes()))); } @VisibleForTesting @@ -77,7 +79,7 @@ static Character getCharacter(Configuration conf, String confKey) { if (strValue == null) { return null; } - return new 
String(Base64.decode(strValue)).charAt(0); + return new String(Base64.getDecoder().decode(strValue)).charAt(0); } public static Path getOutputPath(Path outputdir, String tableName) { diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java index f3f0415edd4..3b63f668c0a 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.sql.Connection; import java.sql.SQLException; +import java.util.Base64; import java.util.List; import java.util.Map; import java.util.Properties; @@ -35,7 +36,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; -import org.apache.hadoop.hbase.util.Base64; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.db.DBInputFormat.NullDBWritable; import org.apache.hadoop.mapreduce.lib.db.DBWritable; @@ -506,14 +507,14 @@ public static ImportPreUpsertKeyValueProcessor loadPreUpsertProcessor(Configurat public static byte[] getIndexMaintainers(final Configuration configuration){ Preconditions.checkNotNull(configuration); - return Base64.decode(configuration.get(INDEX_MAINTAINERS)); + return Base64.getDecoder().decode(configuration.get(INDEX_MAINTAINERS)); } public static void setIndexMaintainers(final Configuration configuration, final ImmutableBytesWritable indexMetaDataPtr) { Preconditions.checkNotNull(configuration); Preconditions.checkNotNull(indexMetaDataPtr); - configuration.set(INDEX_MAINTAINERS, Base64.encodeBytes(indexMetaDataPtr.get())); + 
configuration.set(INDEX_MAINTAINERS,Bytes.toString(Base64.getEncoder().encode(indexMetaDataPtr.get()))); } public static void setDisableIndexes(Configuration configuration, String indexName) { diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarbinary.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarbinary.java index b3ce57ad4be..e165a9cd086 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarbinary.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarbinary.java @@ -19,8 +19,8 @@ import java.sql.Types; import java.text.Format; +import java.util.Base64; -import org.apache.hadoop.hbase.util.Base64; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.util.ByteUtil; @@ -131,7 +131,7 @@ public Object toObject(String value) { if (value == null || value.length() == 0) { return null; } - Object object = Base64.decode(value); + Object object = Base64.getDecoder().decode(value); if (object == null) { throw newIllegalDataException( "Input: [" + value + "] is not base64 encoded"); } return object; diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java index cd40b4415f7..4f98adab644 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java @@ -21,13 +21,13 @@ import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Types; +import java.util.Base64; import java.util.List; import java.util.Properties; import javax.annotation.Nullable; import org.apache.commons.csv.CSVRecord; -import org.apache.hadoop.hbase.util.Base64; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.expression.function.EncodeFormat; import org.apache.phoenix.query.QueryServices; @@ -189,7 
+189,7 @@ public Object apply(@Nullable String input) { Object object = null; switch (format) { case BASE64: - object = Base64.decode(input); + object = Base64.getDecoder().decode(input); if (object == null) { throw new IllegalDataException( "Input: [" + input + "] is not base64 encoded"); } break; diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java index ffa797dfa9d..867a4cb06b7 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java @@ -22,13 +22,13 @@ import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Types; +import java.util.Base64; import java.util.List; import java.util.Map; import java.util.Properties; import javax.annotation.Nullable; -import org.apache.hadoop.hbase.util.Base64; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.expression.function.EncodeFormat; import org.apache.phoenix.query.QueryServices; @@ -212,7 +212,7 @@ public Object apply(@Nullable Object input) { Object object = null; switch (format) { case BASE64: - object = Base64.decode(input.toString()); + object = Base64.getDecoder().decode(input.toString()); if (object == null) { throw new IllegalDataException( "Input: [" + input + "] is not base64 encoded"); } break; diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java index 2b2544dd531..02bf548077f 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java @@ -30,10 +30,10 @@ import java.sql.SQLException; import java.sql.Types; import java.util.Arrays; +import java.util.Base64; import java.util.List; 
import java.util.Properties; -import org.apache.hadoop.hbase.util.Base64; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.query.BaseConnectionlessQueryTest; import org.apache.phoenix.query.QueryServices; @@ -81,7 +81,7 @@ public void tearDown() throws SQLException { @Test public void testExecute() throws Exception { byte[] binaryData=(byte[])PBinary.INSTANCE.getSampleValue(); - String encodedBinaryData = Base64.encodeBytes(binaryData); + String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); getUpsertExecutor().execute(createRecord(123L, "NameValue", 42, Arrays.asList(1, 2, 3), true, encodedBinaryData)); @@ -110,7 +110,7 @@ public void testExecute_TooFewFields() throws Exception { @Test public void testExecute_TooManyFields() throws Exception { byte[] binaryData=(byte[])PBinary.INSTANCE.getSampleValue(); - String encodedBinaryData = Base64.encodeBytes(binaryData); + String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); R recordWithTooManyFields = createRecord(123L, "NameValue", 42, Arrays.asList(1, 2, 3), true, encodedBinaryData, "garbage"); getUpsertExecutor().execute(recordWithTooManyFields); @@ -131,7 +131,7 @@ public void testExecute_TooManyFields() throws Exception { @Test public void testExecute_NullField() throws Exception { byte[] binaryData=(byte[])PBinary.INSTANCE.getSampleValue(); - String encodedBinaryData = Base64.encodeBytes(binaryData); + String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); getUpsertExecutor().execute(createRecord(123L, "NameValue", null, Arrays.asList(1, 2, 3), false, encodedBinaryData)); @@ -151,7 +151,7 @@ public void testExecute_NullField() throws Exception { @Test public void testExecute_InvalidType() throws Exception { byte[] binaryData=(byte[])PBinary.INSTANCE.getSampleValue(); - String encodedBinaryData = Base64.encodeBytes(binaryData); + String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); 
R recordWithInvalidType = createRecord(123L, "NameValue", "ThisIsNotANumber", Arrays.asList(1, 2, 3), true, encodedBinaryData); getUpsertExecutor().execute(recordWithInvalidType); @@ -163,7 +163,7 @@ public void testExecute_InvalidType() throws Exception { @Test public void testExecute_InvalidBoolean() throws Exception { byte[] binaryData=(byte[])PBinary.INSTANCE.getSampleValue(); - String encodedBinaryData = Base64.encodeBytes(binaryData); + String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); R csvRecordWithInvalidType = createRecord("123,NameValue,42,1:2:3,NotABoolean,"+encodedBinaryData); getUpsertExecutor().execute(csvRecordWithInvalidType); diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TenantIdByteConversionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TenantIdByteConversionTest.java index fb70d228796..3ef9230f966 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/TenantIdByteConversionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TenantIdByteConversionTest.java @@ -22,20 +22,40 @@ import static org.junit.Assert.fail; import java.sql.SQLException; -import org.apache.hadoop.hbase.util.Base64; +import java.util.Base64; import java.util.Collection; import java.util.List; -import org.apache.phoenix.schema.*; -import org.apache.phoenix.schema.types.*; +import org.apache.phoenix.schema.PDatum; +import org.apache.phoenix.schema.PName; +import org.apache.phoenix.schema.PNameFactory; +import org.apache.phoenix.schema.RowKeySchema; import org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder; +import org.apache.phoenix.schema.SortOrder; +import org.apache.phoenix.schema.types.PBinary; +import org.apache.phoenix.schema.types.PBoolean; +import org.apache.phoenix.schema.types.PChar; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; +import org.apache.phoenix.schema.types.PDouble; +import 
org.apache.phoenix.schema.types.PFloat; +import org.apache.phoenix.schema.types.PInteger; +import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.schema.types.PSmallint; +import org.apache.phoenix.schema.types.PTinyint; +import org.apache.phoenix.schema.types.PUnsignedDouble; +import org.apache.phoenix.schema.types.PUnsignedFloat; +import org.apache.phoenix.schema.types.PUnsignedInt; +import org.apache.phoenix.schema.types.PUnsignedLong; +import org.apache.phoenix.schema.types.PUnsignedSmallint; +import org.apache.phoenix.schema.types.PUnsignedTinyint; +import org.apache.phoenix.schema.types.PVarchar; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; import com.google.common.collect.Lists; -import org.mockito.Mockito; /*Test the getTenantIdBytes method in ScanUtil*/ @RunWith(Parameterized.class) @@ -201,7 +221,7 @@ public static Collection data() { //Binary byte[] bytes = new byte[] {0, 1, 2, 3}; - String byteString = new String( Base64.encodeBytes(bytes) ); + String byteString = new String( Base64.getEncoder().encode(bytes) ); testCases.add(new Object[] { getDataSchema(PBinary.INSTANCE, SortOrder.getDefault()), false, From 0126a934488f935459cb1a4a7ee0948c263b4456 Mon Sep 17 00:00:00 2001 From: Ankit Singhal Date: Mon, 30 Jul 2018 13:52:21 -0700 Subject: [PATCH 05/14] PHOENIX-4826 Changes to support HBase 2.0.1 --- .../covered/data/DelegateComparator.java | 83 +++++++++++++++++++ .../index/covered/data/IndexMemStore.java | 6 +- .../index/covered/data/TestIndexMemStore.java | 6 +- pom.xml | 2 +- 4 files changed, 90 insertions(+), 7 deletions(-) create mode 100644 phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/DelegateComparator.java diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/DelegateComparator.java 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/DelegateComparator.java new file mode 100644 index 00000000000..478d98bf2cc --- /dev/null +++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/DelegateComparator.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.phoenix.hbase.index.covered.data; + +import java.util.Comparator; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; + +public class DelegateComparator implements CellComparator { + + private CellComparator delegate; + + public DelegateComparator(CellComparator delegate) { + this.delegate=delegate; + } + + @Override + public int compare(Cell leftCell, Cell rightCell) { + return delegate.compare(leftCell, rightCell); + } + + @Override + public int compareRows(Cell leftCell, Cell rightCell) { + return delegate.compareRows(leftCell, rightCell); + } + + @Override + public int compareRows(Cell cell, byte[] bytes, int offset, int length) { + return delegate.compareRows(cell, bytes, offset, length); + } + + @Override + public int compareWithoutRow(Cell leftCell, Cell rightCell) { + return delegate.compareWithoutRow(leftCell, rightCell); + } + + @Override + public int compareFamilies(Cell leftCell, Cell rightCell) { + return delegate.compareFamilies(leftCell, rightCell); + } + + @Override + public int compareQualifiers(Cell leftCell, Cell rightCell) { + return delegate.compareQualifiers(leftCell, rightCell); + } + + @Override + public int compareTimestamps(Cell leftCell, Cell rightCell) { + return delegate.compareTimestamps(leftCell, rightCell); + } + + @Override + public int compareTimestamps(long leftCellts, long rightCellts) { + return delegate.compareTimestamps(leftCellts, rightCellts); + } + + @Override + public int compare(Cell leftCell, Cell rightCell, boolean ignoreSequenceid) { + return delegate.compare(leftCell, rightCell, ignoreSequenceid); + } + + @Override + public Comparator getSimpleComparator() { + return delegate.getSimpleComparator(); + } + +} diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java index 824749678b8..301d8257c39 100644 --- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java @@ -79,10 +79,10 @@ public class IndexMemStore implements KeyValueStore { private CellComparator comparator; public IndexMemStore() { - this(new CellComparatorImpl(){ + this(new DelegateComparator(new CellComparatorImpl()){ @Override - public int compare(Cell a, Cell b) { - return super.compare(a, b, true); + public int compare(Cell leftCell, Cell rightCell) { + return super.compare(leftCell, rightCell, true); } }); } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java index 0f5f995a00e..e40cdd7f3c4 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java @@ -39,10 +39,10 @@ public class TestIndexMemStore { @Test public void testCorrectOverwritting() throws Exception { - IndexMemStore store = new IndexMemStore(new CellComparatorImpl(){ + IndexMemStore store = new IndexMemStore(new DelegateComparator(new CellComparatorImpl()){ @Override - public int compare(Cell a, Cell b) { - return super.compare(a, b, true); + public int compare(Cell leftCell, Cell rightCell) { + return super.compare(leftCell, rightCell, true); } }); long ts = 10; diff --git a/pom.xml b/pom.xml index 8515d714735..7fe1b3937b3 100644 --- a/pom.xml +++ b/pom.xml @@ -66,7 +66,7 @@ ${project.basedir} - 2.0.0 + 2.0.1 3.0.0 From 38021c78f5531e89f0dce70de0b155fbc8342e6d Mon Sep 17 00:00:00 2001 From: Josh Elser Date: Tue, 31 Jul 2018 15:53:11 -0400 Subject: [PATCH 06/14] PHOENIX-4822 Ensure the provided timezone is used client-side (Jaanai Zhang) --- .../apache/phoenix/end2end/DateTimeIT.java | 77 +++++++++++++++++++ 
.../phoenix/compile/StatementContext.java | 11 +-- .../phoenix/jdbc/PhoenixConnection.java | 8 +- .../org/apache/phoenix/util/DateUtil.java | 22 +++--- 4 files changed, 101 insertions(+), 17 deletions(-) diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java index c976114a149..cc7c7a788d9 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java @@ -54,12 +54,19 @@ import java.util.Calendar; import java.util.GregorianCalendar; import java.util.Properties; +import java.util.TimeZone; +import org.apache.commons.lang.time.FastDateFormat; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.compile.StatementContext; +import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.jdbc.PhoenixStatement; import org.apache.phoenix.query.QueryConstants; +import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDate; +import org.apache.phoenix.schema.types.PTime; import org.apache.phoenix.schema.types.PTimestamp; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.DateUtil; @@ -1880,4 +1887,74 @@ public void testTimestamp() throws Exception { conn.close(); } } + + @Test + public void testDateFormatTimeZone()throws Exception { + String[] timeZoneIDs = {DateUtil.DEFAULT_TIME_ZONE_ID, "Asia/Yerevan", "Australia/Adelaide", "Asia/Tokyo"}; + for (String timeZoneID : timeZoneIDs) { + testDateFormatTimeZone(timeZoneID); + } + } + + public void testDateFormatTimeZone(String timeZoneId) throws Exception { + Properties props = new Properties(); + props.setProperty("phoenix.query.dateFormatTimeZone", timeZoneId); + Connection conn1 = DriverManager.getConnection(getUrl(), props); + + String 
tableName = generateUniqueName(); + String ddl = "CREATE TABLE IF NOT EXISTS " + tableName + + " (k1 INTEGER PRIMARY KEY," + + " v_date DATE," + + " v_time TIME," + + " v_timestamp TIMESTAMP)"; + try { + conn1.createStatement().execute(ddl); + + PhoenixConnection pConn = conn1.unwrap(PhoenixConnection.class); + verifyTimeZoneIDWithConn(pConn, PDate.INSTANCE, timeZoneId); + verifyTimeZoneIDWithConn(pConn, PTime.INSTANCE, timeZoneId); + verifyTimeZoneIDWithConn(pConn, PTimestamp.INSTANCE, timeZoneId); + + Calendar cal = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId)); + cal.setTime(date); + String dateStr = DateUtil.getDateFormatter(DateUtil.DEFAULT_MS_DATE_FORMAT).format(date); + + String dml = "UPSERT INTO " + tableName + " VALUES (" + + "1," + + "'" + dateStr + "'," + + "'" + dateStr + "'," + + "'" + dateStr + "'" + + ")"; + conn1.createStatement().execute(dml); + conn1.commit(); + + PhoenixStatement stmt = conn1.createStatement().unwrap(PhoenixStatement.class); + ResultSet rs = stmt.executeQuery("SELECT v_date, v_time, v_timestamp FROM " + tableName); + + assertTrue(rs.next()); + assertEquals(rs.getDate(1).toString(), new Date(cal.getTimeInMillis()).toString()); + assertEquals(rs.getTime(2).toString(), new Time(cal.getTimeInMillis()).toString()); + assertEquals(rs.getTimestamp(3).getTime(), cal.getTimeInMillis()); + assertFalse(rs.next()); + + StatementContext stmtContext = stmt.getQueryPlan().getContext(); + verifyTimeZoneIDWithFormatter(stmtContext.getDateFormatter(), timeZoneId); + verifyTimeZoneIDWithFormatter(stmtContext.getTimeFormatter(), timeZoneId); + verifyTimeZoneIDWithFormatter(stmtContext.getTimestampFormatter(), timeZoneId); + + stmt.close(); + } finally { + conn1.close(); + } + } + + private void verifyTimeZoneIDWithConn(PhoenixConnection conn, PDataType dataType, String timeZoneId) { + Format formatter = conn.getFormatter(dataType); + verifyTimeZoneIDWithFormatter(formatter, timeZoneId); + } + + private void 
verifyTimeZoneIDWithFormatter(Format formatter, String timeZoneId) { + assertTrue(formatter instanceof FastDateFormat); + assertEquals(((FastDateFormat)formatter).getTimeZone().getID(), timeZoneId); + } } diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java index fe60bb9aca1..eb195c2bdb2 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java @@ -120,14 +120,15 @@ public StatementContext(PhoenixStatement statement, ColumnResolver resolver, Sca this.expressions = new ExpressionManager(); PhoenixConnection connection = statement.getConnection(); ReadOnlyProps props = connection.getQueryServices().getProps(); + String timeZoneID = props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, + DateUtil.DEFAULT_TIME_ZONE_ID); this.dateFormat = props.get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT); - this.dateFormatter = DateUtil.getDateFormatter(dateFormat); + this.dateFormatter = DateUtil.getDateFormatter(dateFormat, timeZoneID); this.timeFormat = props.get(QueryServices.TIME_FORMAT_ATTRIB, DateUtil.DEFAULT_TIME_FORMAT); - this.timeFormatter = DateUtil.getTimeFormatter(timeFormat); + this.timeFormatter = DateUtil.getTimeFormatter(timeFormat, timeZoneID); this.timestampFormat = props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB, DateUtil.DEFAULT_TIMESTAMP_FORMAT); - this.timestampFormatter = DateUtil.getTimestampFormatter(timestampFormat); - this.dateFormatTimeZone = DateUtil.getTimeZone(props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, - DateUtil.DEFAULT_TIME_ZONE_ID)); + this.timestampFormatter = DateUtil.getTimestampFormatter(timestampFormat, timeZoneID); + this.dateFormatTimeZone = DateUtil.getTimeZone(timeZoneID); this.numberFormat = props.get(QueryServices.NUMBER_FORMAT_ATTRIB, NumberUtil.DEFAULT_NUMBER_FORMAT); this.tempPtr = 
new ImmutableBytesWritable(); this.currentTable = resolver != null && !resolver.getTables().isEmpty() ? resolver.getTables().get(0) : null; diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java index 3d9b2614b07..6da579f315e 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java @@ -327,9 +327,11 @@ public ReadOnlyProps getProps() { int maxSizeBytes = this.services.getProps().getInt( QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES); - Format dateFormat = DateUtil.getDateFormatter(datePattern); - Format timeFormat = DateUtil.getDateFormatter(timePattern); - Format timestampFormat = DateUtil.getDateFormatter(timestampPattern); + String timeZoneID = this.services.getProps().get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, + DateUtil.DEFAULT_TIME_ZONE_ID); + Format dateFormat = DateUtil.getDateFormatter(datePattern, timeZoneID); + Format timeFormat = DateUtil.getDateFormatter(timePattern, timeZoneID); + Format timestampFormat = DateUtil.getDateFormatter(timestampPattern, timeZoneID); formatters.put(PDate.INSTANCE, dateFormat); formatters.put(PTime.INSTANCE, timeFormat); formatters.put(PTimestamp.INSTANCE, timestampFormat); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java index 9e37eca085b..f67f152fac5 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/DateUtil.java @@ -97,7 +97,7 @@ public static PDataCodec getCodecFor(PDataType type) { public static TimeZone getTimeZone(String timeZoneId) { TimeZone parserTimeZone; - if (timeZoneId == null) { + if (timeZoneId == null || timeZoneId.equals(DateUtil.DEFAULT_TIME_ZONE_ID)) { 
parserTimeZone = DateUtil.DEFAULT_TIME_ZONE; } else if (LOCAL_TIME_ZONE_ID.equalsIgnoreCase(timeZoneId)) { parserTimeZone = TimeZone.getDefault(); @@ -164,21 +164,25 @@ public static DateTimeParser getDateTimeParser(String pattern, PDataType pDataTy } public static Format getDateFormatter(String pattern) { - return DateUtil.DEFAULT_DATE_FORMAT.equals(pattern) + return getDateFormatter(pattern, DateUtil.DEFAULT_TIME_ZONE_ID); + } + + public static Format getDateFormatter(String pattern, String timeZoneID) { + return DateUtil.DEFAULT_DATE_FORMAT.equals(pattern) && DateUtil.DEFAULT_TIME_ZONE_ID.equals(timeZoneID) ? DateUtil.DEFAULT_DATE_FORMATTER - : FastDateFormat.getInstance(pattern, DateUtil.DEFAULT_TIME_ZONE); + : FastDateFormat.getInstance(pattern, getTimeZone(timeZoneID)); } - public static Format getTimeFormatter(String pattern) { - return DateUtil.DEFAULT_TIME_FORMAT.equals(pattern) + public static Format getTimeFormatter(String pattern, String timeZoneID) { + return DateUtil.DEFAULT_TIME_FORMAT.equals(pattern) && DateUtil.DEFAULT_TIME_ZONE_ID.equals(timeZoneID) ? DateUtil.DEFAULT_TIME_FORMATTER - : FastDateFormat.getInstance(pattern, DateUtil.DEFAULT_TIME_ZONE); + : FastDateFormat.getInstance(pattern, getTimeZone(timeZoneID)); } - public static Format getTimestampFormatter(String pattern) { - return DateUtil.DEFAULT_TIMESTAMP_FORMAT.equals(pattern) + public static Format getTimestampFormatter(String pattern, String timeZoneID) { + return DateUtil.DEFAULT_TIMESTAMP_FORMAT.equals(pattern) && DateUtil.DEFAULT_TIME_ZONE_ID.equals(timeZoneID) ? DateUtil.DEFAULT_TIMESTAMP_FORMATTER - : FastDateFormat.getInstance(pattern, DateUtil.DEFAULT_TIME_ZONE); + : FastDateFormat.getInstance(pattern, getTimeZone(timeZoneID)); } private static long parseDateTime(String dateTimeValue) { From 9846a6c46813715f7363b5d635b92fcd5f500536 Mon Sep 17 00:00:00 2001 From: Jimmy Casey Date: Sun, 29 Jul 2018 21:43:55 +0000 Subject: [PATCH 07/14] PHOENIX-4822 Fixed Spelling. 
Closes #318 Signed-off-by: Josh Elser --- .../src/main/java/org/apache/phoenix/log/LogWriter.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/LogWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/log/LogWriter.java index dab03e7c2d5..a7a21809bec 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/log/LogWriter.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/log/LogWriter.java @@ -21,7 +21,7 @@ import java.sql.SQLException; /** - * Used by the event handler to write RingBufferEvent, this is done in a seperate thread from the application configured + * Used by the event handler to write RingBufferEvent, this is done in a separate thread from the application configured * during disruptor */ public interface LogWriter { From fe9c079441b52f4dc1ca504a12ea5359e605ea0c Mon Sep 17 00:00:00 2001 From: Gerald Sangudi Date: Thu, 14 Jun 2018 12:49:30 -0700 Subject: [PATCH 08/14] PHOENIX-4751 Implement client-side hash aggregation --- .../end2end/ClientHashAggregateIT.java | 208 +++++++++++++++++ .../phoenix/execute/ClientAggregatePlan.java | 40 +++- .../ClientHashAggregatingResultIterator.java | 210 ++++++++++++++++++ .../org/apache/phoenix/parse/HintNode.java | 5 + 4 files changed, 453 insertions(+), 10 deletions(-) create mode 100644 phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientHashAggregateIT.java create mode 100644 phoenix-core/src/main/java/org/apache/phoenix/iterate/ClientHashAggregatingResultIterator.java diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientHashAggregateIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientHashAggregateIT.java new file mode 100644 index 00000000000..bdc638bf504 --- /dev/null +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientHashAggregateIT.java @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.phoenix.end2end; + +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.Properties; + +import org.apache.phoenix.util.PropertiesUtil; +import org.apache.phoenix.util.QueryUtil; +import org.junit.Test; + +public class ClientHashAggregateIT extends ParallelStatsDisabledIT { + + @Test + public void testSalted() throws Exception { + + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + + try { + String table = createSalted(conn); + testTable(conn, table); + } finally { + conn.close(); + } + } + + @Test + public void testUnsalted() throws Exception { + + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + + try { + String table = createUnsalted(conn); + testTable(conn, table); + } finally { + conn.close(); + } + } + + private void testTable(Connection conn, String table) throws Exception { + 
verifyExplain(conn, table, false, false); + verifyExplain(conn, table, false, true); + verifyExplain(conn, table, true, false); + verifyExplain(conn, table, true, true); + + verifyResults(conn, table, 13, 0, false, false); + verifyResults(conn, table, 13, 0, false, true); + verifyResults(conn, table, 13, 0, true, false); + verifyResults(conn, table, 13, 0, true, true); + + verifyResults(conn, table, 13, 17, false, true); + verifyResults(conn, table, 13, 17, true, true); + + dropTable(conn, table); + } + + private String createSalted(Connection conn) throws Exception { + + String table = "SALTED_" + generateUniqueName(); + String create = "CREATE TABLE " + table + " (" + + " keyA BIGINT NOT NULL," + + " keyB BIGINT NOT NULL," + + " val SMALLINT," + + " CONSTRAINT pk PRIMARY KEY (keyA, keyB)" + + ") SALT_BUCKETS = 4"; + + conn.createStatement().execute(create); + return table; + } + + private String createUnsalted(Connection conn) throws Exception { + + String table = "UNSALTED_" + generateUniqueName(); + String create = "CREATE TABLE " + table + " (" + + " keyA BIGINT NOT NULL," + + " keyB BIGINT NOT NULL," + + " val SMALLINT," + + " CONSTRAINT pk PRIMARY KEY (keyA, keyB)" + + ")"; + + conn.createStatement().execute(create); + return table; + } + + private String getQuery(String table, boolean hash, boolean swap, boolean sort) { + + String query = "SELECT /*+ USE_SORT_MERGE_JOIN" + + (hash ? " HASH_AGGREGATE" : "") + " */" + + " t1.val v1, t2.val v2, COUNT(*) c" + + " FROM " + table + " t1 JOIN " + table + " t2" + + " ON (t1.keyB = t2.keyB)" + + " WHERE t1.keyA = 10 AND t2.keyA = 20" + + " GROUP BY " + + (swap ? "t2.val, t1.val" : "t1.val, t2.val") + + (sort ? 
" ORDER BY t1.val, t2.val" : "") + ; + + return query; + } + + private void verifyExplain(Connection conn, String table, boolean swap, boolean sort) throws Exception { + + String query = "EXPLAIN " + getQuery(table, true, swap, sort); + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery(query); + String plan = QueryUtil.getExplainPlan(rs); + rs.close(); + assertTrue(plan != null && plan.contains("CLIENT HASH AGGREGATE")); + assertTrue(plan != null && (sort == plan.contains("CLIENT SORTED BY"))); + } + + private void verifyResults(Connection conn, String table, int c1, int c2, boolean swap, boolean sort) throws Exception { + + String upsert = "UPSERT INTO " + table + "(keyA, keyB, val) VALUES(?, ?, ?)"; + PreparedStatement upsertStmt = conn.prepareStatement(upsert); + for (int i = 0; i < c1; i++) { + upsertStmt.setInt(1, 10); + upsertStmt.setInt(2, 100+i); + upsertStmt.setInt(3, 1); + upsertStmt.execute(); + + upsertStmt.setInt(1, 20); + upsertStmt.setInt(2, 100+i); + upsertStmt.setInt(3, 2); + upsertStmt.execute(); + } + for (int i = 0; i < c2; i++) { + upsertStmt.setInt(1, 10); + upsertStmt.setInt(2, 200+i); + upsertStmt.setInt(3, 2); + upsertStmt.execute(); + + upsertStmt.setInt(1, 20); + upsertStmt.setInt(2, 200+i); + upsertStmt.setInt(3, 1); + upsertStmt.execute(); + } + conn.commit(); + + String hashQuery = getQuery(table, true, swap, sort); + String sortQuery = getQuery(table, false, swap, sort); + Statement stmt = conn.createStatement(); + ResultSet hrs = stmt.executeQuery(hashQuery); + ResultSet srs = stmt.executeQuery(sortQuery); + + try { + if (c1 > 0) { + assertTrue(hrs.next()); + assertTrue(srs.next()); + assertEquals(hrs.getInt("v1"), srs.getInt("v1")); + assertEquals(hrs.getInt("v2"), srs.getInt("v2")); + assertEquals(hrs.getInt("c"), srs.getInt("c")); + assertEquals(hrs.getInt("v1"), 1); + assertEquals(hrs.getInt("v2"), 2); + assertEquals(hrs.getInt("c"), c1); + } + if (c2 > 0) { + assertTrue(hrs.next()); + 
assertTrue(srs.next()); + assertEquals(hrs.getInt("v1"), srs.getInt("v1")); + assertEquals(hrs.getInt("v2"), srs.getInt("v2")); + assertEquals(hrs.getInt("c"), srs.getInt("c")); + assertEquals(hrs.getInt("v1"), 2); + assertEquals(hrs.getInt("v2"), 1); + assertEquals(hrs.getInt("c"), c2); + } + assertFalse(hrs.next()); + assertFalse(srs.next()); + } finally { + hrs.close(); + srs.close(); + } + } + + private void dropTable(Connection conn, String table) throws Exception { + + String drop = "DROP TABLE " + table; + Statement stmt = conn.createStatement(); + stmt.execute(drop); + stmt.close(); + } +} diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java index 2db441a5075..60451a5c264 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java @@ -45,6 +45,7 @@ import org.apache.phoenix.expression.aggregator.ServerAggregators; import org.apache.phoenix.iterate.AggregatingResultIterator; import org.apache.phoenix.iterate.BaseGroupedAggregatingResultIterator; +import org.apache.phoenix.iterate.ClientHashAggregatingResultIterator; import org.apache.phoenix.iterate.DistinctAggregatingResultIterator; import org.apache.phoenix.iterate.FilterAggregatingResultIterator; import org.apache.phoenix.iterate.FilterResultIterator; @@ -61,6 +62,7 @@ import org.apache.phoenix.iterate.UngroupedAggregatingResultIterator; import org.apache.phoenix.optimize.Cost; import org.apache.phoenix.parse.FilterableStatement; +import org.apache.phoenix.parse.HintNode; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.TableRef; @@ -76,6 +78,7 @@ public class ClientAggregatePlan extends ClientProcessingPlan { private final Expression having; private final ServerAggregators 
serverAggregators; private final ClientAggregators clientAggregators; + private final boolean useHashAgg; public ClientAggregatePlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector, Integer limit, Integer offset, Expression where, OrderBy orderBy, GroupBy groupBy, Expression having, QueryPlan delegate) { @@ -89,6 +92,10 @@ public ClientAggregatePlan(StatementContext context, FilterableStatement stateme // another one. this.serverAggregators = ServerAggregators.deserialize(context.getScan() .getAttribute(BaseScannerRegionObserver.AGGREGATORS), context.getConnection().getQueryServices().getConfiguration(), null); + + // Extract hash aggregate hint, if any. + HintNode hints = statement.getHint(); + useHashAgg = hints != null && hints.hasHint(HintNode.Hint.HASH_AGGREGATE); } @Override @@ -134,17 +141,25 @@ public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throw aggResultIterator = new ClientUngroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator), serverAggregators); aggResultIterator = new UngroupedAggregatingResultIterator(LookAheadResultIterator.wrap(aggResultIterator), clientAggregators); } else { - if (!groupBy.isOrderPreserving()) { - int thresholdBytes = context.getConnection().getQueryServices().getProps().getInt( - QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES); - List keyExpressions = groupBy.getKeyExpressions(); + List keyExpressions = groupBy.getKeyExpressions(); + if (groupBy.isOrderPreserving()) { + aggResultIterator = new ClientGroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator), serverAggregators, keyExpressions); + } else { + int thresholdBytes = context.getConnection().getQueryServices().getProps().getInt + (QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES); List keyExpressionOrderBy = Lists.newArrayListWithExpectedSize(keyExpressions.size()); for 
(Expression keyExpression : keyExpressions) { keyExpressionOrderBy.add(new OrderByExpression(keyExpression, false, true)); } - iterator = new OrderedResultIterator(iterator, keyExpressionOrderBy, thresholdBytes, null, null, projector.getEstimatedRowByteSize()); + + if (useHashAgg) { + // Pass in orderBy to apply any sort that has been optimized away + aggResultIterator = new ClientHashAggregatingResultIterator(context, iterator, serverAggregators, keyExpressions, orderBy); + } else { + iterator = new OrderedResultIterator(iterator, keyExpressionOrderBy, thresholdBytes, null, null, projector.getEstimatedRowByteSize()); + aggResultIterator = new ClientGroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator), serverAggregators, keyExpressions); + } } - aggResultIterator = new ClientGroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator), serverAggregators, groupBy.getKeyExpressions()); aggResultIterator = new GroupedAggregatingResultIterator(LookAheadResultIterator.wrap(aggResultIterator), clientAggregators); } @@ -182,13 +197,18 @@ public ExplainPlan getExplainPlan() throws SQLException { if (where != null) { planSteps.add("CLIENT FILTER BY " + where.toString()); } - if (!groupBy.isEmpty()) { - if (!groupBy.isOrderPreserving()) { + if (groupBy.isEmpty()) { + planSteps.add("CLIENT AGGREGATE INTO SINGLE ROW"); + } else if (groupBy.isOrderPreserving()) { + planSteps.add("CLIENT AGGREGATE INTO DISTINCT ROWS BY " + groupBy.getExpressions().toString()); + } else if (useHashAgg) { + planSteps.add("CLIENT HASH AGGREGATE INTO DISTINCT ROWS BY " + groupBy.getExpressions().toString()); + if (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) { planSteps.add("CLIENT SORTED BY " + groupBy.getKeyExpressions().toString()); } - planSteps.add("CLIENT AGGREGATE INTO DISTINCT ROWS BY " + groupBy.getExpressions().toString()); } else { - planSteps.add("CLIENT AGGREGATE INTO SINGLE ROW"); + planSteps.add("CLIENT SORTED BY " + 
groupBy.getKeyExpressions().toString()); + planSteps.add("CLIENT AGGREGATE INTO DISTINCT ROWS BY " + groupBy.getExpressions().toString()); } if (having != null) { planSteps.add("CLIENT AFTER-AGGREGATION FILTER BY " + having.toString()); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ClientHashAggregatingResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ClientHashAggregatingResultIterator.java new file mode 100644 index 00000000000..d4df198ba77 --- /dev/null +++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ClientHashAggregatingResultIterator.java @@ -0,0 +1,210 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.phoenix.iterate; + +import static org.apache.phoenix.query.QueryConstants.AGG_TIMESTAMP; +import static org.apache.phoenix.query.QueryConstants.SINGLE_COLUMN; +import static org.apache.phoenix.query.QueryConstants.SINGLE_COLUMN_FAMILY; + +import java.io.IOException; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.phoenix.compile.StatementContext; +import org.apache.phoenix.compile.OrderByCompiler.OrderBy; +import org.apache.phoenix.expression.Expression; +import org.apache.phoenix.expression.aggregator.Aggregator; +import org.apache.phoenix.expression.aggregator.Aggregators; +import org.apache.phoenix.memory.MemoryManager.MemoryChunk; +import org.apache.phoenix.schema.tuple.MultiKeyValueTuple; +import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.util.PhoenixKeyValueUtil; +import org.apache.phoenix.util.SizedUtil; +import org.apache.phoenix.util.TupleUtil; + +/** + * + * This class implements client-side hash aggregation in memory. + * Issue https://issues.apache.org/jira/browse/PHOENIX-4751. 
+ * + */ +public class ClientHashAggregatingResultIterator + implements AggregatingResultIterator { + + private static final int HASH_AGG_INIT_SIZE = 64*1024; + private static final int CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE = 64*1024; + private static final byte[] UNITIALIZED_KEY_BUFFER = new byte[0]; + private final ResultIterator resultIterator; + private final Aggregators aggregators; + private final List groupByExpressions; + private final OrderBy orderBy; + private final MemoryChunk memoryChunk; + private HashMap hash; + private List keyList; + private Iterator keyIterator; + + public ClientHashAggregatingResultIterator(StatementContext context, ResultIterator resultIterator, + Aggregators aggregators, List groupByExpressions, OrderBy orderBy) { + + Objects.requireNonNull(resultIterator); + Objects.requireNonNull(aggregators); + Objects.requireNonNull(groupByExpressions); + this.resultIterator = resultIterator; + this.aggregators = aggregators; + this.groupByExpressions = groupByExpressions; + this.orderBy = orderBy; + memoryChunk = context.getConnection().getQueryServices().getMemoryManager().allocate(CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE); + } + + @Override + public Tuple next() throws SQLException { + if (keyIterator == null) { + hash = populateHash(); + /******** + * + * Perform a post-aggregation sort only when required. There are 3 possible scenarios: + * (1) The query DOES NOT have an ORDER BY -- in this case, we DO NOT perform a sort, and the results will be in random order. + * (2) The query DOES have an ORDER BY, the ORDER BY keys match the GROUP BY keys, and all the ORDER BY keys are ASCENDING + * -- in this case, we DO perform a sort. THE ORDER BY has been optimized away, because the non-hash client aggregation + * generates results in ascending order of the GROUP BY keys. 
+ * (3) The query DOES have an ORDER BY, but the ORDER BY keys do not match the GROUP BY keys, or at least one ORDER BY key is DESCENDING + * -- in this case, we DO NOT perform a sort, because the ORDER BY has not been optimized away and will be performed later by the + * client aggregation code. + * + * Finally, we also handle optimization of reverse sort here. This is currently defensive, because reverse sort is not optimized away. + * + ********/ + if (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) { + keyList = sortKeys(); + keyIterator = keyList.iterator(); + } else { + keyIterator = hash.keySet().iterator(); + } + } + + if (!keyIterator.hasNext()) { + return null; + } + + ImmutableBytesWritable key = keyIterator.next(); + Aggregator[] rowAggregators = hash.get(key); + byte[] value = aggregators.toBytes(rowAggregators); + Tuple tuple = wrapKeyValueAsResult(PhoenixKeyValueUtil.newKeyValue(key, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length)); + return tuple; + } + + @Override + public void close() throws SQLException { + keyIterator = null; + keyList = null; + hash = null; + try { + memoryChunk.close(); + } finally { + resultIterator.close(); + } + } + + @Override + public Aggregator[] aggregate(Tuple result) { + Aggregator[] rowAggregators = aggregators.getAggregators(); + aggregators.reset(rowAggregators); + aggregators.aggregate(rowAggregators, result); + return rowAggregators; + } + + @Override + public void explain(List planSteps) { + resultIterator.explain(planSteps); + } + + @Override + public String toString() { + return "ClientHashAggregatingResultIterator [resultIterator=" + + resultIterator + ", aggregators=" + aggregators + ", groupByExpressions=" + + groupByExpressions + "]"; + } + + // Copied from ClientGroupedAggregatingResultIterator + protected ImmutableBytesWritable getGroupingKey(Tuple tuple, ImmutableBytesWritable ptr) throws SQLException { + try { + ImmutableBytesWritable key 
= TupleUtil.getConcatenatedValue(tuple, groupByExpressions); + ptr.set(key.get(), key.getOffset(), key.getLength()); + return ptr; + } catch (IOException e) { + throw new SQLException(e); + } + } + + // Copied from ClientGroupedAggregatingResultIterator + protected Tuple wrapKeyValueAsResult(Cell keyValue) { + return new MultiKeyValueTuple(Collections. singletonList(keyValue)); + } + + private HashMap populateHash() throws SQLException { + + hash = new HashMap(HASH_AGG_INIT_SIZE, 0.75f); + final int aggSize = aggregators.getEstimatedByteSize(); + long keySize = 0; + + for (Tuple result = resultIterator.next(); result != null; result = resultIterator.next()) { + ImmutableBytesWritable key = new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER); + key = getGroupingKey(result, key); + Aggregator[] rowAggregators = hash.get(key); + if (rowAggregators == null) { + keySize += key.getSize(); + long hashSize = SizedUtil.sizeOfMap(hash.size() + 1, SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE, aggSize) + keySize; + if (hashSize > memoryChunk.getSize() + CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE) { + // This will throw InsufficientMemoryException if necessary + memoryChunk.resize(hashSize + CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE); + } + + rowAggregators = aggregators.newAggregators(); + hash.put(key, rowAggregators); + } + + aggregators.aggregate(rowAggregators, result); + } + + return hash; + } + + private List sortKeys() { + // This will throw InsufficientMemoryException if necessary + memoryChunk.resize(memoryChunk.getSize() + SizedUtil.sizeOfArrayList(hash.size())); + + keyList = new ArrayList(hash.size()); + keyList.addAll(hash.keySet()); + Comparator comp = new ImmutableBytesWritable.Comparator(); + if (orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) { + comp = Collections.reverseOrder(comp); + } + Collections.sort(keyList, comp); + return keyList; + } +} diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java index 39e9b05d4a7..02a44ad7bd7 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java @@ -108,6 +108,11 @@ public enum Hint { * Enforces a forward scan. */ FORWARD_SCAN, + /** + * Prefer a hash aggregate over a sort plus streaming aggregate. + * Issue https://issues.apache.org/jira/browse/PHOENIX-4751. + */ + HASH_AGGREGATE, }; private final Map hints; From d19051733af1e8d47389dd6d49dde4af17aa3296 Mon Sep 17 00:00:00 2001 From: Thomas D'Silva Date: Tue, 24 Jul 2018 10:49:31 -0700 Subject: [PATCH 09/14] PHOENIX-4799 Write cells using checkAndMutate to prevent conflicting changes --- .../end2end/AlterTableWithViewsIT.java | 2 +- .../phoenix/end2end/BasePermissionsIT.java | 65 ++--- .../phoenix/end2end/ChangePermissionsIT.java | 4 +- ...igrateSystemTablesToSystemNamespaceIT.java | 13 +- .../end2end/QueryDatabaseMetaDataIT.java | 4 + .../end2end/TableDDLPermissionsIT.java | 8 + .../end2end/TenantSpecificTablesDDLIT.java | 2 + .../org/apache/phoenix/end2end/UpgradeIT.java | 33 +-- .../org/apache/phoenix/end2end/ViewIT.java | 242 ++++++++++++++++-- .../phoenix/jdbc/PhoenixDatabaseMetaData.java | 2 + .../query/ConnectionQueryServices.java | 14 + .../query/ConnectionQueryServicesImpl.java | 163 ++++++------ .../ConnectionlessQueryServicesImpl.java | 20 ++ .../DelegateConnectionQueryServices.java | 11 + .../apache/phoenix/query/QueryConstants.java | 14 + .../apache/phoenix/schema/MetaDataClient.java | 95 ++++++- 16 files changed, 529 insertions(+), 163 deletions(-) diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java index e39d492d207..e97a40d37a5 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java +++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java @@ -530,7 +530,7 @@ public void testAddExistingViewPkColumnToBaseTableWithMultipleViews() throws Exc try { // should fail because there are two view with different pk columns - conn.createStatement().execute("ALTER TABLE " + tableName + " ADD VIEW_COL1 DECIMAL PRIMARY KEY, VIEW_COL2 VARCHAR PRIMARY KEY"); + conn.createStatement().execute("ALTER TABLE " + tableName + " ADD VIEW_COL1 DECIMAL(10,2) PRIMARY KEY, VIEW_COL2 VARCHAR(256) PRIMARY KEY"); fail(); } catch (SQLException e) { diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java index 7698fca8bc5..515de4741eb 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java @@ -16,8 +16,28 @@ */ package org.apache.phoenix.end2end; -import com.google.common.base.Joiner; -import com.google.common.base.Throwables; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.lang.reflect.UndeclaredThrowableException; +import java.security.PrivilegedExceptionAction; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Properties; +import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -38,34 +58,13 @@ import org.apache.phoenix.query.QueryConstants; import 
org.apache.phoenix.query.QueryServices; import org.apache.phoenix.util.PhoenixRuntime; -import org.apache.phoenix.util.QueryUtil; import org.junit.After; import org.junit.BeforeClass; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -import java.io.IOException; -import java.lang.reflect.UndeclaredThrowableException; -import java.security.PrivilegedExceptionAction; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Properties; -import java.util.Set; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import com.google.common.base.Joiner; +import com.google.common.base.Throwables; @RunWith(Parameterized.class) public class BasePermissionsIT extends BaseTest { @@ -75,17 +74,23 @@ public class BasePermissionsIT extends BaseTest { static String SUPERUSER; static HBaseTestingUtility testUtil; - static final Set PHOENIX_SYSTEM_TABLES = new HashSet<>(Arrays.asList( - "SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION")); + static final Set PHOENIX_SYSTEM_TABLES = + new HashSet<>(Arrays.asList("SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", + "SYSTEM.FUNCTION", "SYSTEM.MUTEX")); - static final Set PHOENIX_SYSTEM_TABLES_IDENTIFIERS = new HashSet<>(Arrays.asList( - "SYSTEM.\"CATALOG\"", "SYSTEM.\"SEQUENCE\"", "SYSTEM.\"STATS\"", "SYSTEM.\"FUNCTION\"")); + static final Set PHOENIX_SYSTEM_TABLES_IDENTIFIERS = + new HashSet<>(Arrays.asList("SYSTEM.\"CATALOG\"", "SYSTEM.\"SEQUENCE\"", + "SYSTEM.\"STATS\"", "SYSTEM.\"FUNCTION\"", "SYSTEM.\"MUTEX\"")); static final String 
SYSTEM_SEQUENCE_IDENTIFIER = QueryConstants.SYSTEM_SCHEMA_NAME + "." + "\"" + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE+ "\""; + static final String SYSTEM_MUTEX_IDENTIFIER = + QueryConstants.SYSTEM_SCHEMA_NAME + "." + "\"" + + PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME + "\""; + static final Set PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES = new HashSet<>(Arrays.asList( - "SYSTEM:CATALOG", "SYSTEM:SEQUENCE", "SYSTEM:STATS", "SYSTEM:FUNCTION")); + "SYSTEM:CATALOG", "SYSTEM:SEQUENCE", "SYSTEM:STATS", "SYSTEM:FUNCTION", "SYSTEM:MUTEX")); // Create Multiple users so that we can use Hadoop UGI to run tasks as various users // Permissions can be granted or revoke by superusers and admins only diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java index 106438fc463..65f44c04f73 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java @@ -57,7 +57,8 @@ private void grantSystemTableAccess(User superUser, User... users) throws Except } else { verifyAllowed(grantPermissions("RX", user, PHOENIX_SYSTEM_TABLES_IDENTIFIERS, false), superUser); } - verifyAllowed(grantPermissions("W", user, SYSTEM_SEQUENCE_IDENTIFIER, false), superUser); + verifyAllowed(grantPermissions("RWX", user, SYSTEM_SEQUENCE_IDENTIFIER, false), superUser); + verifyAllowed(grantPermissions("RWX", user, SYSTEM_MUTEX_IDENTIFIER, false), superUser); } } @@ -69,6 +70,7 @@ private void revokeSystemTableAccess(User superUser, User... 
users) throws Excep verifyAllowed(revokePermissions(user, PHOENIX_SYSTEM_TABLES_IDENTIFIERS, false), superUser); } verifyAllowed(revokePermissions(user, SYSTEM_SEQUENCE_IDENTIFIER, false), superUser); + verifyAllowed(revokePermissions(user, SYSTEM_MUTEX_IDENTIFIER, false), superUser); } } diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java index d253f6ebb9a..ffac4d6afbb 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java @@ -54,7 +54,6 @@ import org.apache.phoenix.util.ReadOnlyProps; import org.apache.phoenix.util.SchemaUtil; import org.junit.After; -import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -275,16 +274,15 @@ public Void run() throws Exception { private void changeMutexLock(Properties clientProps, boolean acquire) throws SQLException, IOException { ConnectionQueryServices services = null; - byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE); try (Connection conn = DriverManager.getConnection(getJdbcUrl(), clientProps)) { services = conn.unwrap(PhoenixConnection.class).getQueryServices(); if(acquire) { assertTrue(((ConnectionQueryServicesImpl) services) - .acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP, mutexRowKey)); + .acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP)); } else { - ((ConnectionQueryServicesImpl) services).releaseUpgradeMutex(mutexRowKey); + services.deleteMutexCell(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE, null, null); } } } @@ -397,14 +395,13 @@ private void 
verifySyscatData(Properties clientProps, String connName, Statement } } - // The set will contain SYSMUTEX table since that table is not exposed in SYSCAT if (systemTablesMapped) { if (!systemSchemaExists) { fail(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME + " entry doesn't exist in SYSTEM.CATALOG table."); } - assertTrue(namespaceMappedSystemTablesSet.size() == 1); + assertTrue(namespaceMappedSystemTablesSet.isEmpty()); } else { - assertTrue(systemTablesSet.size() == 1); + assertTrue(systemTablesSet.isEmpty()); } } diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java index 278b7f4f98b..6e446d02279 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java @@ -21,6 +21,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_TABLE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_TABLE; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE; import static org.apache.phoenix.util.TestUtil.ATABLE_NAME; import static org.apache.phoenix.util.TestUtil.CUSTOM_ENTITY_DATA_FULL_NAME; @@ -178,6 +179,9 @@ public void testTableMetadataScan() throws SQLException { assertEquals(PTableType.SYSTEM.toString(), rs.getString("TABLE_TYPE")); assertTrue(rs.next()); assertEquals(SYSTEM_CATALOG_SCHEMA, rs.getString("TABLE_SCHEM")); + assertEquals(SYSTEM_MUTEX_TABLE_NAME, rs.getString("TABLE_NAME")); + assertTrue(rs.next()); + assertEquals(SYSTEM_CATALOG_SCHEMA, rs.getString("TABLE_SCHEM")); assertEquals(TYPE_SEQUENCE, rs.getString("TABLE_NAME")); assertEquals(PTableType.SYSTEM.toString(), 
rs.getString("TABLE_TYPE")); assertTrue(rs.next()); diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableDDLPermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableDDLPermissionsIT.java index 07d754de3f3..eb79c079221 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableDDLPermissionsIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableDDLPermissionsIT.java @@ -58,6 +58,10 @@ private void grantSystemTableAccess() throws Exception{ Action.READ, Action.EXEC); grantPermissions(unprivilegedUser.getName(), Collections.singleton("SYSTEM:SEQUENCE"), Action.WRITE, Action.READ, Action.EXEC); + grantPermissions(regularUser1.getShortName(), Collections.singleton("SYSTEM:MUTEX"), Action.WRITE, + Action.READ, Action.EXEC); + grantPermissions(unprivilegedUser.getShortName(), Collections.singleton("SYSTEM:MUTEX"), Action.WRITE, + Action.READ, Action.EXEC); } else { grantPermissions(regularUser1.getName(), PHOENIX_SYSTEM_TABLES, Action.READ, Action.EXEC); @@ -68,6 +72,10 @@ private void grantSystemTableAccess() throws Exception{ Action.READ, Action.EXEC); grantPermissions(unprivilegedUser.getName(), Collections.singleton("SYSTEM:SEQUENCE"), Action.WRITE, Action.READ, Action.EXEC); + grantPermissions(regularUser1.getShortName(), Collections.singleton("SYSTEM.MUTEX"), Action.WRITE, + Action.READ, Action.EXEC); + grantPermissions(unprivilegedUser.getShortName(), Collections.singleton("SYSTEM.MUTEX"), Action.WRITE, + Action.READ, Action.EXEC); } } catch (Throwable e) { if (e instanceof Exception) { diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java index 6447cb53d2e..0a4ccd6bc73 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java @@ -498,6 +498,8 @@ 
public void testTableMetadataScan() throws Exception { assertTrue(rs.next()); assertTableMetaData(rs, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_LOG_TABLE, PTableType.SYSTEM); assertTrue(rs.next()); + assertTableMetaData(rs, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME, PTableType.SYSTEM); + assertTrue(rs.next()); assertTableMetaData(rs, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.TYPE_SEQUENCE, PTableType.SYSTEM); assertTrue(rs.next()); assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE, PTableType.SYSTEM); diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java index b0786e68044..632a2bb3e66 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java @@ -18,8 +18,6 @@ package org.apache.phoenix.end2end; import static com.google.common.base.Preconditions.checkNotNull; -import static org.apache.phoenix.query.ConnectionQueryServicesImpl.UPGRADE_MUTEX; -import static org.apache.phoenix.query.ConnectionQueryServicesImpl.UPGRADE_MUTEX_UNLOCKED; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; @@ -43,8 +41,6 @@ import org.apache.curator.shaded.com.google.common.collect.Sets; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.coprocessor.MetaDataProtocol; @@ -425,38 +421,21 @@ public boolean isUpgradeRequired() { } } - private void putUnlockKVInSysMutex(byte[] row) throws Exception { - try 
(Connection conn = getConnection(false, null)) { - ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices(); - try (Table sysMutexTable = services.getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) { - byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES; - byte[] qualifier = UPGRADE_MUTEX; - Put put = new Put(row); - put.addColumn(family, qualifier, UPGRADE_MUTEX_UNLOCKED); - sysMutexTable.put(put); - } - } - } - @Test public void testAcquiringAndReleasingUpgradeMutex() throws Exception { ConnectionQueryServices services = null; - byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, - generateUniqueName()); try (Connection conn = getConnection(false, null)) { services = conn.unwrap(PhoenixConnection.class).getQueryServices(); - putUnlockKVInSysMutex(mutexRowKey); assertTrue(((ConnectionQueryServicesImpl)services) - .acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0, mutexRowKey)); + .acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0)); try { ((ConnectionQueryServicesImpl)services) - .acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0, mutexRowKey); + .acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0); fail(); } catch (UpgradeInProgressException expected) { } - assertTrue(((ConnectionQueryServicesImpl)services).releaseUpgradeMutex(mutexRowKey)); - assertFalse(((ConnectionQueryServicesImpl)services).releaseUpgradeMutex(mutexRowKey)); + ((ConnectionQueryServicesImpl)services).releaseUpgradeMutex(); } } @@ -470,7 +449,6 @@ public void testConcurrentUpgradeThrowsUprgadeInProgressException() throws Excep final byte[] mutexKey = Bytes.toBytes(generateUniqueName()); try (Connection conn = getConnection(false, null)) { services = conn.unwrap(PhoenixConnection.class).getQueryServices(); - putUnlockKVInSysMutex(mutexKey); FutureTask task1 = new FutureTask<>(new 
AcquireMutexRunnable(mutexStatus1, services, latch, numExceptions, mutexKey)); FutureTask task2 = new FutureTask<>(new AcquireMutexRunnable(mutexStatus2, services, latch, numExceptions, mutexKey)); Thread t1 = new Thread(task1); @@ -508,12 +486,15 @@ public AcquireMutexRunnable(AtomicBoolean acquireStatus, ConnectionQueryServices public Void call() throws Exception { try { ((ConnectionQueryServicesImpl)services).acquireUpgradeMutex( - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0, mutexRowKey); + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0); acquireStatus.set(true); } catch (UpgradeInProgressException e) { numExceptions.incrementAndGet(); } finally { latch.countDown(); + if (acquireStatus.get()) { + ((ConnectionQueryServicesImpl)services).releaseUpgradeMutex(); + } } return null; } diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java index 1821e9e5b38..c759694bfd6 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java @@ -20,11 +20,13 @@ import static com.google.common.collect.Lists.newArrayListWithExpectedSize; import static org.apache.phoenix.exception.SQLExceptionCode.CANNOT_MODIFY_VIEW_PK; import static org.apache.phoenix.exception.SQLExceptionCode.NOT_NULLABLE_COLUMN_IN_ROW_KEY; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; import static org.apache.phoenix.util.TestUtil.analyzeTable; import static org.apache.phoenix.util.TestUtil.getAllSplits; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -42,7 +44,15 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Properties; 
+import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; import org.apache.curator.shaded.com.google.common.collect.Lists; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -53,8 +63,9 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; @@ -67,6 +78,7 @@ import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.schema.ColumnAlreadyExistsException; +import org.apache.phoenix.schema.ConcurrentTableMutationException; import org.apache.phoenix.schema.PColumn; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableType; @@ -74,6 +86,7 @@ import org.apache.phoenix.schema.TableNotFoundException; import org.apache.phoenix.util.MetaDataUtil; import org.apache.phoenix.util.PhoenixRuntime; +import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.ReadOnlyProps; import org.apache.phoenix.util.SchemaUtil; @@ -93,6 +106,17 @@ public class ViewIT extends SplitSystemCatalogIT { protected String tableDDLOptions; protected boolean transactional; + + private static final String FAILED_VIEWNAME = "FAILED_VIEW"; + private static final byte[] FAILED_ROWKEY_BYTES = + 
SchemaUtil.getTableKey(null, Bytes.toBytes(SCHEMA2), Bytes.toBytes(FAILED_VIEWNAME)); + private static final String SLOW_VIEWNAME_PREFIX = "SLOW_VIEW"; + private static final byte[] SLOW_ROWKEY_PREFIX_BYTES = + SchemaUtil.getTableKey(null, Bytes.toBytes(SCHEMA2), + Bytes.toBytes(SLOW_VIEWNAME_PREFIX)); + + private static volatile CountDownLatch latch1 = null; + private static volatile CountDownLatch latch2 = null; public ViewIT(boolean transactional) { StringBuilder optionBuilder = new StringBuilder(); @@ -114,7 +138,7 @@ public static void doSetup() throws Exception { Map props = Collections.emptyMap(); boolean splitSystemCatalog = (driver == null); Map serverProps = Maps.newHashMapWithExpectedSize(1); - serverProps.put("hbase.coprocessor.region.classes", FailingRegionObserver.class.getName()); + serverProps.put("hbase.coprocessor.region.classes", TestMetaDataRegionObserver.class.getName()); serverProps.put("hbase.coprocessor.abortonerror", "false"); setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(props.entrySet().iterator())); // Split SYSTEM.CATALOG once after the mini-cluster is started @@ -123,6 +147,54 @@ public static void doSetup() throws Exception { } } + public static class TestMetaDataRegionObserver implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void preBatchMutate(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) throws IOException { + if (shouldFail(c, miniBatchOp.getOperation(0))) { + // throwing anything other than instances of IOException result + // in this coprocessor being unloaded + // DoNotRetryIOException tells HBase not to retry this mutation + // multiple times + throw new DoNotRetryIOException(); + } else if (shouldSlowDown(c, miniBatchOp.getOperation(0))) { + // simulate a slow write to SYSTEM.CATALOG + if (latch1 != null) { + latch1.countDown(); + } + if (latch2 != null) { + try { 
+ // wait till the second task is complete before completing the first task + boolean result = latch2.await(2, TimeUnit.MINUTES); + if (!result) { + throw new RuntimeException("Second task took too long to complete"); + } + } catch (InterruptedException e) { + } + } + } + + private boolean shouldFail(ObserverContext c, Mutation m) { + TableName tableName = c.getEnvironment().getRegion().getRegionInfo().getTable(); + return tableName.equals(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME) + && (Bytes.equals(FAILED_ROWKEY_BYTES, m.getRow())); + } + + private boolean shouldSlowDown(ObserverContext c, + Mutation m) { + TableName tableName = c.getEnvironment().getRegion().getRegionInfo().getTable(); + byte[] rowKeyPrefix = Arrays.copyOf(m.getRow(), SLOW_ROWKEY_PREFIX_BYTES.length); + return tableName.equals(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME) + && (Bytes.equals(SLOW_ROWKEY_PREFIX_BYTES, rowKeyPrefix)); + } + } + @Test public void testReadOnlyOnUpdatableView() throws Exception { String fullTableName = SchemaUtil.getTableName(SCHEMA1, generateUniqueName()); @@ -1278,28 +1350,160 @@ public void testChildViewCreationFails() throws Exception { PhoenixRuntime.getTableNoCache(conn, fullViewName2); } - private static final String FAILED_VIEWNAME = "FAILED_VIEW"; - private static final byte[] ROWKEY_TO_FAIL_BYTES = SchemaUtil.getTableKey(null, Bytes.toBytes(SCHEMA2), - Bytes.toBytes(FAILED_VIEWNAME)); - - public static class FailingRegionObserver extends SimpleRegionObserver { + @Test + public void testConcurrentViewCreationAndTableDrop() throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + String fullTableName = SchemaUtil.getTableName(SCHEMA1, generateUniqueName()); + String fullViewName1 = + SchemaUtil.getTableName(SCHEMA2, + SLOW_VIEWNAME_PREFIX + "_" + generateUniqueName()); + String fullViewName2 = SchemaUtil.getTableName(SCHEMA3, generateUniqueName()); + latch1 = new CountDownLatch(1); + latch2 = new
CountDownLatch(1); + String tableDdl = + "CREATE TABLE " + fullTableName + " (k INTEGER NOT NULL PRIMARY KEY, v1 DATE)" + + tableDDLOptions; + conn.createStatement().execute(tableDdl); + + ExecutorService executorService = Executors.newFixedThreadPool(1, new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + Thread t = Executors.defaultThreadFactory().newThread(r); + t.setDaemon(true); + t.setPriority(Thread.MIN_PRIORITY); + return t; + } + }); + + // create the view in a separate thread (which will take some time + // to complete) + Future future = + executorService.submit(new CreateViewRunnable(fullTableName, fullViewName1)); + // wait till the thread makes the rpc to create the view + latch1.await(); + tableDdl = "DROP TABLE " + fullTableName; + try { + // drop table should fail as we are concurrently adding a view + conn.createStatement().execute(tableDdl); + fail("Creating a view while concurrently dropping the base table should fail"); + } catch (ConcurrentTableMutationException e) { + } + latch2.countDown(); + + Exception e = future.get(); + assertNull(e); + + // create another view to ensure that the cell used to prevent + // concurrent modifications was removed + String ddl = + "CREATE VIEW " + fullViewName2 + " (v2 VARCHAR) AS SELECT * FROM " + + fullTableName + " WHERE k = 6"; + conn.createStatement().execute(ddl); + } + } + + @Test + public void testConcurrentAddColumn() throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + String fullTableName = SchemaUtil.getTableName(SCHEMA1, generateUniqueName()); + String fullViewName1 = + SchemaUtil.getTableName(SCHEMA2, + SLOW_VIEWNAME_PREFIX + "_" + generateUniqueName()); + String fullViewName2 = SchemaUtil.getTableName(SCHEMA3, generateUniqueName()); + // create base table + String tableDdl = + "CREATE TABLE " + fullTableName + " (k INTEGER NOT NULL PRIMARY KEY, v1 DATE)" + + tableDDLOptions; + conn.createStatement().execute(tableDdl); + // create a view + 
String ddl = + "CREATE VIEW " + fullViewName1 + " (v2 VARCHAR) AS SELECT * FROM " + + fullTableName + " WHERE k = 6"; + conn.createStatement().execute(ddl); + + latch1 = new CountDownLatch(1); + latch2 = new CountDownLatch(1); + ExecutorService executorService = Executors.newFixedThreadPool(1, new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + Thread t = Executors.defaultThreadFactory().newThread(r); + t.setDaemon(true); + t.setPriority(Thread.MIN_PRIORITY); + return t; + } + }); + + // add a column to the view in a separate thread (which will take + // some time to complete) + Future future = executorService.submit(new AddColumnRunnable(fullViewName1)); + // wait till the thread makes the rpc to add the column + boolean result = latch1.await(2, TimeUnit.MINUTES); + if (!result) { + fail("The add column rpc took too long"); + } + tableDdl = "ALTER TABLE " + fullTableName + " ADD v3 INTEGER"; + try { + // add the same column to the base table with a different type + conn.createStatement().execute(tableDdl); + fail("Adding a column to the base table while concurrently adding a column to a view should fail"); + } catch (ConcurrentTableMutationException e) { + } + latch2.countDown(); + + Exception e = future.get(); + assertNull(e); + + // add the same column to another view to ensure that the cell used + // to prevent concurrent modifications was removed + ddl = "CREATE VIEW " + fullViewName2 + " (v2 VARCHAR) AS SELECT * FROM " + + fullTableName + " WHERE k = 6"; + conn.createStatement().execute(ddl); + tableDdl = "ALTER VIEW " + fullViewName2 + " ADD v3 INTEGER"; + conn.createStatement().execute(tableDdl); + } + } + + private class CreateViewRunnable implements Callable { + private final String fullTableName; + private final String fullViewName; + + public CreateViewRunnable(String fullTableName, String fullViewName) { + this.fullTableName = fullTableName; + this.fullViewName = fullViewName; + } + @Override - public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException { - if (shouldFail(c, miniBatchOp.getOperation(0))) { - // throwing anything other than instances of IOException result - // in this coprocessor being unloaded - // DoNotRetryIOException tells HBase not to retry this mutation - // multiple times - throw new DoNotRetryIOException(); + public Exception call() throws Exception { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + String ddl = + "CREATE VIEW " + fullViewName + " (v2 VARCHAR) AS SELECT * FROM " + + fullTableName + " WHERE k = 5"; + conn.createStatement().execute(ddl); + } catch (SQLException e) { + return e; } + return null; } + } - private boolean shouldFail(ObserverContext c, Mutation m) { - TableName tableName = c.getEnvironment().getRegion().getRegionInfo().getTable(); - return tableName.equals(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME) - && (Bytes.equals(ROWKEY_TO_FAIL_BYTES, m.getRow())); + private class AddColumnRunnable implements Callable { + private final String fullViewName; + + public AddColumnRunnable(String fullViewName) { + this.fullViewName = fullViewName; } + @Override + public Exception call() throws Exception { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + String ddl = "ALTER VIEW " + fullViewName + " ADD v3 CHAR(15)"; + conn.createStatement().execute(ddl); + } catch (SQLException e) { + return e; + } + return null; + } } } diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java index 7814a85bb70..945a0e872ab 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java @@ -335,6 +335,8 @@ public class 
PhoenixDatabaseMetaData implements DatabaseMetaData { public static final String SEQUENCE_TABLE_TYPE = SYSTEM_SEQUENCE_TABLE; public static final String SYNC_INDEX_CREATED_DATE = "SYNC_INDEX_CREATED_DATE"; + public static final String SYSTEM_MUTEX_COLUMN_NAME = "MUTEX_VALUE"; + public static final byte[] SYSTEM_MUTEX_COLUMN_NAME_BYTES = Bytes.toBytes(SYSTEM_MUTEX_COLUMN_NAME); public static final String SYSTEM_MUTEX_TABLE_NAME = "MUTEX"; public static final String SYSTEM_MUTEX_NAME = SchemaUtil.getTableName(QueryConstants.SYSTEM_SCHEMA_NAME, SYSTEM_MUTEX_TABLE_NAME); public static final TableName SYSTEM_MUTEX_HBASE_TABLE_NAME = TableName.valueOf(SYSTEM_MUTEX_NAME); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java index 60cf9bfd5ae..5394d052a6e 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java @@ -170,4 +170,18 @@ public enum Feature {LOCAL_INDEX, RENEW_LEASE}; public QueryLoggerDisruptor getQueryDisruptor(); public PhoenixTransactionClient initTransactionClient(TransactionFactory.Provider provider); + + /** + * Writes a cell to SYSTEM.MUTEX using checkAndPut to ensure only a single client can execute a + * particular task. The params are used to generate the rowkey. + * @return true if this client was able to successfully acquire the mutex + */ + public boolean writeMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException; + + /** + * Deletes a cell that was written to SYSTEM.MUTEX. The params are used to generate the rowkey. 
+ */ + public void deleteMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException; } \ No newline at end of file diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java index 863b4655f72..318d30cf790 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java @@ -65,6 +65,7 @@ import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0; import java.io.IOException; +import java.lang.management.ManagementFactory; import java.lang.ref.WeakReference; import java.sql.PreparedStatement; import java.sql.ResultSetMetaData; @@ -114,6 +115,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; @@ -331,9 +333,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement private final AtomicBoolean upgradeRequired = new AtomicBoolean(false); private final int maxConnectionsAllowed; private final boolean shouldThrottleNumConnections; - public static final byte[] UPGRADE_MUTEX = "UPGRADE_MUTEX".getBytes(); - public static final byte[] UPGRADE_MUTEX_LOCKED = "UPGRADE_MUTEX_LOCKED".getBytes(); - public static final byte[] UPGRADE_MUTEX_UNLOCKED = "UPGRADE_MUTEX_UNLOCKED".getBytes(); + public static final byte[] MUTEX_LOCKED = "MUTEX_LOCKED".getBytes(); private static interface FeatureSupported { boolean isSupported(ConnectionQueryServices services); @@ -2535,6 +2535,10 @@ private String setSystemLogDDLProperties(String 
ddl) { protected String getChildLinkDDL() { return setSystemDDLProperties(QueryConstants.CREATE_CHILD_LINK_METADATA); } + + protected String getMutexDDL() { + return setSystemDDLProperties(QueryConstants.CREATE_MUTEX_METADTA); + } private String setSystemDDLProperties(String ddl) { return String.format(ddl, @@ -2708,13 +2712,6 @@ void createSysMutexTableIfNotExists(Admin admin) throws IOException, SQLExceptio .setTimeToLive(TTL_FOR_MUTEX).build()) .build(); admin.createTable(tableDesc); - try (Table sysMutexTable = getTable(mutexTableName.getName())) { - byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE); - Put put = new Put(mutexRowKey); - put.addColumn(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES, UPGRADE_MUTEX, UPGRADE_MUTEX_UNLOCKED); - sysMutexTable.put(put); - } } catch (IOException e) { if(!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), AccessDeniedException.class)) || !Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), org.apache.hadoop.hbase.TableNotFoundException.class))) { @@ -2751,13 +2748,10 @@ private void createOtherSystemTables(PhoenixConnection metaConnection, Admin hba try { metaConnection.createStatement().executeUpdate(getChildLinkDDL()); } catch (TableAlreadyExistsException e) {} - // Catch the IOException to log the error message and then bubble it up for the client to retry. try { - createSysMutexTableIfNotExists(hbaseAdmin); - } catch (IOException exception) { - logger.error("Failed to created SYSMUTEX table. Upgrade or migration is not possible without it. Please retry."); - throw exception; - } + metaConnection.createStatement().executeUpdate(getMutexDDL()); + } catch (TableAlreadyExistsException e) {} + // Catch the IOException to log the error message and then bubble it up for the client to retry. 
} /** @@ -3060,8 +3054,6 @@ public void upgradeSystemTables(final String url, final Properties props) throws String sysCatalogTableName = null; SQLException toThrow = null; boolean acquiredMutexLock = false; - byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE); boolean snapshotCreated = false; try { if (!isUpgradeRequired()) { @@ -3092,7 +3084,7 @@ public void upgradeSystemTables(final String url, final Properties props) throws sysCatalogTableName = SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getNameAsString(); if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, ConnectionQueryServicesImpl.this.getProps())) { // Try acquiring a lock in SYSMUTEX table before migrating the tables since it involves disabling the table. - if (acquiredMutexLock = acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP, mutexRowKey)) { + if (acquiredMutexLock = acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP)) { logger.debug("Acquired lock in SYSMUTEX table for migrating SYSTEM tables to SYSTEM namespace " + "and/or upgrading " + sysCatalogTableName); } @@ -3111,7 +3103,7 @@ public void upgradeSystemTables(final String url, final Properties props) throws // Try acquiring a lock in SYSMUTEX table before upgrading SYSCAT. 
If we cannot acquire the lock, // it means some old client is either migrating SYSTEM tables or trying to upgrade the schema of // SYSCAT table and hence it should not be interrupted - if (acquiredMutexLock = acquireUpgradeMutex(currentServerSideTableTimeStamp, mutexRowKey)) { + if (acquiredMutexLock = acquireUpgradeMutex(currentServerSideTableTimeStamp)) { logger.debug("Acquired lock in SYSMUTEX table for upgrading " + sysCatalogTableName); snapshotName = getSysCatalogSnapshotName(currentServerSideTableTimeStamp); createSnapshot(snapshotName, sysCatalogTableName); @@ -3211,6 +3203,9 @@ public void upgradeSystemTables(final String url, final Properties props) throws try { metaConnection.createStatement().executeUpdate(getChildLinkDDL()); } catch (NewerTableAlreadyExistsException e) {} catch (TableAlreadyExistsException e) {} + try { + metaConnection.createStatement().executeUpdate(getMutexDDL()); + } catch (NewerTableAlreadyExistsException e) {} catch (TableAlreadyExistsException e) {} // In case namespace mapping is enabled and system table to system namespace mapping is also enabled, // create an entry for the SYSTEM namespace in the SYSCAT table, so that GRANT/REVOKE commands can work @@ -3254,7 +3249,7 @@ public void upgradeSystemTables(final String url, final Properties props) throws } finally { if (acquiredMutexLock) { try { - releaseUpgradeMutex(mutexRowKey); + releaseUpgradeMutex(); } catch (IOException e) { logger.warn("Release of upgrade mutex failed ", e); } @@ -3448,17 +3443,10 @@ void ensureSystemTablesMigratedToSystemNamespace() // No tables exist matching "SYSTEM\..*", they are all already in "SYSTEM:.*" if (tableNames.size() == 0) { return; } // Try to move any remaining tables matching "SYSTEM\..*" into "SYSTEM:" - if (tableNames.size() > 5) { - logger.warn("Expected 5 system tables but found " + tableNames.size() + ":" + tableNames); + if (tableNames.size() > 7) { + logger.warn("Expected 7 system tables but found " + tableNames.size() + ":" + 
tableNames); } - // Handle the upgrade of SYSMUTEX table separately since it doesn't have any entries in SYSCAT - logger.info("Migrating SYSTEM.MUTEX table to SYSTEM namespace."); - String sysMutexSrcTableName = PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME; - String sysMutexDestTableName = SchemaUtil.getPhysicalName(sysMutexSrcTableName.getBytes(), this.getProps()).getNameAsString(); - UpgradeUtil.mapTableToNamespace(admin, sysMutexSrcTableName, sysMutexDestTableName, PTableType.SYSTEM); - tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME); - byte[] mappedSystemTable = SchemaUtil .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName(); metatable = getTable(mappedSystemTable); @@ -3502,64 +3490,95 @@ void ensureSystemTablesMigratedToSystemNamespace() * @throws SQLException */ @VisibleForTesting - public boolean acquireUpgradeMutex(long currentServerSideTableTimestamp, byte[] rowToLock) throws IOException, + public boolean acquireUpgradeMutex(long currentServerSideTableTimestamp) + throws IOException, SQLException { Preconditions.checkArgument(currentServerSideTableTimestamp < MIN_SYSTEM_TABLE_TIMESTAMP); - byte[] sysMutexPhysicalTableNameBytes = getSysMutexPhysicalTableNameBytes(); if(sysMutexPhysicalTableNameBytes == null) { throw new UpgradeInProgressException(getVersion(currentServerSideTableTimestamp), getVersion(MIN_SYSTEM_TABLE_TIMESTAMP)); } + if (!writeMutexCell(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE, null, null)) { + throw new UpgradeInProgressException(getVersion(currentServerSideTableTimestamp), + getVersion(MIN_SYSTEM_TABLE_TIMESTAMP)); + } + return true; + } - try (Table sysMutexTable = getTable(sysMutexPhysicalTableNameBytes)) { - byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES; - byte[] qualifier = UPGRADE_MUTEX; - byte[] oldValue = UPGRADE_MUTEX_UNLOCKED; - byte[] newValue = UPGRADE_MUTEX_LOCKED; - Put put = 
new Put(rowToLock); - put.addColumn(family, qualifier, newValue); - boolean acquired = sysMutexTable.checkAndPut(rowToLock, family, qualifier, oldValue, put); - if (!acquired) { - /* - * Because of TTL on the SYSTEM_MUTEX_FAMILY, it is very much possible that the cell - * has gone away. So we need to retry with an old value of null. Note there is a small - * race condition here that between the two checkAndPut calls, it is possible that another - * request would have set the value back to UPGRADE_MUTEX_UNLOCKED. In that scenario this - * following checkAndPut would still return false even though the lock was available. - */ - acquired = sysMutexTable.checkAndPut(rowToLock, family, qualifier, null, put); - if (!acquired) { - throw new UpgradeInProgressException(getVersion(currentServerSideTableTimestamp), - getVersion(MIN_SYSTEM_TABLE_TIMESTAMP)); + @Override + public boolean writeMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException { + try { + byte[] rowKey = + columnName != null + ? 
SchemaUtil.getColumnKey(tenantId, schemaName, tableName, columnName, + familyName) + : SchemaUtil.getTableKey(tenantId, schemaName, tableName); + // at this point the system mutex table should have been created or + // an exception thrown + byte[] sysMutexPhysicalTableNameBytes = getSysMutexPhysicalTableNameBytes(); + try (Table sysMutexTable = getTable(sysMutexPhysicalTableNameBytes)) { + byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES; + byte[] qualifier = PhoenixDatabaseMetaData.SYSTEM_MUTEX_COLUMN_NAME_BYTES; + byte[] value = MUTEX_LOCKED; + Put put = new Put(rowKey); + put.addColumn(family, qualifier, value); + boolean checkAndPut = + sysMutexTable.checkAndPut(rowKey, family, qualifier, null, put); + String processName = ManagementFactory.getRuntimeMXBean().getName(); + String msg = + " tenantId : " + tenantId + " schemaName : " + schemaName + " tableName : " + + tableName + " columnName : " + columnName + " familyName : " + + familyName; + if (!checkAndPut) { + logger.error(processName + " failed to acquire mutex for "+ msg); + } + else { + logger.debug(processName + " acquired mutex for "+ msg); } + return checkAndPut; } - return true; + } catch (IOException e) { + throw ServerUtil.parseServerException(e); } } @VisibleForTesting - public boolean releaseUpgradeMutex(byte[] mutexRowKey) throws IOException, SQLException { - boolean released = false; - - byte[] sysMutexPhysicalTableNameBytes = getSysMutexPhysicalTableNameBytes(); - if(sysMutexPhysicalTableNameBytes == null) { - // We shouldn't never be really in this situation where neither SYSMUTEX or SYS:MUTEX exists - return true; - } + public void releaseUpgradeMutex() throws IOException, SQLException { + deleteMutexCell(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE, null, null); + } - try (Table sysMutexTable = getTable(sysMutexPhysicalTableNameBytes)) { - byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES; - 
byte[] qualifier = UPGRADE_MUTEX; - byte[] expectedValue = UPGRADE_MUTEX_LOCKED; - byte[] newValue = UPGRADE_MUTEX_UNLOCKED; - Put put = new Put(mutexRowKey); - put.addColumn(family, qualifier, newValue); - released = sysMutexTable.checkAndPut(mutexRowKey, family, qualifier, expectedValue, put); - } catch (Exception e) { - logger.warn("Release of upgrade mutex failed", e); + @Override + public void deleteMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException { + try { + byte[] rowKey = + columnName != null + ? SchemaUtil.getColumnKey(tenantId, schemaName, tableName, columnName, + familyName) + : SchemaUtil.getTableKey(tenantId, schemaName, tableName); + // at this point the system mutex table should have been created or + // an exception thrown + byte[] sysMutexPhysicalTableNameBytes = getSysMutexPhysicalTableNameBytes(); + try (Table sysMutexTable = getTable(sysMutexPhysicalTableNameBytes)) { + byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES; + byte[] qualifier = PhoenixDatabaseMetaData.SYSTEM_MUTEX_COLUMN_NAME_BYTES; + Delete delete = new Delete(rowKey); + delete.addColumn(family, qualifier); + sysMutexTable.delete(delete); + String processName = ManagementFactory.getRuntimeMXBean().getName(); + String msg = + " tenantId : " + tenantId + " schemaName : " + schemaName + " tableName : " + + tableName + " columnName : " + columnName + " familyName : " + + familyName; + logger.debug(processName + " released mutex for "+ msg); + } + } catch (IOException e) { + throw ServerUtil.parseServerException(e); } - return released; } private byte[] getSysMutexPhysicalTableNameBytes() throws IOException, SQLException { diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java index 8ac686f1b32..6623e617a1d 100644 --- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java @@ -181,6 +181,10 @@ private String setSystemLogDDLProperties(String ddl) { protected String getChildLinkDDL() { return setSystemDDLProperties(QueryConstants.CREATE_CHILD_LINK_METADATA); } + + protected String getMutexDDL() { + return setSystemDDLProperties(QueryConstants.CREATE_MUTEX_METADTA); + } private String setSystemDDLProperties(String ddl) { return String.format(ddl, @@ -382,6 +386,11 @@ public void init(String url, Properties props) throws SQLException { .executeUpdate(getChildLinkDDL()); } catch (NewerTableAlreadyExistsException ignore) { } + try { + metaConnection.createStatement() + .executeUpdate(getMutexDDL()); + } catch (NewerTableAlreadyExistsException ignore) { + } } catch (SQLException e) { sqlE = e; } finally { @@ -733,4 +742,15 @@ public QueryLoggerDisruptor getQueryDisruptor() { public PhoenixTransactionClient initTransactionClient(Provider provider) { return null; // Client is not necessary } + + @Override + public boolean writeMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException { + return true; + } + + @Override + public void deleteMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException { + } } diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java index 1d746b39181..57ff6e87e07 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java @@ -371,4 +371,15 @@ public QueryLoggerDisruptor getQueryDisruptor() { public PhoenixTransactionClient 
initTransactionClient(Provider provider) { return getDelegate().initTransactionClient(provider); } + + @Override + public boolean writeMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException { + return true; + } + + @Override + public void deleteMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException { + } } \ No newline at end of file diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java index 2e3ca57e2b8..5df636c6937 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java @@ -356,5 +356,19 @@ public enum JoinType {INNER, LEFT_OUTER} + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" + HConstants.VERSIONS + "=%s,\n" + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" + PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE; + + public static final String CREATE_MUTEX_METADTA = + "CREATE IMMUTABLE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_MUTEX_TABLE_NAME + "\"(\n" + + // Pk columns + TENANT_ID + " VARCHAR NULL," + + TABLE_SCHEM + " VARCHAR NULL," + + TABLE_NAME + " VARCHAR NOT NULL," + + COLUMN_NAME + " VARCHAR NULL," + // null for table row + COLUMN_FAMILY + " VARCHAR NULL " + // using for CF to uniqueness for columns + "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + "," + + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" + + HConstants.VERSIONS + "=%s,\n" + + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" + + PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE; } \ No newline at end of file diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java index 3b9348c5a1f..56eed5d86ea 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java @@ -1872,6 +1872,24 @@ private static boolean checkAndValidateRowTimestampCol(ColumnDef colDef, Primary } return false; } + + /** + * If we are creating a view we write a cell to the SYSTEM.MUTEX table with the rowkey of the + * parent table to prevent concurrent modifications + */ + private boolean writeCell(String tenantId, String schemaName, String tableName, String columnName) + throws SQLException { + return connection.getQueryServices().writeMutexCell(tenantId, schemaName, tableName, columnName, null); + } + + /** + * Remove the cell that was written to the SYSTEM.MUTEX table with the rowkey of the + * parent table to prevent concurrent modifications + */ + private void deleteCell(String tenantId, String schemaName, String tableName, String columnName) + throws SQLException { + connection.getQueryServices().deleteMutexCell(tenantId, schemaName, tableName, columnName, null); + } private PTable createTableInternal(CreateTableStatement statement, byte[][] splits, final PTable parent, String viewStatement, ViewType viewType, PDataType viewIndexType, @@ -1882,6 +1900,7 @@ private PTable createTableInternal(CreateTableStatement statement, byte[][] spli final PTableType tableType = statement.getTableType(); boolean wasAutoCommit = connection.getAutoCommit(); connection.rollback(); + boolean acquiredMutex = false; try { connection.setAutoCommit(false); List tableMetaData = Lists.newArrayListWithExpectedSize(statement.getColumnDefs().size() + 3); @@ -1911,6 +1930,21 @@ private PTable createTableInternal(CreateTableStatement statement, byte[][] spli boolean isLocalIndex = indexType == IndexType.LOCAL; QualifierEncodingScheme encodingScheme = NON_ENCODED_QUALIFIERS; ImmutableStorageScheme 
immutableStorageScheme = ONE_CELL_PER_COLUMN; + + if (tableType == PTableType.VIEW) { + PName physicalName = parent.getPhysicalName(); + String physicalSchemaName = + SchemaUtil.getSchemaNameFromFullName(physicalName.getString()); + String physicalTableName = + SchemaUtil.getTableNameFromFullName(physicalName.getString()); + // acquire the mutex using the global physical table name to + // prevent creating views while concurrently dropping the base + // table + acquiredMutex = writeCell(null, physicalSchemaName, physicalTableName, null); + if (!acquiredMutex) { + throw new ConcurrentTableMutationException(physicalSchemaName, physicalTableName); + } + } if (parent != null && tableType == PTableType.INDEX) { timestamp = TransactionUtil.getTableTimestamp(connection, transactionProvider != null, transactionProvider); storeNulls = parent.getStoreNulls(); @@ -2832,6 +2866,16 @@ public boolean isViewReferenced() { } } finally { connection.setAutoCommit(wasAutoCommit); + if (acquiredMutex && tableType == PTableType.VIEW) { + PName physicalName = parent.getPhysicalName(); + String physicalSchemaName = + SchemaUtil.getSchemaNameFromFullName(physicalName.getString()); + String physicalTableName = + SchemaUtil.getTableNameFromFullName(physicalName.getString()); + // releasing mutex on the table (required to prevent creating views while concurrently + // dropping the base table) + deleteCell(null, physicalSchemaName, physicalTableName, null); + } } } @@ -2941,9 +2985,11 @@ MutationState dropTable(String schemaName, String tableName, String parentTableN boolean ifExists, boolean cascade, boolean skipAddingParentColumns) throws SQLException { connection.rollback(); boolean wasAutoCommit = connection.getAutoCommit(); + PName tenantId = connection.getTenantId(); + String tenantIdStr = tenantId == null ? 
null : tenantId.getString(); + boolean acquiredMutex = false; + String physicalTableName = SchemaUtil.getTableName(schemaName, tableName); try { - PName tenantId = connection.getTenantId(); - String tenantIdStr = tenantId == null ? null : tenantId.getString(); byte[] key = SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName); Long scn = connection.getSCN(); long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; @@ -2956,6 +3002,14 @@ MutationState dropTable(String schemaName, String tableName, String parentTableN Delete linkDelete = new Delete(linkKey, clientTimeStamp); tableMetaData.add(linkDelete); } + if (tableType == PTableType.TABLE) { + // acquire a mutex on the table to prevent creating views while concurrently + // dropping the base table + acquiredMutex = writeCell(null, schemaName, tableName, null); + if (!acquiredMutex) { + throw new ConcurrentTableMutationException(schemaName, schemaName); + } + } MetaDataMutationResult result = connection.getQueryServices().dropTable(tableMetaData, tableType, cascade, skipAddingParentColumns); MutationCode code = result.getMutationCode(); PTable table = result.getTable(); @@ -3033,6 +3087,11 @@ MutationState dropTable(String schemaName, String tableName, String parentTableN return new MutationState(0, 0, connection); } finally { connection.setAutoCommit(wasAutoCommit); + // releasing mutex on the table (required to prevent creating views while concurrently + // dropping the base table) + if (acquiredMutex && tableType == PTableType.TABLE) { + deleteCell(null, schemaName, tableName, null); + } } } @@ -3251,11 +3310,18 @@ public MutationState addColumn(PTable table, List origColumnDefs, throws SQLException { connection.rollback(); boolean wasAutoCommit = connection.getAutoCommit(); + List columns = Lists.newArrayListWithExpectedSize(origColumnDefs != null ? 
origColumnDefs.size() : 0); + PName tenantId = connection.getTenantId(); + String schemaName = table.getSchemaName().getString(); + String tableName = table.getTableName().getString(); + PName physicalName = table.getPhysicalName(); + String physicalSchemaName = + SchemaUtil.getSchemaNameFromFullName(physicalName.getString()); + String physicalTableName = + SchemaUtil.getTableNameFromFullName(physicalName.getString()); + Set acquiredColumnMutexSet = Sets.newHashSetWithExpectedSize(3); try { connection.setAutoCommit(false); - PName tenantId = connection.getTenantId(); - String schemaName = table.getSchemaName().getString(); - String tableName = table.getTableName().getString(); List columnDefs = null; if (table.isAppendOnlySchema()) { @@ -3335,7 +3401,6 @@ public MutationState addColumn(PTable table, List origColumnDefs, boolean willBeTxnl = metaProperties.getNonTxToTx(); Long timeStamp = TransactionUtil.getTableTimestamp(connection, table.isTransactional() || willBeTxnl, table.isTransactional() ? table.getTransactionProvider() : metaPropertiesEvaluated.getTransactionProvider()); int numPkColumnsAdded = 0; - List columns = Lists.newArrayListWithExpectedSize(numCols); Set colFamiliesForPColumnsToBeAdded = new LinkedHashSet<>(); Set families = new LinkedHashSet<>(); PTable tableForCQCounters = tableType == PTableType.VIEW ? 
PhoenixRuntime.getTable(connection, table.getPhysicalName().getString()) : table; @@ -3532,6 +3597,18 @@ public MutationState addColumn(PTable table, List origColumnDefs, } } + boolean acquiredMutex = true; + for (PColumn pColumn : columns) { + // acquire the mutex using the global physical table name to + // prevent creating the same column on a table or view with + // a conflicting type etc + acquiredMutex = writeCell(null, physicalSchemaName, physicalTableName, + pColumn.getName().getString()); + if (!acquiredMutex) { + throw new ConcurrentTableMutationException(physicalSchemaName, physicalTableName); + } + acquiredColumnMutexSet.add(pColumn.getName().getString()); + } MetaDataMutationResult result = connection.getQueryServices().addColumn(tableMetaData, table, properties, colFamiliesForPColumnsToBeAdded, columns); try { MutationCode code = processMutationResult(schemaName, tableName, result); @@ -3602,6 +3679,12 @@ public MutationState addColumn(PTable table, List origColumnDefs, } } finally { connection.setAutoCommit(wasAutoCommit); + if (!acquiredColumnMutexSet.isEmpty()) { + for (String columnName : acquiredColumnMutexSet) { + // release the mutex (used to prevent concurrent conflicting add column changes) + deleteCell(null, physicalSchemaName, physicalTableName, columnName); + } + } } } From 332c9aa07359d812fb89ac48fef34b07b868f5a8 Mon Sep 17 00:00:00 2001 From: Karan Mehta Date: Tue, 7 Aug 2018 15:47:33 -0700 Subject: [PATCH 10/14] PHOENIX-4834 PhoenixMetricsLog interface methods should not depend on specific logger --- .../apache/phoenix/monitoring/PhoenixMetricsIT.java | 9 ++++----- .../phoenix/jdbc/LoggingPhoenixConnection.java | 11 ++++++----- .../phoenix/jdbc/LoggingPhoenixResultSet.java | 7 ++----- .../org/apache/phoenix/jdbc/PhoenixMetricsLog.java | 13 ++++++------- 4 files changed, 18 insertions(+), 22 deletions(-) diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java index f13391fa686..4c5c59259ac 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java @@ -76,7 +76,6 @@ import org.junit.BeforeClass; import org.junit.Test; import org.mockito.internal.util.reflection.Whitebox; -import org.slf4j.Logger; import com.google.common.base.Joiner; import com.google.common.collect.Lists; @@ -1046,25 +1045,25 @@ public void testPhoenixMetricsLogged() throws Exception { LoggingPhoenixConnection protectedConn = new LoggingPhoenixConnection(conn, new PhoenixMetricsLog() { @Override - public void logOverAllReadRequestMetrics(Logger logger, + public void logOverAllReadRequestMetrics( Map overAllQueryMetrics) { overAllQueryMetricsMap.putAll(overAllQueryMetrics); } @Override - public void logRequestReadMetrics(Logger logger, + public void logRequestReadMetrics( Map> requestReadMetrics) { requestReadMetricsMap.putAll(requestReadMetrics); } @Override - public void logWriteMetricsfoForMutations(Logger logger, + public void logWriteMetricsfoForMutations( Map> mutationWriteMetrics) { mutationWriteMetricsMap.putAll(mutationWriteMetrics); } @Override - public void logReadMetricInfoForMutationsSinceLastReset(Logger logger, + public void logReadMetricInfoForMutationsSinceLastReset( Map> mutationReadMetrics) { mutationReadMetricsMap.putAll(mutationReadMetrics); } diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java index e1b5dee6567..d98da832a5a 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java @@ -23,12 +23,9 @@ import java.sql.Statement; import org.apache.phoenix.util.PhoenixRuntime; -import org.slf4j.Logger; 
-import org.slf4j.LoggerFactory; public class LoggingPhoenixConnection extends DelegateConnection { - private static final Logger logger = LoggerFactory.getLogger(LoggingPhoenixResultSet.class); private PhoenixMetricsLog phoenixMetricsLog; public LoggingPhoenixConnection(Connection conn, @@ -37,6 +34,10 @@ public LoggingPhoenixConnection(Connection conn, this.phoenixMetricsLog = phoenixMetricsLog; } + public PhoenixMetricsLog getPhoenixMetricsLog() { + return phoenixMetricsLog; + } + @Override public Statement createStatement() throws SQLException { return new LoggingPhoenixStatement(super.createStatement(), phoenixMetricsLog); @@ -101,8 +102,8 @@ public PreparedStatement prepareStatement(String sql, String[] columnNames) @Override public void commit() throws SQLException { super.commit(); - phoenixMetricsLog.logWriteMetricsfoForMutations(logger, PhoenixRuntime.getWriteMetricInfoForMutationsSinceLastReset(conn)); - phoenixMetricsLog.logReadMetricInfoForMutationsSinceLastReset(logger, PhoenixRuntime.getReadMetricInfoForMutationsSinceLastReset(conn)); + phoenixMetricsLog.logWriteMetricsfoForMutations(PhoenixRuntime.getWriteMetricInfoForMutationsSinceLastReset(conn)); + phoenixMetricsLog.logReadMetricInfoForMutationsSinceLastReset(PhoenixRuntime.getReadMetricInfoForMutationsSinceLastReset(conn)); PhoenixRuntime.resetMetrics(conn); } diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixResultSet.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixResultSet.java index cc3b567d5ff..fbde499f26e 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixResultSet.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixResultSet.java @@ -21,12 +21,9 @@ import java.sql.SQLException; import org.apache.phoenix.util.PhoenixRuntime; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public class LoggingPhoenixResultSet extends DelegateResultSet { - private static final Logger logger = 
LoggerFactory.getLogger(LoggingPhoenixResultSet.class); private PhoenixMetricsLog phoenixMetricsLog; public LoggingPhoenixResultSet(ResultSet rs, PhoenixMetricsLog phoenixMetricsLog) { @@ -36,8 +33,8 @@ public LoggingPhoenixResultSet(ResultSet rs, PhoenixMetricsLog phoenixMetricsLog @Override public void close() throws SQLException { - phoenixMetricsLog.logOverAllReadRequestMetrics(logger, PhoenixRuntime.getOverAllReadRequestMetricInfo(rs)); - phoenixMetricsLog.logRequestReadMetrics(logger, PhoenixRuntime.getRequestReadMetricInfo(rs)); + phoenixMetricsLog.logOverAllReadRequestMetrics(PhoenixRuntime.getOverAllReadRequestMetricInfo(rs)); + phoenixMetricsLog.logRequestReadMetrics(PhoenixRuntime.getRequestReadMetricInfo(rs)); PhoenixRuntime.resetMetrics(rs); super.close(); } diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixMetricsLog.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixMetricsLog.java index eac9bb8c494..be08e52e4c4 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixMetricsLog.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixMetricsLog.java @@ -20,16 +20,15 @@ import java.util.Map; import org.apache.phoenix.monitoring.MetricType; -import org.slf4j.Logger; public interface PhoenixMetricsLog { - void logOverAllReadRequestMetrics(Logger logger, Map overAllQueryMetrics); + void logOverAllReadRequestMetrics(Map overAllQueryMetrics); - void logRequestReadMetrics(Logger logger, Map> requestReadMetrics); - - void logWriteMetricsfoForMutations(Logger logger, Map> mutationWriteMetrics); - - void logReadMetricInfoForMutationsSinceLastReset(Logger logger, Map> mutationReadMetrics); + void logRequestReadMetrics(Map> requestReadMetrics); + + void logWriteMetricsfoForMutations(Map> mutationWriteMetrics); + + void logReadMetricInfoForMutationsSinceLastReset(Map> mutationReadMetrics); } From 9d2175b3211d3a1771a1db541fdf28dcbdcd751e Mon Sep 17 00:00:00 2001 From: Karan Mehta Date: Wed, 8 Aug 2018 
13:18:50 -0700 Subject: [PATCH 11/14] PHOENIX-4805 Move Avatica version to 1.12 for PQS --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 7fe1b3937b3..45477a80b9f 100644 --- a/pom.xml +++ b/pom.xml @@ -97,7 +97,7 @@ 1.6 2.1.2 - 1.11.0 + 1.12.0 9.3.19.v20170502 0.14.0-incubating 2.3.0 From e8e604ea4ac4c31b9535eced66cda6a2ffda6741 Mon Sep 17 00:00:00 2001 From: Xu Cang Date: Mon, 6 Aug 2018 23:56:00 -0700 Subject: [PATCH 12/14] PHOENIX-4647 Column header doesn't handle optional arguments correctly --- .../expression/function/SubstrFunction.java | 20 +++++++++++++++++++ .../phoenix/compile/WhereOptimizerTest.java | 12 +++++++++++ .../org/apache/phoenix/util/TestUtil.java | 5 +++++ 3 files changed, 37 insertions(+) diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SubstrFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SubstrFunction.java index feaa07a187b..0d6d1c9542c 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SubstrFunction.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SubstrFunction.java @@ -211,5 +211,25 @@ protected boolean extractNode() { public String getName() { return NAME; } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(getName() + "("); + if (children.size()==0) + return buf.append(")").toString(); + if (hasLengthExpression) { + buf.append(getStrExpression()); + buf.append(", "); + buf.append(getOffsetExpression()); + buf.append(", "); + buf.append(getLengthExpression()); + } else { + buf.append(getStrExpression()); + buf.append(", "); + buf.append(getOffsetExpression()); + } + buf.append(")"); + return buf.toString(); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java index e5555d6b876..cc6f55ad2f9 100644 --- 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java @@ -29,6 +29,7 @@ import static org.apache.phoenix.util.TestUtil.not; import static org.apache.phoenix.util.TestUtil.rowKeyFilter; import static org.apache.phoenix.util.TestUtil.substr; +import static org.apache.phoenix.util.TestUtil.substr2; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -62,6 +63,7 @@ import org.apache.phoenix.compile.WhereOptimizer.KeyExpressionVisitor.SlotsIterator; import org.apache.phoenix.compile.WhereOptimizer.KeyExpressionVisitor.TrailingRangeIterator; import org.apache.phoenix.expression.Expression; +import org.apache.phoenix.expression.function.SubstrFunction; import org.apache.phoenix.filter.BooleanExpressionFilter; import org.apache.phoenix.filter.RowKeyComparisonFilter; import org.apache.phoenix.filter.SingleCQKeyValueComparisonFilter; @@ -589,6 +591,16 @@ public void testOverlappingKeyExpression() throws SQLException { assertArrayEquals(ByteUtil.nextKey(startRow), scan.getStopRow()); } + @Test + public void testSubstrExpressionWithoutLengthVariable() { + assertEquals("SUBSTR(ENTITY_ID, 1)",((SubstrFunction)substr2(ENTITY_ID,1)).toString()); + } + + @Test + public void testSubstrExpressionWithLengthVariable() { + assertEquals("SUBSTR(ENTITY_ID, 1, 10)",((SubstrFunction)substr(ENTITY_ID,1, 10)).toString()); + } + @Test public void testTrailingSubstrExpression() throws SQLException { String tenantId = "0xD000000000001"; diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java index 788b38b5131..a7183ea2385 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java @@ -354,6 +354,11 @@ public static 
Expression substr(Expression e, Object offset, Object length) { return new SubstrFunction(Arrays.asList(e, LiteralExpression.newConstant(offset), LiteralExpression.newConstant(length))); } + public static Expression substr2(Expression e, Object offset) { + + return new SubstrFunction(Arrays.asList(e, LiteralExpression.newConstant(offset), LiteralExpression.newConstant(null))); + } + public static Expression columnComparison(CompareOp op, Expression c1, Expression c2) { return new ComparisonExpression(Arrays.asList(c1, c2), op); } From 6ae5cb0c8107235b02677825f94e4aed29d7987c Mon Sep 17 00:00:00 2001 From: Thomas D'Silva Date: Thu, 9 Aug 2018 17:33:09 -0700 Subject: [PATCH 13/14] PHOENIX-4843 InListExpression toString() converts the values in the list to ASC sort order always --- .../org/apache/phoenix/end2end/InListIT.java | 72 +++++++++++++++++++ .../phoenix/expression/InListExpression.java | 11 ++- 2 files changed, 80 insertions(+), 3 deletions(-) diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java index fe88dc8fc03..2820fdd5230 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java @@ -18,6 +18,7 @@ package org.apache.phoenix.end2end; import static java.util.Collections.singletonList; +import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB; import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -27,6 +28,7 @@ import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.SQLException; +import java.sql.Statement; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -46,6 +48,8 @@ public class InListIT extends ParallelStatsDisabledIT { + + private final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + TENANT_ID_ATTRIB + 
"=tenant1"; @Test public void testLeadingPKWithTrailingRVC() throws Exception { @@ -481,5 +485,73 @@ private void testWithFixedLengthPK(SortOrder sortOrder) throws Exception { conn.close(); } + + @Test + public void testInListExpressionWithDesc() throws Exception { + String fullTableName = generateUniqueName(); + String fullViewName = generateUniqueName(); + String tenantView = generateUniqueName(); + // create base table and global view using global connection + try (Connection conn = DriverManager.getConnection(getUrl())) { + Statement stmt = conn.createStatement(); + stmt.execute("CREATE TABLE " + fullTableName + "(\n" + + " TENANT_ID CHAR(15) NOT NULL,\n" + + " KEY_PREFIX CHAR(3) NOT NULL,\n" + + " CREATED_DATE DATE,\n" + + " CREATED_BY CHAR(15),\n" + + " SYSTEM_MODSTAMP DATE\n" + + " CONSTRAINT PK PRIMARY KEY (\n" + + " TENANT_ID," + + " KEY_PREFIX" + + ")) MULTI_TENANT=TRUE"); + + stmt.execute("CREATE VIEW " + fullViewName + "(\n" + + " MODEL VARCHAR NOT NULL,\n" + + " MILEAGE BIGINT NOT NULL,\n" + + " MILES_DRIVEN BIGINT NOT NULL,\n" + + " MAKE VARCHAR,\n" + + " CONSTRAINT PKVIEW PRIMARY KEY\n" + + " (\n" + + " MODEL, MILEAGE DESC, MILES_DRIVEN\n" + + ")) AS SELECT * FROM " + fullTableName + " WHERE KEY_PREFIX = '0CY'"); + + } + + // create and use a tenant specific view to write data + try (Connection viewConn = DriverManager.getConnection(TENANT_SPECIFIC_URL1) ) { + Statement stmt = viewConn.createStatement(); + stmt.execute("CREATE VIEW IF NOT EXISTS " + tenantView + " AS SELECT * FROM " + fullViewName ); + viewConn.createStatement().execute("UPSERT INTO " + tenantView + "(CREATED_BY, CREATED_DATE, SYSTEM_MODSTAMP, MODEL, MILEAGE, MILES_DRIVEN, MAKE) VALUES ('005xx000001Sv6o', 1532458254819, 1532458254819, 'a5', 23, 10000, 'AUDI')"); + viewConn.createStatement().execute("UPSERT INTO " + tenantView + "(CREATED_BY, CREATED_DATE, SYSTEM_MODSTAMP, MODEL, MILEAGE, MILES_DRIVEN, MAKE) VALUES ('005xx000001Sv6o', 1532458254819, 1532458254819, 'a4', 27, 30000, 
'AUDI')"); + viewConn.createStatement().execute("UPSERT INTO " + tenantView + "(CREATED_BY, CREATED_DATE, SYSTEM_MODSTAMP, MODEL, MILEAGE, MILES_DRIVEN, MAKE) VALUES ('005xx000001Sv6o', 1532458254819, 1532458254819, '328i', 32, 40000, 'BMW')"); + viewConn.commit(); + + ResultSet rs = stmt.executeQuery("SELECT Make, Model FROM " + tenantView + " WHERE MILEAGE IN (32, 27)"); + assertTrue(rs.next()); + assertEquals("BMW", rs.getString(1)); + assertEquals("328i", rs.getString(2)); + assertTrue(rs.next()); + assertEquals("AUDI", rs.getString(1)); + assertEquals("a4", rs.getString(2)); + assertFalse(rs.next()); + + rs = stmt.executeQuery("SELECT Make, Model FROM " + tenantView + " WHERE MILES_DRIVEN IN (30000, 40000)"); + assertTrue(rs.next()); + assertEquals("BMW", rs.getString(1)); + assertEquals("328i", rs.getString(2)); + assertTrue(rs.next()); + assertEquals("AUDI", rs.getString(1)); + assertEquals("a4", rs.getString(2)); + assertFalse(rs.next()); + + viewConn.createStatement().execute("DELETE FROM " + tenantView + " WHERE MILEAGE IN (27, 32)"); + viewConn.commit(); + rs = stmt.executeQuery("SELECT Make, Model FROM " + tenantView); + assertTrue(rs.next()); + assertEquals("AUDI", rs.getString(1)); + assertEquals("a5", rs.getString(2)); + assertFalse(rs.next()); + } + } } diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/InListExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/InListExpression.java index a977f1fd509..61fa05c19c1 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/expression/InListExpression.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/InListExpression.java @@ -273,10 +273,15 @@ public String toString() { PDataType type = firstChild.getDataType(); StringBuilder buf = new StringBuilder(firstChild + " IN ("); for (ImmutableBytesPtr value : values) { - if (firstChild.getSortOrder() != null) { - type.coerceBytes(value, type, firstChild.getSortOrder(), SortOrder.getDefault()); + 
ImmutableBytesWritable currValue = value; + if (firstChild.getSortOrder() != null && !firstChild.getSortOrder().equals(SortOrder.getDefault())) { + // if we have to invert the bytes create a new ImmutableBytesWritable so that the + // original value is not changed + currValue = new ImmutableBytesWritable(value); + type.coerceBytes(currValue, type, firstChild.getSortOrder(), + SortOrder.getDefault()); } - buf.append(type.toStringLiteral(value, null)); + buf.append(type.toStringLiteral(currValue, null)); buf.append(','); if (buf.length() >= maxToStringLen) { buf.append("... "); From c98097cc692f3d82827427e4c96687a71e8ded17 Mon Sep 17 00:00:00 2001 From: Mahdi Salarkia Date: Fri, 27 Jul 2018 22:27:46 -0700 Subject: [PATCH 14/14] PHOENIX-3547 Supporting more number of indices per table. Currently the number of indices per Phoenix table is bound to maximum of 65535 (java.lang.Short) which is a limitation for applications requiring to have unlimited number of indices. This change will consider any new table created in Phoenix to support view index ids to be in the range of -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807 (java.lang.Long) which is undoubtably big enough to cover this requirement. Any existing Phoenix table will still continue to support only maximum of 65535 of indices. A new int column (VIEW_INDEX_ID_DATA_TYPE TINYINT) is added to SYSTEM.CATALOG to specify each Phoenix table's vewIndex data type. On each new Phoenix table creation the value for VIEW_INDEX_ID_DATA_TYPE will be set to `Long` while this value would be `Short` for any existing table. 
--- .../coprocessor/MetaDataEndpointImpl.java | 47 +-- .../phoenix/coprocessor/MetaDataProtocol.java | 23 +- .../coprocessor/generated/MetaDataProtos.java | 356 +++++++++--------- .../coprocessor/generated/PTableProtos.java | 99 +++-- .../generated/ServerCachingProtos.java | 120 +++--- .../apache/phoenix/index/IndexMaintainer.java | 7 +- .../phoenix/jdbc/PhoenixDatabaseMetaData.java | 4 +- .../query/ConnectionQueryServicesImpl.java | 4 +- .../apache/phoenix/query/QueryConstants.java | 4 +- .../apache/phoenix/schema/MetaDataClient.java | 8 +- .../org/apache/phoenix/schema/PTableImpl.java | 11 +- .../org/apache/phoenix/util/MetaDataUtil.java | 1 + .../src/main/MetaDataService.proto | 4 +- phoenix-protocol/src/main/PTable.proto | 2 +- .../src/main/ServerCachingService.proto | 2 +- 15 files changed, 344 insertions(+), 348 deletions(-) diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java index 8b0409a8c7f..b96cd2b8071 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java @@ -72,9 +72,10 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.USE_STATS_FOR_PARALLELIZATION_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.USE_LONG_VIEW_INDEX_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE_BYTES; import static 
org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT; import static org.apache.phoenix.schema.PTableType.INDEX; import static org.apache.phoenix.schema.PTableType.TABLE; @@ -321,7 +322,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr /** * A designator for choosing the right type for viewIndex (Short vs Long) to be backward compatible. * **/ - private static final KeyValue USE_LONG_VIEW_INDEX_ID_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, USE_LONG_VIEW_INDEX_BYTES); + private static final KeyValue VIEW_INDEX_ID_DATA_TYPE_BYTES_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_INDEX_ID_DATA_TYPE_BYTES); private static final KeyValue INDEX_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_TYPE_BYTES); private static final KeyValue INDEX_DISABLE_TIMESTAMP_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES); private static final KeyValue STORE_NULLS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORE_NULLS_BYTES); @@ -333,11 +334,11 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr private static final KeyValue UPDATE_CACHE_FREQUENCY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, UPDATE_CACHE_FREQUENCY_BYTES); private static final KeyValue IS_NAMESPACE_MAPPED_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_NAMESPACE_MAPPED_BYTES); - private static final KeyValue AUTO_PARTITION_SEQ_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, AUTO_PARTITION_SEQ_BYTES); - private static final KeyValue APPEND_ONLY_SCHEMA_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, APPEND_ONLY_SCHEMA_BYTES); - private static final KeyValue STORAGE_SCHEME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORAGE_SCHEME_BYTES); - private static final KeyValue 
ENCODING_SCHEME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ENCODING_SCHEME_BYTES); - private static final KeyValue USE_STATS_FOR_PARALLELIZATION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, USE_STATS_FOR_PARALLELIZATION_BYTES); + private static final Cell AUTO_PARTITION_SEQ_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, AUTO_PARTITION_SEQ_BYTES); + private static final Cell APPEND_ONLY_SCHEMA_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, APPEND_ONLY_SCHEMA_BYTES); + private static final Cell STORAGE_SCHEME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORAGE_SCHEME_BYTES); + private static final Cell ENCODING_SCHEME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ENCODING_SCHEME_BYTES); + private static final Cell USE_STATS_FOR_PARALLELIZATION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, USE_STATS_FOR_PARALLELIZATION_BYTES); private static final List TABLE_KV_COLUMNS = Arrays.asList( EMPTY_KEYVALUE_KV, @@ -355,7 +356,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr MULTI_TENANT_KV, VIEW_TYPE_KV, VIEW_INDEX_ID_KV, - USE_LONG_VIEW_INDEX_ID_KV, + VIEW_INDEX_ID_DATA_TYPE_BYTES_KV, INDEX_TYPE_KV, INDEX_DISABLE_TIMESTAMP_KV, STORE_NULLS_KV, @@ -388,7 +389,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr private static final int DISABLE_WAL_INDEX = TABLE_KV_COLUMNS.indexOf(DISABLE_WAL_KV); private static final int MULTI_TENANT_INDEX = TABLE_KV_COLUMNS.indexOf(MULTI_TENANT_KV); private static final int VIEW_TYPE_INDEX = TABLE_KV_COLUMNS.indexOf(VIEW_TYPE_KV); - private static final int USE_LONG_VIEW_INDEX = TABLE_KV_COLUMNS.indexOf(USE_LONG_VIEW_INDEX_ID_KV); + private static final int VIEW_INDEX_ID_DATA_TYPE = TABLE_KV_COLUMNS.indexOf(VIEW_INDEX_ID_DATA_TYPE_BYTES_KV); private static final int VIEW_INDEX_ID_INDEX = 
TABLE_KV_COLUMNS.indexOf(VIEW_INDEX_ID_KV); private static final int INDEX_TYPE_INDEX = TABLE_KV_COLUMNS.indexOf(INDEX_TYPE_KV); private static final int STORE_NULLS_INDEX = TABLE_KV_COLUMNS.indexOf(STORE_NULLS_KV); @@ -1449,31 +1450,23 @@ private Long getViewIndexId(Cell[] tableKeyValues, PDataType viewIndexType) { } /** - * check the value for {@value USE_LONG_VIEW_INDEX} and if its present consider viewIndexId as long otherwise - * read as short and convert it to long + * Returns viewIndexId based on its underlying data type * * @param tableKeyValues * @param viewIndexType * @return */ - private Long decodeViewIndexId(Cell viewIndexIdKv, PDataType viewIndexType) { - boolean useLongViewIndex = MetaDataUtil.getViewIndexIdDataType().equals(viewIndexType); - return new Long( - useLongViewIndex - ? viewIndexType.getCodec().decodeLong(viewIndexIdKv.getValueArray(), - viewIndexIdKv.getValueOffset(), SortOrder.getDefault()) - : MetaDataUtil.getLegacyViewIndexIdDataType().getCodec().decodeShort(viewIndexIdKv.getValueArray(), - viewIndexIdKv.getValueOffset(), SortOrder.getDefault()) - ); + private Long decodeViewIndexId(Cell viewIndexIdKv, PDataType viewIndexType) { + return viewIndexType.getCodec().decodeLong(viewIndexIdKv.getValueArray(), + viewIndexIdKv.getValueOffset(), SortOrder.getDefault()); } private PDataType getViewIndexType(Cell[] tableKeyValues) { - Cell useLongViewIndexKv = tableKeyValues[USE_LONG_VIEW_INDEX]; - boolean useLongViewIndex = useLongViewIndexKv != null; - return useLongViewIndex ? 
- MetaDataUtil.getViewIndexIdDataType() - : MetaDataUtil.getLegacyViewIndexIdDataType(); + Cell dataTypeKv = tableKeyValues[VIEW_INDEX_ID_DATA_TYPE]; + return PDataType.fromTypeId(PInteger.INSTANCE.getCodec() + .decodeInt(dataTypeKv.getValueArray(), dataTypeKv.getValueOffset(), SortOrder.getDefault())); } + private boolean isQualifierCounterKV(Cell kv) { int cmp = Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), @@ -2316,8 +2309,8 @@ public void createTable(RpcController controller, CreateTableRequest request, long currentTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata); builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND); if (indexId != null) { - builder.setViewIndexId(indexId); - builder.setUseLongViewIndexId(true); + builder.setViewIndexId(indexId); + builder.setViewIndexType(PLong.INSTANCE.getSqlType()); } builder.setMutationTime(currentTimeStamp); done.run(builder.build()); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java index 611c466b328..8861a33ffd7 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java @@ -210,8 +210,8 @@ public PName apply(ByteString physicalName) { } }); this.viewIndexId = sharedTable.getViewIndexId(); - this.viewIndexType = sharedTable.hasUseLongViewIndexId() - ? MetaDataUtil.getViewIndexIdDataType() + this.viewIndexType = sharedTable.hasViewIndexType() + ? 
PDataType.fromTypeId(sharedTable.getViewIndexType()) : MetaDataUtil.getLegacyViewIndexIdDataType(); } @@ -409,11 +409,12 @@ public static MetaDataMutationResult constructFromProto(MetaDataResponse proto) result.autoPartitionNum = proto.getAutoPartitionNum(); } if (proto.hasViewIndexId()) { - result.viewIndexId = proto.getViewIndexId(); + result.viewIndexId = proto.getViewIndexId(); } - result.viewIndexType = proto.hasUseLongViewIndexId() - ? MetaDataUtil.getViewIndexIdDataType() - : MetaDataUtil.getLegacyViewIndexIdDataType(); + + result.viewIndexType = proto.hasViewIndexType() + ? PDataType.fromTypeId(proto.getViewIndexType()) + : MetaDataUtil.getLegacyViewIndexIdDataType(); return result; } @@ -454,7 +455,7 @@ public static MetaDataResponse toProto(MetaDataMutationResult result) { sharedTableStateBuilder.setSchemaName(ByteStringer.wrap(sharedTableState.getSchemaName().getBytes())); sharedTableStateBuilder.setTableName(ByteStringer.wrap(sharedTableState.getTableName().getBytes())); sharedTableStateBuilder.setViewIndexId(sharedTableState.getViewIndexId()); - sharedTableStateBuilder.setUseLongViewIndexId(MetaDataUtil.getViewIndexIdDataType().equals(sharedTableState.viewIndexType)); + sharedTableStateBuilder.setViewIndexType(sharedTableState.viewIndexType.getSqlType()); builder.addSharedTablesToDelete(sharedTableStateBuilder.build()); } } @@ -462,10 +463,10 @@ public static MetaDataResponse toProto(MetaDataMutationResult result) { builder.setSchema(PSchema.toProto(result.schema)); } builder.setAutoPartitionNum(result.getAutoPartitionNum()); - if (result.getViewIndexId() != null) { - builder.setViewIndexId(result.getViewIndexId()); - builder.setUseLongViewIndexId(MetaDataUtil.getViewIndexIdDataType().equals(result.getViewIndexType())); - } + if (result.getViewIndexId() != null) { + builder.setViewIndexId(result.getViewIndexId()); + builder.setViewIndexType(result.getViewIndexType().getSqlType()); + } } return builder.build(); } diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java index fd54ce8c37e..360dd771874 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java @@ -379,15 +379,15 @@ org.apache.phoenix.coprocessor.generated.PTableProtos.PColumnOrBuilder getColumn */ long getViewIndexId(); - // optional bool useLongViewIndexId = 7; + // optional int32 viewIndexType = 7 [default = 5]; /** - * optional bool useLongViewIndexId = 7; + * optional int32 viewIndexType = 7 [default = 5]; */ - boolean hasUseLongViewIndexId(); + boolean hasViewIndexType(); /** - * optional bool useLongViewIndexId = 7; + * optional int32 viewIndexType = 7 [default = 5]; */ - boolean getUseLongViewIndexId(); + int getViewIndexType(); } /** * Protobuf type {@code SharedTableState} @@ -478,7 +478,7 @@ private SharedTableState( } case 56: { bitField0_ |= 0x00000010; - useLongViewIndexId_ = input.readBool(); + viewIndexType_ = input.readInt32(); break; } } @@ -650,20 +650,20 @@ public long getViewIndexId() { return viewIndexId_; } - // optional bool useLongViewIndexId = 7; - public static final int USELONGVIEWINDEXID_FIELD_NUMBER = 7; - private boolean useLongViewIndexId_; + // optional int32 viewIndexType = 7 [default = 5]; + public static final int VIEWINDEXTYPE_FIELD_NUMBER = 7; + private int viewIndexType_; /** - * optional bool useLongViewIndexId = 7; + * optional int32 viewIndexType = 7 [default = 5]; */ - public boolean hasUseLongViewIndexId() { + public boolean hasViewIndexType() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** - * optional bool useLongViewIndexId = 7; + * optional int32 viewIndexType = 7 [default = 5]; */ - public boolean getUseLongViewIndexId() { - return useLongViewIndexId_; + public int getViewIndexType() { + return 
viewIndexType_; } private void initFields() { @@ -673,7 +673,7 @@ private void initFields() { columns_ = java.util.Collections.emptyList(); physicalNames_ = java.util.Collections.emptyList(); viewIndexId_ = 0L; - useLongViewIndexId_ = false; + viewIndexType_ = 5; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -724,7 +724,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) output.writeInt64(6, viewIndexId_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBool(7, useLongViewIndexId_); + output.writeInt32(7, viewIndexType_); } getUnknownFields().writeTo(output); } @@ -766,7 +766,7 @@ public int getSerializedSize() { } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(7, useLongViewIndexId_); + .computeInt32Size(7, viewIndexType_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -815,10 +815,10 @@ public boolean equals(final java.lang.Object obj) { result = result && (getViewIndexId() == other.getViewIndexId()); } - result = result && (hasUseLongViewIndexId() == other.hasUseLongViewIndexId()); - if (hasUseLongViewIndexId()) { - result = result && (getUseLongViewIndexId() - == other.getUseLongViewIndexId()); + result = result && (hasViewIndexType() == other.hasViewIndexType()); + if (hasViewIndexType()) { + result = result && (getViewIndexType() + == other.getViewIndexType()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -857,9 +857,9 @@ public int hashCode() { hash = (37 * hash) + VIEWINDEXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getViewIndexId()); } - if (hasUseLongViewIndexId()) { - hash = (37 * hash) + USELONGVIEWINDEXID_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getUseLongViewIndexId()); + if (hasViewIndexType()) { + hash = (37 * hash) + VIEWINDEXTYPE_FIELD_NUMBER; + hash = (53 * hash) + getViewIndexType(); } hash = (29 * hash) + 
getUnknownFields().hashCode(); memoizedHashCode = hash; @@ -987,7 +987,7 @@ public Builder clear() { bitField0_ = (bitField0_ & ~0x00000010); viewIndexId_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); - useLongViewIndexId_ = false; + viewIndexType_ = 5; bitField0_ = (bitField0_ & ~0x00000040); return this; } @@ -1050,7 +1050,7 @@ public org.apache.phoenix.coprocessor.generated.MetaDataProtos.SharedTableState if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000010; } - result.useLongViewIndexId_ = useLongViewIndexId_; + result.viewIndexType_ = viewIndexType_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -1115,8 +1115,8 @@ public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.MetaDataProtos if (other.hasViewIndexId()) { setViewIndexId(other.getViewIndexId()); } - if (other.hasUseLongViewIndexId()) { - setUseLongViewIndexId(other.getUseLongViewIndexId()); + if (other.hasViewIndexType()) { + setViewIndexType(other.getViewIndexType()); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -1616,35 +1616,35 @@ public Builder clearViewIndexId() { return this; } - // optional bool useLongViewIndexId = 7; - private boolean useLongViewIndexId_ ; + // optional int32 viewIndexType = 7 [default = 5]; + private int viewIndexType_ = 5; /** - * optional bool useLongViewIndexId = 7; + * optional int32 viewIndexType = 7 [default = 5]; */ - public boolean hasUseLongViewIndexId() { + public boolean hasViewIndexType() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** - * optional bool useLongViewIndexId = 7; + * optional int32 viewIndexType = 7 [default = 5]; */ - public boolean getUseLongViewIndexId() { - return useLongViewIndexId_; + public int getViewIndexType() { + return viewIndexType_; } /** - * optional bool useLongViewIndexId = 7; + * optional int32 viewIndexType = 7 [default = 5]; */ - public Builder setUseLongViewIndexId(boolean value) { + public Builder setViewIndexType(int value) { bitField0_ 
|= 0x00000040; - useLongViewIndexId_ = value; + viewIndexType_ = value; onChanged(); return this; } /** - * optional bool useLongViewIndexId = 7; + * optional int32 viewIndexType = 7 [default = 5]; */ - public Builder clearUseLongViewIndexId() { + public Builder clearViewIndexType() { bitField0_ = (bitField0_ & ~0x00000040); - useLongViewIndexId_ = false; + viewIndexType_ = 5; onChanged(); return this; } @@ -1825,15 +1825,15 @@ org.apache.phoenix.coprocessor.generated.MetaDataProtos.SharedTableStateOrBuilde */ long getViewIndexId(); - // optional bool useLongViewIndexId = 13; + // optional int32 viewIndexType = 13 [default = 5]; /** - * optional bool useLongViewIndexId = 13; + * optional int32 viewIndexType = 13 [default = 5]; */ - boolean hasUseLongViewIndexId(); + boolean hasViewIndexType(); /** - * optional bool useLongViewIndexId = 13; + * optional int32 viewIndexType = 13 [default = 5]; */ - boolean getUseLongViewIndexId(); + int getViewIndexType(); } /** * Protobuf type {@code MetaDataResponse} @@ -1979,7 +1979,7 @@ private MetaDataResponse( } case 104: { bitField0_ |= 0x00000200; - useLongViewIndexId_ = input.readBool(); + viewIndexType_ = input.readInt32(); break; } } @@ -2282,20 +2282,20 @@ public long getViewIndexId() { return viewIndexId_; } - // optional bool useLongViewIndexId = 13; - public static final int USELONGVIEWINDEXID_FIELD_NUMBER = 13; - private boolean useLongViewIndexId_; + // optional int32 viewIndexType = 13 [default = 5]; + public static final int VIEWINDEXTYPE_FIELD_NUMBER = 13; + private int viewIndexType_; /** - * optional bool useLongViewIndexId = 13; + * optional int32 viewIndexType = 13 [default = 5]; */ - public boolean hasUseLongViewIndexId() { + public boolean hasViewIndexType() { return ((bitField0_ & 0x00000200) == 0x00000200); } /** - * optional bool useLongViewIndexId = 13; + * optional int32 viewIndexType = 13 [default = 5]; */ - public boolean getUseLongViewIndexId() { - return useLongViewIndexId_; + public int 
getViewIndexType() { + return viewIndexType_; } private void initFields() { @@ -2311,7 +2311,7 @@ private void initFields() { schema_ = org.apache.phoenix.coprocessor.generated.PSchemaProtos.PSchema.getDefaultInstance(); autoPartitionNum_ = 0L; viewIndexId_ = 0L; - useLongViewIndexId_ = false; + viewIndexType_ = 5; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -2386,7 +2386,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) output.writeInt64(12, viewIndexId_); } if (((bitField0_ & 0x00000200) == 0x00000200)) { - output.writeBool(13, useLongViewIndexId_); + output.writeInt32(13, viewIndexType_); } getUnknownFields().writeTo(output); } @@ -2452,7 +2452,7 @@ public int getSerializedSize() { } if (((bitField0_ & 0x00000200) == 0x00000200)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(13, useLongViewIndexId_); + .computeInt32Size(13, viewIndexType_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -2528,10 +2528,10 @@ public boolean equals(final java.lang.Object obj) { result = result && (getViewIndexId() == other.getViewIndexId()); } - result = result && (hasUseLongViewIndexId() == other.hasUseLongViewIndexId()); - if (hasUseLongViewIndexId()) { - result = result && (getUseLongViewIndexId() - == other.getUseLongViewIndexId()); + result = result && (hasViewIndexType() == other.hasViewIndexType()); + if (hasViewIndexType()) { + result = result && (getViewIndexType() + == other.getViewIndexType()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -2594,9 +2594,9 @@ public int hashCode() { hash = (37 * hash) + VIEWINDEXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getViewIndexId()); } - if (hasUseLongViewIndexId()) { - hash = (37 * hash) + USELONGVIEWINDEXID_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getUseLongViewIndexId()); + if (hasViewIndexType()) { + hash = (37 * hash) + VIEWINDEXTYPE_FIELD_NUMBER; + hash = (53 * hash) 
+ getViewIndexType(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; @@ -2751,7 +2751,7 @@ public Builder clear() { bitField0_ = (bitField0_ & ~0x00000400); viewIndexId_ = 0L; bitField0_ = (bitField0_ & ~0x00000800); - useLongViewIndexId_ = false; + viewIndexType_ = 5; bitField0_ = (bitField0_ & ~0x00001000); return this; } @@ -2851,7 +2851,7 @@ public org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse if (((from_bitField0_ & 0x00001000) == 0x00001000)) { to_bitField0_ |= 0x00000200; } - result.useLongViewIndexId_ = useLongViewIndexId_; + result.viewIndexType_ = viewIndexType_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -2957,8 +2957,8 @@ public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.MetaDataProtos if (other.hasViewIndexId()) { setViewIndexId(other.getViewIndexId()); } - if (other.hasUseLongViewIndexId()) { - setUseLongViewIndexId(other.getUseLongViewIndexId()); + if (other.hasViewIndexType()) { + setViewIndexType(other.getViewIndexType()); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -4040,35 +4040,35 @@ public Builder clearViewIndexId() { return this; } - // optional bool useLongViewIndexId = 13; - private boolean useLongViewIndexId_ ; + // optional int32 viewIndexType = 13 [default = 5]; + private int viewIndexType_ = 5; /** - * optional bool useLongViewIndexId = 13; + * optional int32 viewIndexType = 13 [default = 5]; */ - public boolean hasUseLongViewIndexId() { + public boolean hasViewIndexType() { return ((bitField0_ & 0x00001000) == 0x00001000); } /** - * optional bool useLongViewIndexId = 13; + * optional int32 viewIndexType = 13 [default = 5]; */ - public boolean getUseLongViewIndexId() { - return useLongViewIndexId_; + public int getViewIndexType() { + return viewIndexType_; } /** - * optional bool useLongViewIndexId = 13; + * optional int32 viewIndexType = 13 [default = 5]; */ - public Builder setUseLongViewIndexId(boolean value) { + 
public Builder setViewIndexType(int value) { bitField0_ |= 0x00001000; - useLongViewIndexId_ = value; + viewIndexType_ = value; onChanged(); return this; } /** - * optional bool useLongViewIndexId = 13; + * optional int32 viewIndexType = 13 [default = 5]; */ - public Builder clearUseLongViewIndexId() { + public Builder clearViewIndexType() { bitField0_ = (bitField0_ & ~0x00001000); - useLongViewIndexId_ = false; + viewIndexType_ = 5; onChanged(); return this; } @@ -17827,105 +17827,105 @@ public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCac static { java.lang.String[] descriptorData = { "\n\025MetaDataService.proto\032\014PTable.proto\032\017P" + - "Function.proto\032\rPSchema.proto\"\256\001\n\020Shared" + + "Function.proto\032\rPSchema.proto\"\254\001\n\020Shared" + "TableState\022\020\n\010tenantId\030\001 \001(\014\022\022\n\nschemaNa" + "me\030\002 \002(\014\022\021\n\ttableName\030\003 \002(\014\022\031\n\007columns\030\004" + " \003(\0132\010.PColumn\022\025\n\rphysicalNames\030\005 \003(\014\022\023\n" + - "\013viewIndexId\030\006 \002(\003\022\032\n\022useLongViewIndexId" + - "\030\007 \001(\010\"\355\002\n\020MetaDataResponse\022!\n\nreturnCod" + - "e\030\001 \001(\0162\r.MutationCode\022\024\n\014mutationTime\030\002" + - " \001(\003\022\026\n\005table\030\003 \001(\0132\007.PTable\022\026\n\016tablesTo" + - "Delete\030\004 \003(\014\022\022\n\ncolumnName\030\005 \001(\014\022\022\n\nfami", - "lyName\030\006 \001(\014\022\024\n\014functionName\030\007 \001(\014\022\034\n\010fu" + - "nction\030\010 \003(\0132\n.PFunction\022/\n\024sharedTables" + - "ToDelete\030\t \003(\0132\021.SharedTableState\022\030\n\006sch" + - "ema\030\n \001(\0132\010.PSchema\022\030\n\020autoPartitionNum\030" + - "\013 \001(\003\022\023\n\013viewIndexId\030\014 \001(\003\022\032\n\022useLongVie" + - "wIndexId\030\r \001(\010\"\364\001\n\017GetTableRequest\022\020\n\010te" + - "nantId\030\001 \002(\014\022\022\n\nschemaName\030\002 
\002(\014\022\021\n\ttabl" + - "eName\030\003 \002(\014\022\026\n\016tableTimestamp\030\004 \002(\003\022\027\n\017c" + - "lientTimestamp\030\005 \002(\003\022\025\n\rclientVersion\030\006 " + - "\001(\005\022\037\n\027skipAddingParentColumns\030\007 \001(\010\022\031\n\021", - "skipAddingIndexes\030\010 \001(\010\022$\n\023lockedAncesto" + - "rTable\030\t \001(\0132\007.PTable\"\212\001\n\023GetFunctionsRe" + - "quest\022\020\n\010tenantId\030\001 \002(\014\022\025\n\rfunctionNames" + - "\030\002 \003(\014\022\032\n\022functionTimestamps\030\003 \003(\003\022\027\n\017cl" + - "ientTimestamp\030\004 \002(\003\022\025\n\rclientVersion\030\005 \001" + - "(\005\"V\n\020GetSchemaRequest\022\022\n\nschemaName\030\001 \002" + - "(\t\022\027\n\017clientTimestamp\030\002 \002(\003\022\025\n\rclientVer" + - "sion\030\003 \002(\005\"d\n\022CreateTableRequest\022\036\n\026tabl" + - "eMetadataMutations\030\001 \003(\014\022\025\n\rclientVersio" + - "n\030\002 \001(\005\022\027\n\017allocateIndexId\030\003 \001(\010\"r\n\025Crea", - "teFunctionRequest\022\036\n\026tableMetadataMutati" + - "ons\030\001 \003(\014\022\021\n\ttemporary\030\002 \002(\010\022\017\n\007replace\030" + - "\003 \001(\010\022\025\n\rclientVersion\030\004 \001(\005\"`\n\023CreateSc" + - "hemaRequest\022\036\n\026tableMetadataMutations\030\001 " + - "\003(\014\022\022\n\nschemaName\030\002 \002(\t\022\025\n\rclientVersion" + - "\030\003 \002(\005\"\216\001\n\020DropTableRequest\022\036\n\026tableMeta" + - "dataMutations\030\001 \003(\014\022\021\n\ttableType\030\002 \002(\t\022\017" + - "\n\007cascade\030\003 \001(\010\022\025\n\rclientVersion\030\004 \001(\005\022\037" + - "\n\027skipAddingParentColumns\030\005 \001(\010\"_\n\021DropS" + - "chemaRequest\022\037\n\027schemaMetadataMutations\030", - "\001 \003(\014\022\022\n\nschemaName\030\002 \002(\t\022\025\n\rclientVersi" + - "on\030\003 \002(\005\"I\n\020AddColumnRequest\022\036\n\026tableMet" + + "\013viewIndexId\030\006 
\002(\003\022\030\n\rviewIndexType\030\007 \001(" + + "\005:\0015\"\353\002\n\020MetaDataResponse\022!\n\nreturnCode\030" + + "\001 \001(\0162\r.MutationCode\022\024\n\014mutationTime\030\002 \001" + + "(\003\022\026\n\005table\030\003 \001(\0132\007.PTable\022\026\n\016tablesToDe" + + "lete\030\004 \003(\014\022\022\n\ncolumnName\030\005 \001(\014\022\022\n\nfamily", + "Name\030\006 \001(\014\022\024\n\014functionName\030\007 \001(\014\022\034\n\010func" + + "tion\030\010 \003(\0132\n.PFunction\022/\n\024sharedTablesTo" + + "Delete\030\t \003(\0132\021.SharedTableState\022\030\n\006schem" + + "a\030\n \001(\0132\010.PSchema\022\030\n\020autoPartitionNum\030\013 " + + "\001(\003\022\023\n\013viewIndexId\030\014 \001(\003\022\030\n\rviewIndexTyp" + + "e\030\r \001(\005:\0015\"\364\001\n\017GetTableRequest\022\020\n\010tenant" + + "Id\030\001 \002(\014\022\022\n\nschemaName\030\002 \002(\014\022\021\n\ttableNam" + + "e\030\003 \002(\014\022\026\n\016tableTimestamp\030\004 \002(\003\022\027\n\017clien" + + "tTimestamp\030\005 \002(\003\022\025\n\rclientVersion\030\006 \001(\005\022" + + "\037\n\027skipAddingParentColumns\030\007 \001(\010\022\031\n\021skip", + "AddingIndexes\030\010 \001(\010\022$\n\023lockedAncestorTab" + + "le\030\t \001(\0132\007.PTable\"\212\001\n\023GetFunctionsReques" + + "t\022\020\n\010tenantId\030\001 \002(\014\022\025\n\rfunctionNames\030\002 \003" + + "(\014\022\032\n\022functionTimestamps\030\003 \003(\003\022\027\n\017client" + + "Timestamp\030\004 \002(\003\022\025\n\rclientVersion\030\005 \001(\005\"V" + + "\n\020GetSchemaRequest\022\022\n\nschemaName\030\001 \002(\t\022\027" + + "\n\017clientTimestamp\030\002 \002(\003\022\025\n\rclientVersion" + + "\030\003 \002(\005\"d\n\022CreateTableRequest\022\036\n\026tableMet" + "adataMutations\030\001 \003(\014\022\025\n\rclientVersion\030\002 " + - "\001(\005\"J\n\021DropColumnRequest\022\036\n\026tableMetadat" + + "\001(\005\022\027\n\017allocateIndexId\030\003 \001(\010\"r\n\025CreateFu", + 
"nctionRequest\022\036\n\026tableMetadataMutations\030" + + "\001 \003(\014\022\021\n\ttemporary\030\002 \002(\010\022\017\n\007replace\030\003 \001(" + + "\010\022\025\n\rclientVersion\030\004 \001(\005\"`\n\023CreateSchema" + + "Request\022\036\n\026tableMetadataMutations\030\001 \003(\014\022" + + "\022\n\nschemaName\030\002 \002(\t\022\025\n\rclientVersion\030\003 \002" + + "(\005\"\216\001\n\020DropTableRequest\022\036\n\026tableMetadata" + + "Mutations\030\001 \003(\014\022\021\n\ttableType\030\002 \002(\t\022\017\n\007ca" + + "scade\030\003 \001(\010\022\025\n\rclientVersion\030\004 \001(\005\022\037\n\027sk" + + "ipAddingParentColumns\030\005 \001(\010\"_\n\021DropSchem" + + "aRequest\022\037\n\027schemaMetadataMutations\030\001 \003(", + "\014\022\022\n\nschemaName\030\002 \002(\t\022\025\n\rclientVersion\030\003" + + " \002(\005\"I\n\020AddColumnRequest\022\036\n\026tableMetadat" + "aMutations\030\001 \003(\014\022\025\n\rclientVersion\030\002 \001(\005\"" + - "^\n\023DropFunctionRequest\022\036\n\026tableMetadataM" + - "utations\030\001 \003(\014\022\020\n\010ifExists\030\002 \001(\010\022\025\n\rclie" + - "ntVersion\030\003 \001(\005\"P\n\027UpdateIndexStateReque" + - "st\022\036\n\026tableMetadataMutations\030\001 \003(\014\022\025\n\rcl" + - "ientVersion\030\002 \001(\005\"*\n\021ClearCacheRequest\022\025", - "\n\rclientVersion\030\001 \001(\005\"*\n\022ClearCacheRespo" + - "nse\022\024\n\014unfreedBytes\030\001 \001(\003\"*\n\021GetVersionR" + - "equest\022\025\n\rclientVersion\030\001 \001(\005\"E\n\022GetVers" + - "ionResponse\022\017\n\007version\030\001 \002(\003\022\036\n\026systemCa" + - "talogTimestamp\030\002 \001(\003\"\205\001\n\032ClearTableFromC" + - "acheRequest\022\020\n\010tenantId\030\001 \002(\014\022\022\n\nschemaN" + - "ame\030\002 \002(\014\022\021\n\ttableName\030\003 \002(\014\022\027\n\017clientTi" + - "mestamp\030\004 \002(\003\022\025\n\rclientVersion\030\005 \001(\005\"\035\n\033" + - 
"ClearTableFromCacheResponse*\271\005\n\014Mutation" + - "Code\022\030\n\024TABLE_ALREADY_EXISTS\020\000\022\023\n\017TABLE_", - "NOT_FOUND\020\001\022\024\n\020COLUMN_NOT_FOUND\020\002\022\031\n\025COL" + - "UMN_ALREADY_EXISTS\020\003\022\035\n\031CONCURRENT_TABLE" + - "_MUTATION\020\004\022\027\n\023TABLE_NOT_IN_REGION\020\005\022\025\n\021" + - "NEWER_TABLE_FOUND\020\006\022\034\n\030UNALLOWED_TABLE_M" + - "UTATION\020\007\022\021\n\rNO_PK_COLUMNS\020\010\022\032\n\026PARENT_T" + - "ABLE_NOT_FOUND\020\t\022\033\n\027FUNCTION_ALREADY_EXI" + - "STS\020\n\022\026\n\022FUNCTION_NOT_FOUND\020\013\022\030\n\024NEWER_F" + - "UNCTION_FOUND\020\014\022\032\n\026FUNCTION_NOT_IN_REGIO" + - "N\020\r\022\031\n\025SCHEMA_ALREADY_EXISTS\020\016\022\026\n\022NEWER_" + - "SCHEMA_FOUND\020\017\022\024\n\020SCHEMA_NOT_FOUND\020\020\022\030\n\024", - "SCHEMA_NOT_IN_REGION\020\021\022\032\n\026TABLES_EXIST_O" + - "N_SCHEMA\020\022\022\035\n\031UNALLOWED_SCHEMA_MUTATION\020" + - "\023\022%\n!AUTO_PARTITION_SEQUENCE_NOT_FOUND\020\024" + - "\022#\n\037CANNOT_COERCE_AUTO_PARTITION_ID\020\025\022\024\n" + - "\020TOO_MANY_INDEXES\020\026\022\037\n\033UNABLE_TO_CREATE_" + - "CHILD_LINK\020\027\022!\n\035UNABLE_TO_UPDATE_PARENT_" + - "TABLE\020\0302\345\006\n\017MetaDataService\022/\n\010getTable\022" + - "\020.GetTableRequest\032\021.MetaDataResponse\0227\n\014" + - "getFunctions\022\024.GetFunctionsRequest\032\021.Met" + - "aDataResponse\0221\n\tgetSchema\022\021.GetSchemaRe", - "quest\032\021.MetaDataResponse\0225\n\013createTable\022" + - "\023.CreateTableRequest\032\021.MetaDataResponse\022" + - ";\n\016createFunction\022\026.CreateFunctionReques" + - "t\032\021.MetaDataResponse\0227\n\014createSchema\022\024.C" + - "reateSchemaRequest\032\021.MetaDataResponse\0221\n" + - "\tdropTable\022\021.DropTableRequest\032\021.MetaData" + - "Response\0223\n\ndropSchema\022\022.DropSchemaReque" + - "st\032\021.MetaDataResponse\0227\n\014dropFunction\022\024." 
+ - "DropFunctionRequest\032\021.MetaDataResponse\0221" + - "\n\taddColumn\022\021.AddColumnRequest\032\021.MetaDat", - "aResponse\0223\n\ndropColumn\022\022.DropColumnRequ" + - "est\032\021.MetaDataResponse\022?\n\020updateIndexSta" + - "te\022\030.UpdateIndexStateRequest\032\021.MetaDataR" + - "esponse\0225\n\nclearCache\022\022.ClearCacheReques" + - "t\032\023.ClearCacheResponse\0225\n\ngetVersion\022\022.G" + - "etVersionRequest\032\023.GetVersionResponse\022P\n" + - "\023clearTableFromCache\022\033.ClearTableFromCac" + - "heRequest\032\034.ClearTableFromCacheResponseB" + - "B\n(org.apache.phoenix.coprocessor.genera" + - "tedB\016MetaDataProtosH\001\210\001\001\240\001\001" + "J\n\021DropColumnRequest\022\036\n\026tableMetadataMut" + + "ations\030\001 \003(\014\022\025\n\rclientVersion\030\002 \001(\005\"^\n\023D" + + "ropFunctionRequest\022\036\n\026tableMetadataMutat" + + "ions\030\001 \003(\014\022\020\n\010ifExists\030\002 \001(\010\022\025\n\rclientVe" + + "rsion\030\003 \001(\005\"P\n\027UpdateIndexStateRequest\022\036" + + "\n\026tableMetadataMutations\030\001 \003(\014\022\025\n\rclient" + + "Version\030\002 \001(\005\"*\n\021ClearCacheRequest\022\025\n\rcl", + "ientVersion\030\001 \001(\005\"*\n\022ClearCacheResponse\022" + + "\024\n\014unfreedBytes\030\001 \001(\003\"*\n\021GetVersionReque" + + "st\022\025\n\rclientVersion\030\001 \001(\005\"E\n\022GetVersionR" + + "esponse\022\017\n\007version\030\001 \002(\003\022\036\n\026systemCatalo" + + "gTimestamp\030\002 \001(\003\"\205\001\n\032ClearTableFromCache" + + "Request\022\020\n\010tenantId\030\001 \002(\014\022\022\n\nschemaName\030" + + "\002 \002(\014\022\021\n\ttableName\030\003 \002(\014\022\027\n\017clientTimest" + + "amp\030\004 \002(\003\022\025\n\rclientVersion\030\005 \001(\005\"\035\n\033Clea" + + "rTableFromCacheResponse*\271\005\n\014MutationCode" + + "\022\030\n\024TABLE_ALREADY_EXISTS\020\000\022\023\n\017TABLE_NOT_", + 
"FOUND\020\001\022\024\n\020COLUMN_NOT_FOUND\020\002\022\031\n\025COLUMN_" + + "ALREADY_EXISTS\020\003\022\035\n\031CONCURRENT_TABLE_MUT" + + "ATION\020\004\022\027\n\023TABLE_NOT_IN_REGION\020\005\022\025\n\021NEWE" + + "R_TABLE_FOUND\020\006\022\034\n\030UNALLOWED_TABLE_MUTAT" + + "ION\020\007\022\021\n\rNO_PK_COLUMNS\020\010\022\032\n\026PARENT_TABLE" + + "_NOT_FOUND\020\t\022\033\n\027FUNCTION_ALREADY_EXISTS\020" + + "\n\022\026\n\022FUNCTION_NOT_FOUND\020\013\022\030\n\024NEWER_FUNCT" + + "ION_FOUND\020\014\022\032\n\026FUNCTION_NOT_IN_REGION\020\r\022" + + "\031\n\025SCHEMA_ALREADY_EXISTS\020\016\022\026\n\022NEWER_SCHE" + + "MA_FOUND\020\017\022\024\n\020SCHEMA_NOT_FOUND\020\020\022\030\n\024SCHE", + "MA_NOT_IN_REGION\020\021\022\032\n\026TABLES_EXIST_ON_SC" + + "HEMA\020\022\022\035\n\031UNALLOWED_SCHEMA_MUTATION\020\023\022%\n" + + "!AUTO_PARTITION_SEQUENCE_NOT_FOUND\020\024\022#\n\037" + + "CANNOT_COERCE_AUTO_PARTITION_ID\020\025\022\024\n\020TOO" + + "_MANY_INDEXES\020\026\022\037\n\033UNABLE_TO_CREATE_CHIL" + + "D_LINK\020\027\022!\n\035UNABLE_TO_UPDATE_PARENT_TABL" + + "E\020\0302\345\006\n\017MetaDataService\022/\n\010getTable\022\020.Ge" + + "tTableRequest\032\021.MetaDataResponse\0227\n\014getF" + + "unctions\022\024.GetFunctionsRequest\032\021.MetaDat" + + "aResponse\0221\n\tgetSchema\022\021.GetSchemaReques", + "t\032\021.MetaDataResponse\0225\n\013createTable\022\023.Cr" + + "eateTableRequest\032\021.MetaDataResponse\022;\n\016c" + + "reateFunction\022\026.CreateFunctionRequest\032\021." 
+ + "MetaDataResponse\0227\n\014createSchema\022\024.Creat" + + "eSchemaRequest\032\021.MetaDataResponse\0221\n\tdro" + + "pTable\022\021.DropTableRequest\032\021.MetaDataResp" + + "onse\0223\n\ndropSchema\022\022.DropSchemaRequest\032\021" + + ".MetaDataResponse\0227\n\014dropFunction\022\024.Drop" + + "FunctionRequest\032\021.MetaDataResponse\0221\n\tad" + + "dColumn\022\021.AddColumnRequest\032\021.MetaDataRes", + "ponse\0223\n\ndropColumn\022\022.DropColumnRequest\032" + + "\021.MetaDataResponse\022?\n\020updateIndexState\022\030" + + ".UpdateIndexStateRequest\032\021.MetaDataRespo" + + "nse\0225\n\nclearCache\022\022.ClearCacheRequest\032\023." + + "ClearCacheResponse\0225\n\ngetVersion\022\022.GetVe" + + "rsionRequest\032\023.GetVersionResponse\022P\n\023cle" + + "arTableFromCache\022\033.ClearTableFromCacheRe" + + "quest\032\034.ClearTableFromCacheResponseBB\n(o" + + "rg.apache.phoenix.coprocessor.generatedB" + + "\016MetaDataProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -17937,13 +17937,13 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_SharedTableState_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SharedTableState_descriptor, - new java.lang.String[] { "TenantId", "SchemaName", "TableName", "Columns", "PhysicalNames", "ViewIndexId", "UseLongViewIndexId", }); + new java.lang.String[] { "TenantId", "SchemaName", "TableName", "Columns", "PhysicalNames", "ViewIndexId", "ViewIndexType", }); internal_static_MetaDataResponse_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_MetaDataResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MetaDataResponse_descriptor, - new java.lang.String[] { "ReturnCode", "MutationTime", "Table", 
"TablesToDelete", "ColumnName", "FamilyName", "FunctionName", "Function", "SharedTablesToDelete", "Schema", "AutoPartitionNum", "ViewIndexId", "UseLongViewIndexId", }); + new java.lang.String[] { "ReturnCode", "MutationTime", "Table", "TablesToDelete", "ColumnName", "FamilyName", "FunctionName", "Function", "SharedTablesToDelete", "Schema", "AutoPartitionNum", "ViewIndexId", "ViewIndexType", }); internal_static_GetTableRequest_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_GetTableRequest_fieldAccessorTable = new diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java index d8f5247b059..938ae1f19df 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java @@ -3695,15 +3695,15 @@ org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedCQCounterOrBuilder */ int getTransactionProvider(); - // optional bool useLongViewIndexId = 39; + // optional int32 viewIndexType = 39 [default = 5]; /** - * optional bool useLongViewIndexId = 39; + * optional int32 viewIndexType = 39 [default = 5]; */ - boolean hasUseLongViewIndexId(); + boolean hasViewIndexType(); /** - * optional bool useLongViewIndexId = 39; + * optional int32 viewIndexType = 39 [default = 5]; */ - boolean getUseLongViewIndexId(); + int getViewIndexType(); } /** * Protobuf type {@code PTable} @@ -3962,7 +3962,7 @@ private PTable( } case 312: { bitField1_ |= 0x00000002; - useLongViewIndexId_ = input.readBool(); + viewIndexType_ = input.readInt32(); break; } } @@ -4745,20 +4745,20 @@ public int getTransactionProvider() { return transactionProvider_; } - // optional bool useLongViewIndexId = 39; - public static final int USELONGVIEWINDEXID_FIELD_NUMBER = 39; - private boolean useLongViewIndexId_; + // optional int32 viewIndexType 
= 39 [default = 5]; + public static final int VIEWINDEXTYPE_FIELD_NUMBER = 39; + private int viewIndexType_; /** - * optional bool useLongViewIndexId = 39; + * optional int32 viewIndexType = 39 [default = 5]; */ - public boolean hasUseLongViewIndexId() { + public boolean hasViewIndexType() { return ((bitField1_ & 0x00000002) == 0x00000002); } /** - * optional bool useLongViewIndexId = 39; + * optional int32 viewIndexType = 39 [default = 5]; */ - public boolean getUseLongViewIndexId() { - return useLongViewIndexId_; + public int getViewIndexType() { + return viewIndexType_; } private void initFields() { @@ -4799,7 +4799,7 @@ private void initFields() { encodedCQCounters_ = java.util.Collections.emptyList(); useStatsForParallelization_ = false; transactionProvider_ = 0; - useLongViewIndexId_ = false; + viewIndexType_ = 5; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -4979,7 +4979,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) output.writeInt32(38, transactionProvider_); } if (((bitField1_ & 0x00000002) == 0x00000002)) { - output.writeBool(39, useLongViewIndexId_); + output.writeInt32(39, viewIndexType_); } getUnknownFields().writeTo(output); } @@ -5145,7 +5145,7 @@ public int getSerializedSize() { } if (((bitField1_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(39, useLongViewIndexId_); + .computeInt32Size(39, viewIndexType_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -5343,10 +5343,10 @@ public boolean equals(final java.lang.Object obj) { result = result && (getTransactionProvider() == other.getTransactionProvider()); } - result = result && (hasUseLongViewIndexId() == other.hasUseLongViewIndexId()); - if (hasUseLongViewIndexId()) { - result = result && (getUseLongViewIndexId() - == other.getUseLongViewIndexId()); + result = result && (hasViewIndexType() == other.hasViewIndexType()); + if (hasViewIndexType()) { + 
result = result && (getViewIndexType() + == other.getViewIndexType()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -5509,9 +5509,9 @@ public int hashCode() { hash = (37 * hash) + TRANSACTIONPROVIDER_FIELD_NUMBER; hash = (53 * hash) + getTransactionProvider(); } - if (hasUseLongViewIndexId()) { - hash = (37 * hash) + USELONGVIEWINDEXID_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getUseLongViewIndexId()); + if (hasViewIndexType()) { + hash = (37 * hash) + VIEWINDEXTYPE_FIELD_NUMBER; + hash = (53 * hash) + getViewIndexType(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; @@ -5711,7 +5711,7 @@ public Builder clear() { bitField1_ = (bitField1_ & ~0x00000008); transactionProvider_ = 0; bitField1_ = (bitField1_ & ~0x00000010); - useLongViewIndexId_ = false; + viewIndexType_ = 5; bitField1_ = (bitField1_ & ~0x00000020); return this; } @@ -5910,7 +5910,7 @@ public org.apache.phoenix.coprocessor.generated.PTableProtos.PTable buildPartial if (((from_bitField1_ & 0x00000020) == 0x00000020)) { to_bitField1_ |= 0x00000002; } - result.useLongViewIndexId_ = useLongViewIndexId_; + result.viewIndexType_ = viewIndexType_; result.bitField0_ = to_bitField0_; result.bitField1_ = to_bitField1_; onBuilt(); @@ -6119,8 +6119,8 @@ public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.PTableProtos.P if (other.hasTransactionProvider()) { setTransactionProvider(other.getTransactionProvider()); } - if (other.hasUseLongViewIndexId()) { - setUseLongViewIndexId(other.getUseLongViewIndexId()); + if (other.hasViewIndexType()) { + setViewIndexType(other.getViewIndexType()); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -8234,35 +8234,35 @@ public Builder clearTransactionProvider() { return this; } - // optional bool useLongViewIndexId = 39; - private boolean useLongViewIndexId_ ; + // optional int32 viewIndexType = 39 [default = 5]; + private int viewIndexType_ = 5; /** - * optional bool 
useLongViewIndexId = 39; + * optional int32 viewIndexType = 39 [default = 5]; */ - public boolean hasUseLongViewIndexId() { + public boolean hasViewIndexType() { return ((bitField1_ & 0x00000020) == 0x00000020); } /** - * optional bool useLongViewIndexId = 39; + * optional int32 viewIndexType = 39 [default = 5]; */ - public boolean getUseLongViewIndexId() { - return useLongViewIndexId_; + public int getViewIndexType() { + return viewIndexType_; } /** - * optional bool useLongViewIndexId = 39; + * optional int32 viewIndexType = 39 [default = 5]; */ - public Builder setUseLongViewIndexId(boolean value) { + public Builder setViewIndexType(int value) { bitField1_ |= 0x00000020; - useLongViewIndexId_ = value; + viewIndexType_ = value; onChanged(); return this; } /** - * optional bool useLongViewIndexId = 39; + * optional int32 viewIndexType = 39 [default = 5]; */ - public Builder clearUseLongViewIndexId() { + public Builder clearViewIndexType() { bitField1_ = (bitField1_ & ~0x00000020); - useLongViewIndexId_ = false; + viewIndexType_ = 5; onChanged(); return this; } @@ -8936,7 +8936,7 @@ public Builder clearCounter() { "es\030\002 \003(\014\022\033\n\023guidePostsByteCount\030\003 \001(\003\022\025\n" + "\rkeyBytesCount\030\004 \001(\003\022\027\n\017guidePostsCount\030" + "\005 \001(\005\022!\n\013pGuidePosts\030\006 \001(\0132\014.PGuidePosts" + - "\"\311\007\n\006PTable\022\027\n\017schemaNameBytes\030\001 \002(\014\022\026\n\016" + + "\"\307\007\n\006PTable\022\027\n\017schemaNameBytes\030\001 \002(\014\022\026\n\016" + "tableNameBytes\030\002 \002(\014\022\036\n\ttableType\030\003 \002(\0162" + "\013.PTableType\022\022\n\nindexState\030\004 \001(\t\022\026\n\016sequ" + "enceNumber\030\005 \002(\003\022\021\n\ttimeStamp\030\006 \002(\003\022\023\n\013p" + @@ -8959,13 +8959,12 @@ public Builder clearCounter() { "eme\030\" \001(\014\022\026\n\016encodingScheme\030# \001(\014\022,\n\021enc" + "odedCQCounters\030$ \003(\0132\021.EncodedCQCounter\022" + 
"\"\n\032useStatsForParallelization\030% \001(\010\022\033\n\023t" + - "ransactionProvider\030& \001(\005\022\032\n\022useLongViewI" + - "ndexId\030\' \001(\010\"6\n\020EncodedCQCounter\022\021\n\tcolF" + - "amily\030\001 \002(\t\022\017\n\007counter\030\002 \002(\005*A\n\nPTableTy", - "pe\022\n\n\006SYSTEM\020\000\022\010\n\004USER\020\001\022\010\n\004VIEW\020\002\022\t\n\005IN" + - "DEX\020\003\022\010\n\004JOIN\020\004B@\n(org.apache.phoenix.co" + - "processor.generatedB\014PTableProtosH\001\210\001\001\240\001" + - "\001" + "ransactionProvider\030& \001(\005\022\030\n\rviewIndexTyp" + + "e\030\' \001(\005:\0015\"6\n\020EncodedCQCounter\022\021\n\tcolFam" + + "ily\030\001 \002(\t\022\017\n\007counter\030\002 \002(\005*A\n\nPTableType", + "\022\n\n\006SYSTEM\020\000\022\010\n\004USER\020\001\022\010\n\004VIEW\020\002\022\t\n\005INDE" + + "X\020\003\022\010\n\004JOIN\020\004B@\n(org.apache.phoenix.copr" + + "ocessor.generatedB\014PTableProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -8989,7 +8988,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_PTable_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_PTable_descriptor, - new java.lang.String[] { "SchemaNameBytes", "TableNameBytes", "TableType", "IndexState", "SequenceNumber", "TimeStamp", "PkNameBytes", "BucketNum", "Columns", "Indexes", "IsImmutableRows", "DataTableNameBytes", "DefaultFamilyName", "DisableWAL", "MultiTenant", "ViewType", "ViewStatement", "PhysicalNames", "TenantId", "ViewIndexId", "IndexType", "StatsTimeStamp", "StoreNulls", "BaseColumnCount", "RowKeyOrderOptimizable", "Transactional", "UpdateCacheFrequency", "IndexDisableTimestamp", "IsNamespaceMapped", "AutoParititonSeqName", "IsAppendOnlySchema", "ParentNameBytes", "StorageScheme", "EncodingScheme", 
"EncodedCQCounters", "UseStatsForParallelization", "TransactionProvider", "UseLongViewIndexId", }); + new java.lang.String[] { "SchemaNameBytes", "TableNameBytes", "TableType", "IndexState", "SequenceNumber", "TimeStamp", "PkNameBytes", "BucketNum", "Columns", "Indexes", "IsImmutableRows", "DataTableNameBytes", "DefaultFamilyName", "DisableWAL", "MultiTenant", "ViewType", "ViewStatement", "PhysicalNames", "TenantId", "ViewIndexId", "IndexType", "StatsTimeStamp", "StoreNulls", "BaseColumnCount", "RowKeyOrderOptimizable", "Transactional", "UpdateCacheFrequency", "IndexDisableTimestamp", "IsNamespaceMapped", "AutoParititonSeqName", "IsAppendOnlySchema", "ParentNameBytes", "StorageScheme", "EncodingScheme", "EncodedCQCounters", "UseStatsForParallelization", "TransactionProvider", "ViewIndexType", }); internal_static_EncodedCQCounter_descriptor = getDescriptor().getMessageTypes().get(3); internal_static_EncodedCQCounter_fieldAccessorTable = new diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/ServerCachingProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/ServerCachingProtos.java index 9e1ac1ffa7a..fdca3342bc5 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/ServerCachingProtos.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/ServerCachingProtos.java @@ -2158,15 +2158,15 @@ org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ColumnInfoOrBuilder */ int getImmutableStorageScheme(); - // optional bool useLongViewIndex = 22; + // optional int32 viewIndexType = 22; /** - * optional bool useLongViewIndex = 22; + * optional int32 viewIndexType = 22; */ - boolean hasUseLongViewIndex(); + boolean hasViewIndexType(); /** - * optional bool useLongViewIndex = 22; + * optional int32 viewIndexType = 22; */ - boolean getUseLongViewIndex(); + int getViewIndexType(); } /** * Protobuf type {@code IndexMaintainer} @@ -2362,7 +2362,7 @@ private 
IndexMaintainer( } case 176: { bitField0_ |= 0x00010000; - useLongViewIndex_ = input.readBool(); + viewIndexType_ = input.readInt32(); break; } } @@ -2849,20 +2849,20 @@ public int getImmutableStorageScheme() { return immutableStorageScheme_; } - // optional bool useLongViewIndex = 22; - public static final int USELONGVIEWINDEX_FIELD_NUMBER = 22; - private boolean useLongViewIndex_; + // optional int32 viewIndexType = 22; + public static final int VIEWINDEXTYPE_FIELD_NUMBER = 22; + private int viewIndexType_; /** - * optional bool useLongViewIndex = 22; + * optional int32 viewIndexType = 22; */ - public boolean hasUseLongViewIndex() { + public boolean hasViewIndexType() { return ((bitField0_ & 0x00010000) == 0x00010000); } /** - * optional bool useLongViewIndex = 22; + * optional int32 viewIndexType = 22; */ - public boolean getUseLongViewIndex() { - return useLongViewIndex_; + public int getViewIndexType() { + return viewIndexType_; } private void initFields() { @@ -2887,7 +2887,7 @@ private void initFields() { indexedColumnInfo_ = java.util.Collections.emptyList(); encodingScheme_ = 0; immutableStorageScheme_ = 0; - useLongViewIndex_ = false; + viewIndexType_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -3049,7 +3049,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) output.writeInt32(21, immutableStorageScheme_); } if (((bitField0_ & 0x00010000) == 0x00010000)) { - output.writeBool(22, useLongViewIndex_); + output.writeInt32(22, viewIndexType_); } getUnknownFields().writeTo(output); } @@ -3151,7 +3151,7 @@ public int getSerializedSize() { } if (((bitField0_ & 0x00010000) == 0x00010000)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(22, useLongViewIndex_); + .computeInt32Size(22, viewIndexType_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -3266,10 +3266,10 @@ public boolean equals(final java.lang.Object obj) { result = result && 
(getImmutableStorageScheme() == other.getImmutableStorageScheme()); } - result = result && (hasUseLongViewIndex() == other.hasUseLongViewIndex()); - if (hasUseLongViewIndex()) { - result = result && (getUseLongViewIndex() - == other.getUseLongViewIndex()); + result = result && (hasViewIndexType() == other.hasViewIndexType()); + if (hasViewIndexType()) { + result = result && (getViewIndexType() + == other.getViewIndexType()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -3368,9 +3368,9 @@ public int hashCode() { hash = (37 * hash) + IMMUTABLESTORAGESCHEME_FIELD_NUMBER; hash = (53 * hash) + getImmutableStorageScheme(); } - if (hasUseLongViewIndex()) { - hash = (37 * hash) + USELONGVIEWINDEX_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getUseLongViewIndex()); + if (hasViewIndexType()) { + hash = (37 * hash) + VIEWINDEXTYPE_FIELD_NUMBER; + hash = (53 * hash) + getViewIndexType(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; @@ -3548,7 +3548,7 @@ public Builder clear() { bitField0_ = (bitField0_ & ~0x00080000); immutableStorageScheme_ = 0; bitField0_ = (bitField0_ & ~0x00100000); - useLongViewIndex_ = false; + viewIndexType_ = 0; bitField0_ = (bitField0_ & ~0x00200000); return this; } @@ -3690,7 +3690,7 @@ public org.apache.phoenix.coprocessor.generated.ServerCachingProtos.IndexMaintai if (((from_bitField0_ & 0x00200000) == 0x00200000)) { to_bitField0_ |= 0x00010000; } - result.useLongViewIndex_ = useLongViewIndex_; + result.viewIndexType_ = viewIndexType_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -3869,8 +3869,8 @@ public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.ServerCachingP if (other.hasImmutableStorageScheme()) { setImmutableStorageScheme(other.getImmutableStorageScheme()); } - if (other.hasUseLongViewIndex()) { - setUseLongViewIndex(other.getUseLongViewIndex()); + if (other.hasViewIndexType()) { + setViewIndexType(other.getViewIndexType()); } 
this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -5636,35 +5636,35 @@ public Builder clearImmutableStorageScheme() { return this; } - // optional bool useLongViewIndex = 22; - private boolean useLongViewIndex_ ; + // optional int32 viewIndexType = 22; + private int viewIndexType_ ; /** - * optional bool useLongViewIndex = 22; + * optional int32 viewIndexType = 22; */ - public boolean hasUseLongViewIndex() { + public boolean hasViewIndexType() { return ((bitField0_ & 0x00200000) == 0x00200000); } /** - * optional bool useLongViewIndex = 22; + * optional int32 viewIndexType = 22; */ - public boolean getUseLongViewIndex() { - return useLongViewIndex_; + public int getViewIndexType() { + return viewIndexType_; } /** - * optional bool useLongViewIndex = 22; + * optional int32 viewIndexType = 22; */ - public Builder setUseLongViewIndex(boolean value) { + public Builder setViewIndexType(int value) { bitField0_ |= 0x00200000; - useLongViewIndex_ = value; + viewIndexType_ = value; onChanged(); return this; } /** - * optional bool useLongViewIndex = 22; + * optional int32 viewIndexType = 22; */ - public Builder clearUseLongViewIndex() { + public Builder clearViewIndexType() { bitField0_ = (bitField0_ & ~0x00200000); - useLongViewIndex_ = false; + viewIndexType_ = 0; onChanged(); return this; } @@ -8705,7 +8705,7 @@ public org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServer "ength\030\003 \002(\005\"4\n\017ColumnReference\022\016\n\006family" + "\030\001 \002(\014\022\021\n\tqualifier\030\002 \002(\014\"4\n\nColumnInfo\022" + "\022\n\nfamilyName\030\001 \001(\t\022\022\n\ncolumnName\030\002 \002(\t\"" + - "\340\005\n\017IndexMaintainer\022\023\n\013saltBuckets\030\001 \002(\005" + + "\335\005\n\017IndexMaintainer\022\023\n\013saltBuckets\030\001 \002(\005" + "\022\025\n\risMultiTenant\030\002 \002(\010\022\023\n\013viewIndexId\030\003" + " \001(\014\022(\n\016indexedColumns\030\004 \003(\0132\020.ColumnRef" + "erence\022 
\n\030indexedColumnTypeOrdinal\030\005 \003(\005", @@ -8722,23 +8722,23 @@ public org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServer "ed\030\020 \002(\010\022\033\n\023indexRowKeyByteSize\030\021 \002(\005\022\021\n" + "\timmutable\030\022 \002(\010\022&\n\021indexedColumnInfo\030\023 " + "\003(\0132\013.ColumnInfo\022\026\n\016encodingScheme\030\024 \002(\005" + - "\022\036\n\026immutableStorageScheme\030\025 \002(\005\022\030\n\020useL" + - "ongViewIndex\030\026 \001(\010\"\334\001\n\025AddServerCacheReq" + - "uest\022\020\n\010tenantId\030\001 \001(\014\022\017\n\007cacheId\030\002 \002(\014\022" + - ")\n\010cachePtr\030\003 \002(\0132\027.ImmutableBytesWritab" + - "le\022)\n\014cacheFactory\030\004 \002(\0132\023.ServerCacheFa" + - "ctory\022\017\n\007txState\030\005 \001(\014\022\"\n\032hasProtoBufInd" + - "exMaintainer\030\006 \001(\010\022\025\n\rclientVersion\030\007 \001(", - "\005\"(\n\026AddServerCacheResponse\022\016\n\006return\030\001 " + - "\002(\010\"=\n\030RemoveServerCacheRequest\022\020\n\010tenan" + - "tId\030\001 \001(\014\022\017\n\007cacheId\030\002 \002(\014\"+\n\031RemoveServ" + - "erCacheResponse\022\016\n\006return\030\001 \002(\0102\245\001\n\024Serv" + - "erCachingService\022A\n\016addServerCache\022\026.Add" + - "ServerCacheRequest\032\027.AddServerCacheRespo" + - "nse\022J\n\021removeServerCache\022\031.RemoveServerC" + - "acheRequest\032\032.RemoveServerCacheResponseB" + - "G\n(org.apache.phoenix.coprocessor.genera" + - "tedB\023ServerCachingProtosH\001\210\001\001\240\001\001" + "\022\036\n\026immutableStorageScheme\030\025 \002(\005\022\025\n\rview" + + "IndexType\030\026 \001(\005\"\334\001\n\025AddServerCacheReques" + + "t\022\020\n\010tenantId\030\001 \001(\014\022\017\n\007cacheId\030\002 \002(\014\022)\n\010" + + "cachePtr\030\003 \002(\0132\027.ImmutableBytesWritable\022" + + ")\n\014cacheFactory\030\004 \002(\0132\023.ServerCacheFacto" + + "ry\022\017\n\007txState\030\005 
\001(\014\022\"\n\032hasProtoBufIndexM" + + "aintainer\030\006 \001(\010\022\025\n\rclientVersion\030\007 \001(\005\"(", + "\n\026AddServerCacheResponse\022\016\n\006return\030\001 \002(\010" + + "\"=\n\030RemoveServerCacheRequest\022\020\n\010tenantId" + + "\030\001 \001(\014\022\017\n\007cacheId\030\002 \002(\014\"+\n\031RemoveServerC" + + "acheResponse\022\016\n\006return\030\001 \002(\0102\245\001\n\024ServerC" + + "achingService\022A\n\016addServerCache\022\026.AddSer" + + "verCacheRequest\032\027.AddServerCacheResponse" + + "\022J\n\021removeServerCache\022\031.RemoveServerCach" + + "eRequest\032\032.RemoveServerCacheResponseBG\n(" + + "org.apache.phoenix.coprocessor.generated" + + "B\023ServerCachingProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -8768,7 +8768,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_IndexMaintainer_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IndexMaintainer_descriptor, - new java.lang.String[] { "SaltBuckets", "IsMultiTenant", "ViewIndexId", "IndexedColumns", "IndexedColumnTypeOrdinal", "DataTableColRefForCoveredColumns", "IndexTableColRefForCoveredColumns", "IsLocalIndex", "IndexTableName", "RowKeyOrderOptimizable", "DataTableEmptyKeyValueColFamily", "EmptyKeyValueColFamily", "IndexedExpressions", "RowKeyMetadata", "NumDataTableColFamilies", "IndexWalDisabled", "IndexRowKeyByteSize", "Immutable", "IndexedColumnInfo", "EncodingScheme", "ImmutableStorageScheme", "UseLongViewIndex", }); + new java.lang.String[] { "SaltBuckets", "IsMultiTenant", "ViewIndexId", "IndexedColumns", "IndexedColumnTypeOrdinal", "DataTableColRefForCoveredColumns", "IndexTableColRefForCoveredColumns", "IsLocalIndex", "IndexTableName", "RowKeyOrderOptimizable", "DataTableEmptyKeyValueColFamily", 
"EmptyKeyValueColFamily", "IndexedExpressions", "RowKeyMetadata", "NumDataTableColFamilies", "IndexWalDisabled", "IndexRowKeyByteSize", "Immutable", "IndexedColumnInfo", "EncodingScheme", "ImmutableStorageScheme", "ViewIndexType", }); internal_static_AddServerCacheRequest_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_AddServerCacheRequest_fieldAccessorTable = new diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java index f5827081a46..3416b212397 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java @@ -101,6 +101,7 @@ import org.apache.phoenix.schema.tuple.BaseTuple; import org.apache.phoenix.schema.tuple.ValueGetterTuple; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.util.BitSet; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.EncodedColumnsUtil; @@ -1344,8 +1345,8 @@ public static IndexMaintainer fromProto(ServerCachingProtos.IndexMaintainer prot maintainer.nIndexSaltBuckets = proto.getSaltBuckets(); maintainer.isMultiTenant = proto.getIsMultiTenant(); maintainer.viewIndexId = proto.hasViewIndexId() ? proto.getViewIndexId().toByteArray() : null; - maintainer.viewIndexType = proto.hasUseLongViewIndex() - ? MetaDataUtil.getViewIndexIdDataType() + maintainer.viewIndexType = proto.hasViewIndexType() + ? 
PDataType.fromTypeId(proto.getViewIndexType()) : MetaDataUtil.getLegacyViewIndexIdDataType(); List indexedColumnsList = proto.getIndexedColumnsList(); maintainer.indexedColumns = new HashSet(indexedColumnsList.size()); @@ -1466,7 +1467,7 @@ public static ServerCachingProtos.IndexMaintainer toProto(IndexMaintainer mainta builder.setIsMultiTenant(maintainer.isMultiTenant); if (maintainer.viewIndexId != null) { builder.setViewIndexId(ByteStringer.wrap(maintainer.viewIndexId)); - builder.setUseLongViewIndex(MetaDataUtil.getViewIndexIdDataType().equals(maintainer.viewIndexType)); + builder.setViewIndexType(maintainer.viewIndexType.getSqlType()); } for (ColumnReference colRef : maintainer.indexedColumns) { ServerCachingProtos.ColumnReference.Builder cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder(); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java index 945a0e872ab..632d765d581 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java @@ -222,8 +222,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData { public static final byte[] IS_VIEW_REFERENCED_BYTES = Bytes.toBytes(IS_VIEW_REFERENCED); public static final String VIEW_INDEX_ID = "VIEW_INDEX_ID"; public static final byte[] VIEW_INDEX_ID_BYTES = Bytes.toBytes(VIEW_INDEX_ID); - public static final String USE_LONG_VIEW_INDEX = "USE_LONG_VIEW_INDEX"; - public static final byte[] USE_LONG_VIEW_INDEX_BYTES = Bytes.toBytes(USE_LONG_VIEW_INDEX); + public static final String VIEW_INDEX_ID_DATA_TYPE = "VIEW_INDEX_ID_DATA_TYPE"; + public static final byte[] VIEW_INDEX_ID_DATA_TYPE_BYTES = Bytes.toBytes(VIEW_INDEX_ID_DATA_TYPE); public static final String BASE_COLUMN_COUNT = "BASE_COLUMN_COUNT"; public static final byte[] BASE_COLUMN_COUNT_BYTES = 
Bytes.toBytes(BASE_COLUMN_COUNT); public static final String IS_ROW_TIMESTAMP = "IS_ROW_TIMESTAMP"; diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java index 318d30cf790..6e27f2a0f4b 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java @@ -3036,8 +3036,8 @@ protected PhoenixConnection upgradeSystemCatalogIfRequired(PhoenixConnection met metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_5_1_0, - PhoenixDatabaseMetaData.USE_LONG_VIEW_INDEX + " " - + PBoolean.INSTANCE.getSqlTypeName()); + PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE + " " + + PInteger.INSTANCE.getSqlTypeName()); } return metaConnection; } diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java index 5df636c6937..ab78eb91a92 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java @@ -193,7 +193,7 @@ public enum JoinType {INNER, LEFT_OUTER} MULTI_TENANT + " BOOLEAN,\n" + VIEW_TYPE + " UNSIGNED_TINYINT,\n" + VIEW_INDEX_ID + " BIGINT,\n" + - USE_LONG_VIEW_INDEX + " BOOLEAN,\n" + + VIEW_INDEX_ID_DATA_TYPE + " INTEGER,\n" + // Column metadata (will be null for table row) DATA_TYPE + " INTEGER," + COLUMN_SIZE + " INTEGER," + @@ -371,4 +371,4 @@ public enum JoinType {INNER, LEFT_OUTER} ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" + PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE; -} \ No newline at end of file +} diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java index 56eed5d86ea..54d455b505d 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java @@ -90,7 +90,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.USE_LONG_VIEW_INDEX; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE; import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT; import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY; import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE; @@ -303,7 +303,7 @@ public class MetaDataClient { IMMUTABLE_STORAGE_SCHEME + "," + ENCODING_SCHEME + "," + USE_STATS_FOR_PARALLELIZATION +"," + - USE_LONG_VIEW_INDEX + + VIEW_INDEX_ID_DATA_TYPE + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; private static final String CREATE_SCHEMA = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE @@ -2580,7 +2580,7 @@ else if (!SchemaUtil.isSystemTable(Bytes.toBytes(SchemaUtil.getTableName(schemaN Collections.emptyList(), isImmutableRows, Collections.emptyList(), defaultFamilyName == null ? 
null : PNameFactory.newName(defaultFamilyName), null, - Boolean.TRUE.equals(disableWAL), false, false, null, null, null, indexType, true, null, 0, 0L, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema, ONE_CELL_PER_COLUMN, NON_ENCODED_QUALIFIERS, PTable.EncodedCQCounter.NULL_COUNTER, true); + Boolean.TRUE.equals(disableWAL), false, false, null, viewIndexType, null, indexType, true, null, 0, 0L, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema, ONE_CELL_PER_COLUMN, NON_ENCODED_QUALIFIERS, PTable.EncodedCQCounter.NULL_COUNTER, true); connection.addTable(table, MetaDataProtocol.MIN_TABLE_TIMESTAMP); } @@ -2745,7 +2745,7 @@ public boolean isViewReferenced() { } else { tableUpsert.setBoolean(28, useStatsForParallelizationProp); } - tableUpsert.setBoolean(29, true); + tableUpsert.setInt(29, Types.BIGINT); tableUpsert.execute(); if (asyncCreatedDate != null) { diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java index aabb3ecaf2a..17b788cf897 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java @@ -69,6 +69,7 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDouble; import org.apache.phoenix.schema.types.PFloat; +import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.transaction.TransactionFactory; import org.apache.phoenix.util.ByteUtil; @@ -1243,11 +1244,11 @@ public static PTable createFromProto(PTableProtos.PTable table) { indexState = PIndexState.fromSerializedValue(table.getIndexState()); } Long viewIndexId = null; - if(table.hasViewIndexId()){ - viewIndexId = (long)table.getViewIndexId(); + if (table.hasViewIndexId()) { + viewIndexId = table.getViewIndexId(); } - PDataType viewIndexType = table.hasUseLongViewIndexId() - ? 
MetaDataUtil.getViewIndexIdDataType() + PDataType viewIndexType = table.hasViewIndexType() + ? PDataType.fromTypeId(table.getViewIndexType()) : MetaDataUtil.getLegacyViewIndexIdDataType(); IndexType indexType = IndexType.getDefault(); if(table.hasIndexType()){ @@ -1385,7 +1386,7 @@ public static PTableProtos.PTable toProto(PTable table) { } if(table.getViewIndexId() != null) { builder.setViewIndexId(table.getViewIndexId()); - builder.setUseLongViewIndexId(MetaDataUtil.getViewIndexIdDataType().equals(table.getViewIndexType())); + builder.setViewIndexType(table.getViewIndexType().getSqlType()); } if(table.getIndexType() != null) { builder.setIndexType(ByteStringer.wrap(new byte[]{table.getIndexType().getSerializedValue()})); diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java index ea45a8b75a5..39c3976c3b3 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.sql.SQLException; +import java.sql.Types; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; diff --git a/phoenix-protocol/src/main/MetaDataService.proto b/phoenix-protocol/src/main/MetaDataService.proto index d7bd3a6fc36..f10aa3ccbfe 100644 --- a/phoenix-protocol/src/main/MetaDataService.proto +++ b/phoenix-protocol/src/main/MetaDataService.proto @@ -61,7 +61,7 @@ message SharedTableState { repeated PColumn columns = 4; repeated bytes physicalNames = 5; required int64 viewIndexId = 6; - optional bool useLongViewIndexId = 7; + optional int32 viewIndexType = 7 [default = 5]; } message MetaDataResponse { @@ -77,7 +77,7 @@ message MetaDataResponse { optional PSchema schema = 10; optional int64 autoPartitionNum = 11; optional int64 viewIndexId = 12; - optional bool useLongViewIndexId = 13; + optional int32 viewIndexType = 
13 [default = 5]; } message GetTableRequest { diff --git a/phoenix-protocol/src/main/PTable.proto b/phoenix-protocol/src/main/PTable.proto index 15d2e33c152..be771a9c86d 100644 --- a/phoenix-protocol/src/main/PTable.proto +++ b/phoenix-protocol/src/main/PTable.proto @@ -103,7 +103,7 @@ message PTable { repeated EncodedCQCounter encodedCQCounters = 36; optional bool useStatsForParallelization = 37; optional int32 transactionProvider = 38; - optional bool useLongViewIndexId = 39; + optional int32 viewIndexType = 39 [default = 5]; } message EncodedCQCounter { diff --git a/phoenix-protocol/src/main/ServerCachingService.proto b/phoenix-protocol/src/main/ServerCachingService.proto index 7532a4b123d..d92f2cdab57 100644 --- a/phoenix-protocol/src/main/ServerCachingService.proto +++ b/phoenix-protocol/src/main/ServerCachingService.proto @@ -62,7 +62,7 @@ message IndexMaintainer { repeated ColumnInfo indexedColumnInfo = 19; required int32 encodingScheme = 20; required int32 immutableStorageScheme = 21; - optional bool useLongViewIndex = 22; + optional int32 viewIndexType = 22 ; } message AddServerCacheRequest {