PHOENIX-4579 Add a config to conditionally create Phoenix meta tables on first client connection (Chinmay Kulkarni)
jtaylor-sfdc committed Apr 13, 2018
1 parent 15fa00f commit d30d771
Showing 14 changed files with 1,238 additions and 395 deletions.
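The commit lets a client avoid creating or upgrading the Phoenix SYSTEM tables on its first connection. Below is a minimal JDBC sketch of what using such a switch could look like; the property name "DoNotUpgrade", the connection URL, and the expectation of an UpgradeRequiredException are assumptions inferred from the diff further down, not details confirmed on this page.

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class ConditionalMetaTableConnect {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Hypothetical client-side property: skip automatic creation/upgrade of
        // SYSTEM tables on this connection (the name is an assumption).
        props.setProperty("DoNotUpgrade", "true");
        // ZooKeeper quorum in the URL is illustrative only.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181", props)) {
            // If SYSTEM.CATALOG is missing or stale, the expectation under this config is an
            // UpgradeRequiredException rather than silent creation/upgrade of the meta tables.
            System.out.println("Connected without triggering metadata creation");
        }
    }
}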
@@ -114,7 +114,7 @@ private void testTableWithSameSchema(boolean notExists, boolean sameClient) thro
// verify no create table rpcs
verify(connectionQueryServices, never()).createTable(anyListOf(Mutation.class),
any(byte[].class), any(PTableType.class), anyMap(), anyList(), any(byte[][].class),
eq(false), eq(false));
eq(false), eq(false), eq(false));
reset(connectionQueryServices);

// execute alter table ddl that adds the same column
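The only change in this hunk is a third eq(false) matcher, matching the extra boolean parameter added to createTable. A self-contained Mockito sketch of the same pattern, using a hypothetical MetaService interface rather than the real ConnectionQueryServices:

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.verify;

public class VerifyMatcherSketch {
    // Hypothetical stand-in for the mocked services interface; not Phoenix code.
    interface MetaService {
        void createTable(String name, boolean isNamespaceMapped,
                         boolean allocateIndexId, boolean isDoNotUpgradePropSet);
    }

    public static void main(String[] args) {
        MetaService services = mock(MetaService.class);
        // No DDL was issued, so a verify(..., never()) with one matcher per argument passes;
        // adding a parameter to the mocked method requires the extra eq(false) seen above.
        verify(services, never()).createTable(anyString(), eq(false), eq(false), eq(false));
        reset(services);
    }
}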
@@ -376,8 +376,10 @@ private void verifySyscatData(Properties clientProps, String connName, Statement
while(rs.next()) {

if(rs.getString("IS_NAMESPACE_MAPPED") == null) {
// Check that entry for SYSTEM namespace exists in SYSCAT
systemSchemaExists = rs.getString("TABLE_SCHEM").equals(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME) ? true : systemSchemaExists;
} else if (rs.getString("COLUMN_NAME") == null) {
// Found the initial entry for a table in SYSCAT
String schemaName = rs.getString("TABLE_SCHEM");
String tableName = rs.getString("TABLE_NAME");

@@ -395,12 +397,11 @@
}
}

if(!systemSchemaExists) {
fail(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME + " entry doesn't exist in SYSTEM.CATALOG table.");
}

// The set will contain SYSMUTEX table since that table is not exposed in SYSCAT
if (systemTablesMapped) {
if (!systemSchemaExists) {
fail(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME + " entry doesn't exist in SYSTEM.CATALOG table.");
}
assertTrue(namespaceMappedSystemTablesSet.size() == 1);
} else {
assertTrue(systemTablesSet.size() == 1);
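This test walks SYSTEM.CATALOG rows, treating rows without IS_NAMESPACE_MAPPED as schema entries and rows without COLUMN_NAME as the first entry for a table. A standalone sketch of the same walk over a plain JDBC query; the SELECT statement and connection URL are assumptions, only the column names come from the test:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SyscatWalkSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                     "SELECT TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, IS_NAMESPACE_MAPPED FROM SYSTEM.CATALOG")) {
            boolean systemSchemaExists = false;
            while (rs.next()) {
                if (rs.getString("IS_NAMESPACE_MAPPED") == null) {
                    // Rows without IS_NAMESPACE_MAPPED are treated as schema entries, as in the test.
                    systemSchemaExists |= "SYSTEM".equals(rs.getString("TABLE_SCHEM"));
                } else if (rs.getString("COLUMN_NAME") == null) {
                    // Initial (header) row for a table: schema and table name, no column yet.
                    System.out.println(rs.getString("TABLE_SCHEM") + "." + rs.getString("TABLE_NAME"));
                }
            }
            System.out.println("SYSTEM schema row present: " + systemSchemaExists);
        }
    }
}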

Large diffs are not rendered by default.

@@ -3610,6 +3610,27 @@ public void getVersion(RpcController controller, GetVersionRequest request, RpcC
}
long version = MetaDataUtil.encodeVersion(env.getHBaseVersion(), config);

PTable systemCatalog = null;
byte[] tableKey =
SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY,
PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA_BYTES,
PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES);
ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(tableKey);
try {
systemCatalog = loadTable(env, tableKey, cacheKey, MIN_SYSTEM_TABLE_TIMESTAMP,
HConstants.LATEST_TIMESTAMP, request.getClientVersion());
} catch (Throwable t) {
logger.error("loading system catalog table inside getVersion failed", t);
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException(
SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES,
isTablesMappingEnabled).toString(), t));
}
// In case this is the first connection, system catalog does not exist, and so we don't
// set the optional system catalog timestamp.
if (systemCatalog != null) {
builder.setSystemCatalogTimestamp(systemCatalog.getTimeStamp());
}
builder.setVersion(version);
done.run(builder.build());
}
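getVersion now also reports the SYSTEM.CATALOG timestamp when the table exists, so a client can decide whether an upgrade is needed without attempting to create the metadata itself. A minimal sketch of that decision; the constant value and method names are placeholders, not the actual Phoenix client code:

final class UpgradeCheckSketch {
    // Placeholder standing in for the minimum required system table timestamp (value is made up).
    static final long MIN_SYSTEM_TABLE_TIMESTAMP = 1_000L;

    static boolean upgradeRequired(boolean hasSystemCatalogTimestamp, long systemCatalogTimestamp) {
        // No timestamp in the response means SYSTEM.CATALOG was never created, so there is
        // nothing to upgrade yet; otherwise an older-than-required timestamp means upgrade first.
        return hasSystemCatalogTimestamp && systemCatalogTimestamp < MIN_SYSTEM_TABLE_TIMESTAMP;
    }
}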
@@ -451,6 +451,10 @@ public static long getPriorVersion() {
return iterator.next();
}

public static long getPriorUpgradeVersion() {
return TIMESTAMP_VERSION_MAP.lowerKey(TIMESTAMP_VERSION_MAP.lastKey());
}

public static String getVersion(long serverTimestamp) {
/*
* It is possible that when clients are trying to run upgrades concurrently, we could be at an intermediate
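getPriorUpgradeVersion() relies on NavigableMap: lowerKey(lastKey()) returns the second-highest key, i.e. the timestamp of the release just before the current one. A self-contained illustration with made-up timestamps and version strings:

import java.util.NavigableMap;
import java.util.TreeMap;

public class PriorKeySketch {
    public static void main(String[] args) {
        // Made-up entries; the real TIMESTAMP_VERSION_MAP pairs system-table timestamps with releases.
        NavigableMap<Long, String> timestampVersionMap = new TreeMap<>();
        timestampVersionMap.put(10L, "4.11.x");
        timestampVersionMap.put(20L, "4.12.x");
        timestampVersionMap.put(30L, "4.13.x");
        // lastKey() is 30; lowerKey(30) is 20: the timestamp of the prior upgrade version.
        System.out.println(timestampVersionMap.lowerKey(timestampVersionMap.lastKey())); // prints 20
    }
}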

Some generated files are not rendered by default.

@@ -17,12 +17,23 @@
*/
package org.apache.phoenix.exception;

import org.apache.hadoop.hbase.HConstants;

public class UpgradeRequiredException extends RetriableUpgradeException {
private final long systemCatalogTimestamp;

public UpgradeRequiredException() {
this(HConstants.OLDEST_TIMESTAMP);
}

public UpgradeRequiredException(long systemCatalogTimeStamp) {
super("Operation not allowed since cluster hasn't been upgraded. Call EXECUTE UPGRADE. ",
SQLExceptionCode.UPGRADE_REQUIRED.getSQLState(), SQLExceptionCode.UPGRADE_REQUIRED.getErrorCode());
this.systemCatalogTimestamp = systemCatalogTimeStamp;
}

public long getSystemCatalogTimeStamp() {
return systemCatalogTimestamp;
}

}
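UpgradeRequiredException now carries the SYSTEM.CATALOG timestamp, with HConstants.OLDEST_TIMESTAMP as the default when none is known. A hedged sketch of how a caller might read it; the connection URL and the point at which the exception surfaces are assumptions:

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.hadoop.hbase.HConstants;
import org.apache.phoenix.exception.UpgradeRequiredException;

public class HandleUpgradeRequired {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
            // Use the connection normally when no upgrade is pending.
        } catch (UpgradeRequiredException e) {
            long ts = e.getSystemCatalogTimeStamp();
            if (ts == HConstants.OLDEST_TIMESTAMP) {
                // Default-constructor path: no SYSTEM.CATALOG timestamp was available.
                System.err.println("Upgrade required; SYSTEM.CATALOG timestamp unknown");
            } else {
                System.err.println("Upgrade required; SYSTEM.CATALOG timestamp = " + ts);
            }
            // Run EXECUTE UPGRADE before retrying.
        }
    }
}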
@@ -85,7 +85,7 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated

public MetaDataMutationResult createTable(List<Mutation> tableMetaData, byte[] tableName, PTableType tableType,
Map<String, Object> tableProps, List<Pair<byte[], Map<String, Object>>> families, byte[][] splits,
boolean isNamespaceMapped, boolean allocateIndexId) throws SQLException;
boolean isNamespaceMapped, boolean allocateIndexId, boolean isDoNotUpgradePropSet) throws SQLException;
public MetaDataMutationResult dropTable(List<Mutation> tableMetadata, PTableType tableType, boolean cascade) throws SQLException;
public MetaDataMutationResult dropFunction(List<Mutation> tableMetadata, boolean ifExists) throws SQLException;
public MetaDataMutationResult addColumn(List<Mutation> tableMetaData, PTable table, Map<String, List<Pair<String,Object>>> properties, Set<String> colFamiliesForPColumnsToBeAdded, List<PColumn> columns) throws SQLException;
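createTable gains an isDoNotUpgradePropSet flag. A simplified sketch (not the real Phoenix interface) of how such a flag might be derived from a client property and threaded into the call; the property name is hypothetical:

import java.sql.SQLException;
import java.util.Properties;

public class CreateTableFlagSketch {
    // Simplified stand-in for the interface above; not the real Phoenix signature.
    interface SimplifiedQueryServices {
        void createTable(String tableName, boolean isNamespaceMapped,
                         boolean allocateIndexId, boolean isDoNotUpgradePropSet) throws SQLException;
    }

    static void createTable(SimplifiedQueryServices services, Properties clientProps) throws SQLException {
        // Hypothetical property name; the commit only shows that a boolean flag is now threaded through.
        boolean doNotUpgrade = Boolean.parseBoolean(clientProps.getProperty("DoNotUpgrade", "false"));
        // The flag rides along with the DDL so the server side can decide whether creating or
        // upgrading SYSTEM tables is allowed for this client.
        services.createTable("MY_TABLE", false, false, doNotUpgrade);
    }
}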
