HIVE-22010 - Clean up ShowCreateTableOperation
miklosgergely committed Jul 25, 2019
1 parent 06edd56 commit 68ae4a5
Showing 24 changed files with 234 additions and 246 deletions.
@@ -186,6 +186,8 @@ static void internalBeforeClassSetup(Map<String, String> additionalProperties, b
});

MetaStoreTestUtils.startMetaStoreWithRetry(hconf);
// reset the WAREHOUSE property to the test dir, as the previous command added a random port to it
hconf.set(MetastoreConf.ConfVars.WAREHOUSE.getVarname(), System.getProperty("test.warehouse.dir", "/tmp"));

Path testPath = new Path(TEST_PATH);
FileSystem fs = FileSystem.get(testPath.toUri(),hconf);
@@ -459,7 +461,6 @@ public void testBasicWithCM() throws Exception {
String unptn_locn = new Path(TEST_PATH, name + "_unptn").toUri().getPath();
String ptn_locn_1 = new Path(TEST_PATH, name + "_ptn1").toUri().getPath();
String ptn_locn_2 = new Path(TEST_PATH, name + "_ptn2").toUri().getPath();
String ptn_locn_2_later = new Path(TEST_PATH, name + "_ptn2_later").toUri().getPath();

createTestDataFile(unptn_locn, unptn_data);
createTestDataFile(ptn_locn_1, ptn_data_1);
@@ -549,7 +550,6 @@ public void testBootstrapWithConcurrentDropTable() throws IOException {
String[] unptn_data = new String[]{ "eleven" , "twelve" };
String[] ptn_data_1 = new String[]{ "thirteen", "fourteen", "fifteen"};
String[] ptn_data_2 = new String[]{ "fifteen", "sixteen", "seventeen"};
String[] empty = new String[]{};

String unptn_locn = new Path(TEST_PATH, name + "_unptn").toUri().getPath();
String ptn_locn_1 = new Path(TEST_PATH, name + "_ptn1").toUri().getPath();
@@ -694,7 +694,6 @@ public void testBootstrapWithConcurrentRename() throws IOException {
run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver);

String[] ptn_data = new String[]{ "eleven" , "twelve" };
String[] empty = new String[]{};
String ptn_locn = new Path(TEST_PATH, name + "_ptn").toUri().getPath();

createTestDataFile(ptn_locn, ptn_data);
@@ -767,7 +766,6 @@ public void testBootstrapWithDropPartitionedTable() throws IOException {
run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver);

String[] ptn_data = new String[]{ "eleven" , "twelve" };
String[] empty = new String[]{};
String ptn_locn = new Path(TEST_PATH, name + "_ptn").toUri().getPath();

createTestDataFile(ptn_locn, ptn_data);
@@ -2572,7 +2570,6 @@ public void testIncrementalRepeatEventOnMissingObject() throws Exception {
// List to maintain the incremental dumps for each operation
List<Tuple> incrementalDumpList = new ArrayList<Tuple>();

String[] empty = new String[] {};
String[] unptn_data = new String[] { "ten" };
String[] ptn_data_1 = new String[] { "fifteen" };
String[] ptn_data_2 = new String[] { "seventeen" };
@@ -2682,7 +2679,6 @@ public void testConcatenateTable() throws IOException {
run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS ORC", driver);

String[] unptn_data = new String[] { "eleven", "twelve" };
String[] empty = new String[] {};
run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')", driver);

// Bootstrap dump/load
@@ -2695,7 +2691,7 @@ public void testConcatenateTable() throws IOException {
verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data, driver);

// Replicate all the events happened after bootstrap
Tuple incrDump = incrementalLoadAndVerify(dbName, bootstrapDump.lastReplId, replDbName);
incrementalLoadAndVerify(dbName, bootstrapDump.lastReplId, replDbName);

// migration test is failing as CONCATENATE is not working. Its not creating the merged file.
if (!isMigrationTest) {
@@ -2728,7 +2724,7 @@ public void testConcatenatePartitionedTable() throws IOException {
run("ALTER TABLE " + dbName + ".ptned PARTITION(b=2) CONCATENATE", driver);

// Replicate all the events happened so far
Tuple incrDump = incrementalLoadAndVerify(dbName, bootstrapDump.lastReplId, replDbName);
incrementalLoadAndVerify(dbName, bootstrapDump.lastReplId, replDbName);

// migration test is failing as CONCATENATE is not working. Its not creating the merged file.
if (!isMigrationTest) {
@@ -3268,7 +3264,7 @@ public void testDumpWithTableDirMissing() throws IOException {
run("CREATE TABLE " + dbName + ".normal(a int)", driver);
run("INSERT INTO " + dbName + ".normal values (1)", driver);

Path path = new Path(System.getProperty("test.warehouse.dir", ""));
Path path = new Path(System.getProperty("test.warehouse.dir", "/tmp"));
path = new Path(path, dbName.toLowerCase() + ".db");
path = new Path(path, "normal");
FileSystem fs = path.getFileSystem(hconf);
@@ -3288,7 +3284,7 @@ public void testDumpWithPartitionDirMissing() throws IOException {
run("CREATE TABLE " + dbName + ".normal(a int) PARTITIONED BY (part int)", driver);
run("INSERT INTO " + dbName + ".normal partition (part= 124) values (1)", driver);

Path path = new Path(System.getProperty("test.warehouse.dir",""));
Path path = new Path(System.getProperty("test.warehouse.dir", "/tmp"));
path = new Path(path, dbName.toLowerCase()+".db");
path = new Path(path, "normal");
path = new Path(path, "part=124");
@@ -3546,12 +3542,6 @@ private List<String> getOutput(IDriver myDriver) throws IOException {
return results;
}

private void printOutput(IDriver myDriver) throws IOException {
for (String s : getOutput(myDriver)){
LOG.info(s);
}
}

private void verifyIfTableNotExist(String dbName, String tableName, HiveMetaStoreClient myClient){
Exception e = null;
try {
@@ -3594,15 +3584,6 @@ private void verifyIfPartitionExist(String dbName, String tableName, List<String
}
}

private void verifyIfDirNotExist(FileSystem fs, Path path, PathFilter filter){
try {
FileStatus[] statuses = fs.listStatus(path, filter);
assertEquals(0, statuses.length);
} catch (IOException e) {
assert(false);
}
}

private void verifySetup(String cmd, String[] data, IDriver myDriver) throws IOException {
if (VERIFY_SETUP_STEPS){
run(cmd, myDriver);
9 changes: 5 additions & 4 deletions ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java
@@ -25,6 +25,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.commons.lang.StringUtils;
@@ -84,8 +85,8 @@ public static DataOutputStream getOutputStream(Path outputFile, DDLOperationCont
* @return {@code true} if item was added
*/
public static boolean addIfAbsentByName(WriteEntity newWriteEntity, Set<WriteEntity> outputs) {
for(WriteEntity writeEntity : outputs) {
if(writeEntity.getName().equalsIgnoreCase(newWriteEntity.getName())) {
for (WriteEntity writeEntity : outputs) {
if (writeEntity.getName().equalsIgnoreCase(newWriteEntity.getName())) {
LOG.debug("Ignoring request to add {} because {} is present", newWriteEntity.toStringDetail(),
writeEntity.toStringDetail());
return false;
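
For orientation, a minimal usage sketch (not part of this commit) of the de-duplication shown above; the tableWriteEntity variable is a hypothetical WriteEntity supplied by the caller, and the usual java.util imports are assumed.

// Hypothetical caller: the second call is a no-op because the names match case-insensitively.
Set<WriteEntity> outputs = new HashSet<>();
boolean first = DDLUtils.addIfAbsentByName(tableWriteEntity, outputs);  // true - entity registered
boolean second = DDLUtils.addIfAbsentByName(tableWriteEntity, outputs); // false - name already present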
@@ -153,12 +154,12 @@ public static boolean allowOperationInReplicationScope(Hive db, String tableName
return false;
}

public static String propertiesToString(Map<String, String> props, List<String> exclude) {
public static String propertiesToString(Map<String, String> props, Set<String> exclude) {
if (props.isEmpty()) {
return "";
}

Map<String, String> sortedProperties = new TreeMap<String, String>(props);
SortedMap<String, String> sortedProperties = new TreeMap<String, String>(props);
List<String> realProps = new ArrayList<String>();
for (Map.Entry<String, String> e : sortedProperties.entrySet()) {
if (e.getValue() != null && (exclude == null || !exclude.contains(e.getKey()))) {
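A small usage sketch for the new Set-based exclude parameter above; the property keys and values are made up for illustration, and java.util imports plus DDLUtils are assumed to be on the classpath.

// Hypothetical caller of the updated signature.
Map<String, String> props = new TreeMap<>();
props.put("transactional", "true");
props.put("transient_lastDdlTime", "1564000000");
Set<String> exclude = Collections.singleton("transient_lastDdlTime");
String text = DDLUtils.propertiesToString(props, exclude); // renders only the non-excluded entries
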
11 changes: 2 additions & 9 deletions ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork.java
@@ -33,23 +33,16 @@ public final class DDLWork implements Serializable {
private static final long serialVersionUID = 1L;

private DDLDesc ddlDesc;
boolean needLock = false;
private boolean needLock = false;

/** ReadEntitites that are passed to the hooks. */
private Set<ReadEntity> inputs;
/** List of WriteEntities that are passed to the hooks. */
private Set<WriteEntity> outputs;

public DDLWork() {
}

public DDLWork(Set<ReadEntity> inputs, Set<WriteEntity> outputs) {
public DDLWork(Set<ReadEntity> inputs, Set<WriteEntity> outputs, DDLDesc ddlDesc) {
this.inputs = inputs;
this.outputs = outputs;
}

public DDLWork(Set<ReadEntity> inputs, Set<WriteEntity> outputs, DDLDesc ddlDesc) {
this(inputs, outputs);
this.ddlDesc = ddlDesc;
}

@@ -27,7 +27,7 @@
* Operation process of altering a database's properties.
*/
public class AlterDatabaseSetPropertiesOperation
extends AbstractAlterDatabaseOperation<AlterDatabaseSetPropertiesDesc> {
extends AbstractAlterDatabaseOperation<AlterDatabaseSetPropertiesDesc> {
public AlterDatabaseSetPropertiesOperation(DDLOperationContext context, AlterDatabaseSetPropertiesDesc desc) {
super(context, desc);
}
@@ -19,10 +19,10 @@
package org.apache.hadoop.hive.ql.ddl.database;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.ddl.DDLOperation;
import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
@@ -66,7 +66,7 @@ private void makeLocationQualified(Database database) throws HiveException {
database.setLocationUri(Utilities.getQualifiedPath(context.getConf(), new Path(database.getLocationUri())));
} else {
// Location is not set we utilize METASTOREWAREHOUSE together with database name
Path path = new Path(HiveConf.getVar(context.getConf(), HiveConf.ConfVars.METASTOREWAREHOUSE),
Path path = new Path(MetastoreConf.getVar(context.getConf(), MetastoreConf.ConfVars.WAREHOUSE),
database.getName().toLowerCase() + DATABASE_PATH_SUFFIX);
String qualifiedPath = Utilities.getQualifiedPath(context.getConf(), path);
database.setLocationUri(qualifiedPath);
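
The hunk above swaps the HiveConf lookup for its MetastoreConf equivalent; as a sketch (conf stands for the operation's Configuration, and both lookups are believed to resolve the same warehouse-dir setting), the new form is:

// Old: HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE)
// New: typed MetastoreConf accessor for the same warehouse root.
String warehouseRoot = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.WAREHOUSE);
Path dbLocation = new Path(warehouseRoot, database.getName().toLowerCase() + DATABASE_PATH_SUFFIX);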
@@ -38,14 +38,10 @@ public class ShowFunctionsDesc implements DDLDesc, Serializable {
private final String pattern;

public ShowFunctionsDesc(Path resFile) {
this(resFile, null, false);
this(resFile, null);
}

public ShowFunctionsDesc(Path resFile, String pattern) {
this(resFile, pattern, false);
}

public ShowFunctionsDesc(Path resFile, String pattern, boolean isLikePattern) {
this.resFile = resFile.toString();
this.pattern = pattern;
}
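
After this cleanup the class keeps only the two constructors shown; a hypothetical use (resFile being the Path the results are written to) might be:

// Hypothetical: list every function, or only those matching a pattern.
ShowFunctionsDesc allFunctions = new ShowFunctionsDesc(resFile);
ShowFunctionsDesc matching = new ShowFunctionsDesc(resFile, "xpath.*");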
@@ -60,8 +60,8 @@ public int execute() throws HiveException, IOException, TException {
try (HiveMetaStoreClient msc = new HiveMetaStoreClient(context.getConf())) {
Table table = msc.getTable(SessionState.get().getCurrentCatalog(), names[0], names[1]);
String qualifiedTableName = Warehouse.getCatalogQualifiedTableName(table);
boolean msckEnablePartitionRetention = context.getConf().getBoolean(
MetastoreConf.ConfVars.MSCK_REPAIR_ENABLE_PARTITION_RETENTION.getHiveName(), false);
boolean msckEnablePartitionRetention = MetastoreConf.getBoolVar(context.getConf(),
MetastoreConf.ConfVars.MSCK_REPAIR_ENABLE_PARTITION_RETENTION);
if (msckEnablePartitionRetention) {
partitionExpirySeconds = PartitionManagementTask.getRetentionPeriodInSeconds(table);
LOG.info("{} - Retention period ({}s) for partition is enabled for MSCK REPAIR..", qualifiedTableName,
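For comparison, a sketch of the before/after lookup in the hunk above (conf stands for the operation's Configuration); behaviour should be equivalent, with the default now coming from MetastoreConf's own declaration rather than a hard-coded false.

// Before: raw string lookup via the Hive-prefixed key with an explicit default.
boolean before = conf.getBoolean(
    MetastoreConf.ConfVars.MSCK_REPAIR_ENABLE_PARTITION_RETENTION.getHiveName(), false);
// After: typed accessor that also applies MetastoreConf's declared default.
boolean after = MetastoreConf.getBoolVar(conf,
    MetastoreConf.ConfVars.MSCK_REPAIR_ENABLE_PARTITION_RETENTION);
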
@@ -48,7 +48,7 @@ public int execute() throws HiveException {
// Write the results into the file
try (DataOutputStream os = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) {
// Write a header for cliDriver
if(!sessionState.isHiveServerQuery()) {
if (!sessionState.isHiveServerQuery()) {
writeHeader(os);
}

@@ -48,7 +48,7 @@ public int execute() throws HiveException {

// Write the results into the file
try (DataOutputStream os = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) {
if(!sessionState.isHiveServerQuery()) {
if (!sessionState.isHiveServerQuery()) {
writeHeader(os);
}

@@ -124,7 +124,7 @@ protected StorageDescriptor getStorageDescriptor(Table tbl, Partition part) {
return (part == null ? tbl.getTTable().getSd() : part.getTPartition().getSd());
}

public void finalizeAlterTableWithWriteIdOp(Table table, Table oldTable, List<Partition> partitions,
private void finalizeAlterTableWithWriteIdOp(Table table, Table oldTable, List<Partition> partitions,
DDLOperationContext context, EnvironmentContext environmentContext, AbstractAlterTableDesc alterTable)
throws HiveException {
if (partitions == null) {
@@ -247,7 +247,7 @@ public String getTableName() {
return tableName;
}

public String getDatabaseName(){
public String getDatabaseName() {
return databaseName;
}

@@ -606,7 +606,7 @@ public void validate(HiveConf conf)
} catch (Exception err) {
LOG.error("Failed to get type info", err);
}
if(null == pti){
if (null == pti) {
throw new SemanticException(ErrorMsg.PARTITION_COLUMN_NON_PRIMITIVE.getMsg() + " Found "
+ partCol + " of type: " + fs.getType());
}
@@ -711,8 +711,8 @@ public void setReplicationSpec(ReplicationSpec replicationSpec) {
* @return what kind of replication scope this drop is running under.
* This can result in a "CREATE/REPLACE IF NEWER THAN" kind of semantic
*/
public ReplicationSpec getReplicationSpec(){
if (replicationSpec == null){
public ReplicationSpec getReplicationSpec() {
if (replicationSpec == null) {
this.replicationSpec = new ReplicationSpec();
}
return this.replicationSpec;
@@ -68,11 +68,11 @@ public boolean isPurge() {
* @return what kind of replication scope this drop is running under.
* This can result in a "DROP IF OLDER THAN" kind of semantic
*/
public ReplicationSpec getReplicationSpec(){
public ReplicationSpec getReplicationSpec() {
return replicationSpec;
}

public boolean getValidationRequired(){
public boolean getValidationRequired() {
return validationRequired;
}
}