Revert "HIVE-22028 Clean up Add Partition"
This reverts commit 853bf62.
miklosgergely committed Jul 25, 2019
1 parent cc01599 commit bb1f654
Showing 13 changed files with 533 additions and 479 deletions.
@@ -25,10 +25,12 @@
import org.apache.hadoop.hive.metastore.api.DataOperationType;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hive.cli.CliSessionState;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.LockComponentBuilder;
@@ -40,10 +42,14 @@
import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.api.LockState;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
import org.apache.hadoop.hive.ql.DriverFactory;
import org.apache.hadoop.hive.ql.IDriver;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hive.hcatalog.common.HCatUtil;

import org.apache.hadoop.security.UserGroupInformation;
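
The imports restored in the two hunks above (DriverFactory, IDriver, SessionState, CliSessionState) support the driver-based DDL path this revert brings back further down. As a hedged illustration of the setup those imports enable -- a sketch, not code from this commit -- a local driver is typically obtained like this:

import org.apache.hadoop.hive.cli.CliSessionState;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.DriverFactory;
import org.apache.hadoop.hive.ql.IDriver;
import org.apache.hadoop.hive.ql.session.SessionState;

public class DriverSetupSketch {
  // A session must be active on the current thread before a driver can run
  // statements; CliSessionState is the embedded-CLI flavor of SessionState.
  public static IDriver newLocalDriver(HiveConf conf) {
    SessionState.start(new CliSessionState(conf));
    return DriverFactory.newDriver(conf);
  }
}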
@@ -223,6 +229,18 @@ private StreamingConnection newConnectionImpl(UserGroupInformation ugi,
return new ConnectionImpl(this, ugi, conf, createPartIfNotExists, agentInfo);
}

private static UserGroupInformation getUserGroupInfo(String user)
throws ImpersonationFailed {
try {
return UserGroupInformation.createProxyUser(
user, UserGroupInformation.getLoginUser());
} catch (IOException e) {
LOG.error("Unable to get UserGroupInfo for user : " + user, e);
throw new ImpersonationFailed(user,e);
}
}


@Override
public boolean equals(Object o) {
if (this == o) {
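
For context on the restored getUserGroupInfo helper: a proxy UGI produced with createProxyUser is normally used to run connection work as the impersonated user via doAs. A minimal standalone sketch (the user name and action are illustrative, not this file's code):

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUserSketch {
  public static void main(String[] args) throws Exception {
    // Impersonate "hive_client" on top of the logged-in service user;
    // this requires hadoop.proxyuser.* to be configured on the cluster.
    UserGroupInformation ugi = UserGroupInformation.createProxyUser(
        "hive_client", UserGroupInformation.getLoginUser());
    String who = ugi.doAs((PrivilegedExceptionAction<String>) () ->
        UserGroupInformation.getCurrentUser().getUserName());
    System.out.println("Ran as: " + who); // hive_client
  }
}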
@@ -449,10 +467,12 @@ private static void createPartitionIfNotExists(HiveEndPoint ep,
Map<String, String> partSpec =
Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), ep.partitionVals);

Path location = new Path(tableObject.getDataLocation(), Warehouse.makePartPath(partSpec));
location = new Path(Utilities.getQualifiedPath(conf, location));
Partition partition =
org.apache.hadoop.hive.ql.metadata.Partition.createMetaPartitionObject(tableObject, partSpec, location);
AlterTableAddPartitionDesc addPartitionDesc = new AlterTableAddPartitionDesc(ep.database, ep.table, true);
String partLocation = new Path(tableObject.getDataLocation(),
Warehouse.makePartPath(partSpec)).toString();
addPartitionDesc.addPartition(partSpec, partLocation);
Partition partition = Hive.convertAddSpecToMetaPartition(tableObject,
addPartitionDesc.getPartition(0), conf);
msClient.add_partition(partition);
}
catch (AlreadyExistsException e) {
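
Both sides of this hunk compute the same default partition location (table root plus key=value path segments) before calling msClient.add_partition; they differ only in how the Partition object is built (direct createMetaPartitionObject versus the restored AlterTableAddPartitionDesc plus Hive.convertAddSpecToMetaPartition). A standalone sketch of that shared path construction, with an illustrative table location and partition keys:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class PartitionPathSketch {
  public static void main(String[] args) throws MetaException {
    // In HiveEndPoint these come from tableObject.getPartitionKeys()
    // and ep.partitionVals; the values here are made up.
    List<FieldSchema> partKeys = Arrays.asList(
        new FieldSchema("dt", "string", null),
        new FieldSchema("country", "string", null));
    Map<String, String> partSpec =
        Warehouse.makeSpecFromValues(partKeys, Arrays.asList("2019-07-25", "us"));
    Path location = new Path("/warehouse/web_logs", Warehouse.makePartPath(partSpec));
    System.out.println(location); // /warehouse/web_logs/dt=2019-07-25/country=us
  }
}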
@@ -466,6 +486,36 @@ private static void createPartitionIfNotExists(HiveEndPoint ep,
}
}

private static boolean runDDL(IDriver driver, String sql) throws QueryFailedException {
if (LOG.isDebugEnabled()) {
LOG.debug("Running Hive Query: " + sql);
}
driver.run(sql);
return true;
}

private static String partSpecStr(List<FieldSchema> partKeys, ArrayList<String> partVals) {
if (partKeys.size()!=partVals.size()) {
throw new IllegalArgumentException("Partition values:" + partVals +
", does not match the partition Keys in table :" + partKeys );
}
StringBuilder buff = new StringBuilder(partKeys.size()*20);
buff.append(" ( ");
int i=0;
for (FieldSchema schema : partKeys) {
buff.append(schema.getName());
buff.append("='");
buff.append(partVals.get(i));
buff.append("'");
if (i!=partKeys.size()-1) {
buff.append(",");
}
++i;
}
buff.append(" )");
return buff.toString();
}

private static IMetaStoreClient getMetaStoreClient(HiveEndPoint endPoint, HiveConf conf, boolean secureMode)
throws ConnectionError {

(Diff truncated here; the remaining changed files are not shown.)
