Skip to content
Permalink
Browse files
HIVE-26237: Check if replication causes metastore connection leakage (…
…Haymant Mangla, reviewed by Denys Kuzmenko, Peter Vary)

Closes #3298
  • Loading branch information
hmangla98 committed May 23, 2022
1 parent 6bfb6f6 commit ad92d0d4cb6ebf582322e2f457e813ed4b407fee
Showing 7 changed files with 22 additions and 16 deletions.
@@ -22,6 +22,7 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.shims.Utils;
import org.junit.After;
import org.junit.AfterClass;
@@ -121,6 +122,7 @@ static void internalBeforeClassSetupExclusiveReplica(Map<String, String> primary
public static void classLevelTearDown() throws IOException {
  // Close each resource in its own try/finally so that an IOException from an
  // earlier close() cannot skip the later ones. In particular the thread-local
  // Hive instance must always be released, or the test JVM leaks a metastore
  // connection across test classes (HIVE-26237).
  try {
    primary.close();
  } finally {
    try {
      replica.close();
    } finally {
      Hive.closeCurrent();
    }
  }
}

private static void setFullyQualifiedReplicaExternalTableBase(FileSystem fs) throws IOException {
@@ -29,6 +29,7 @@
import org.apache.hadoop.hive.common.repl.ReplScope;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.repl.ReplAck;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.StringAppender;
import org.apache.hadoop.hive.ql.parse.repl.metric.MetricCollector;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata;
@@ -262,17 +263,20 @@ public static void tearDownAfterClass(){
// FIXME : should clean up TEST_PATH, but not doing it now, for debugging's sake
//Clean up the warehouse after test run as we are restoring the warehouse path for other metastore creation
Path warehousePath = new Path(MetastoreConf.getVar(hconf, MetastoreConf.ConfVars.WAREHOUSE));
try {
warehousePath.getFileSystem(hconf).delete(warehousePath, true);
} catch (IOException e) {

}
Path warehousePathReplica = new Path(MetastoreConf.getVar(hconfMirror, MetastoreConf.ConfVars.WAREHOUSE));
try {
warehousePath.getFileSystem(hconf).delete(warehousePath, true);
warehousePathReplica.getFileSystem(hconfMirror).delete(warehousePathReplica, true);
} catch (IOException e) {

}
Hive.closeCurrent();
if (metaStoreClient != null) {
metaStoreClient.close();
}
if (metaStoreClientMirror != null) {
metaStoreClientMirror.close();
}
}

@Before
@@ -612,6 +612,9 @@ public void close() throws IOException {
if (miniDFSCluster != null && miniDFSCluster.isClusterUp()) {
miniDFSCluster.shutdown();
}
if (client != null) {
client.close();
}
}

CurrentNotificationEventId getCurrentNotificationEventId() throws Exception {
@@ -30,7 +30,6 @@
* Context for DDL operations.
*/
public class DDLOperationContext {
private final Hive db;
private final HiveConf conf;
private final Context context;
private final DDLTask task;
@@ -40,8 +39,7 @@ public class DDLOperationContext {
private final LogHelper console;

public DDLOperationContext(HiveConf conf, Context context, DDLTask task, DDLWork work, QueryState queryState,
QueryPlan queryPlan, LogHelper console) throws HiveException {
this.db = Hive.get(conf);
QueryPlan queryPlan, LogHelper console){
this.conf = conf;
this.context = context;
this.task = task;
@@ -51,8 +49,8 @@ public DDLOperationContext(HiveConf conf, Context context, DDLTask task, DDLWork
this.console = console;
}

public Hive getDb() {
return db;
public Hive getDb() throws HiveException {
return Hive.get(conf);
}

public HiveConf getConf() {
@@ -293,7 +293,7 @@ private void setConstraintsAndStorageHandlerInfo(Table table) throws HiveExcepti
}
}

private void handleMaterializedView(Table table) throws LockException {
private void handleMaterializedView(Table table) throws HiveException {
if (table.isMaterializedView()) {
table.setOutdatedForRewriting(context.getDb().isOutdatedMaterializedView(
table,
@@ -63,7 +63,7 @@ public int execute() throws HiveException {
return executeTask(generalContext, task);
}

private MergeFileWork getMergeFileWork(CompilationOpContext opContext) {
private MergeFileWork getMergeFileWork(CompilationOpContext opContext) throws HiveException {
List<Path> inputDirList = Lists.newArrayList(desc.getInputDir());

// merge work only needs input and output.
@@ -30,7 +30,7 @@
import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.common.repl.ReplScope;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
@@ -96,7 +96,6 @@
import static org.apache.hadoop.hive.ql.exec.repl.OptimisedBootstrapUtils.getEventIdFromFile;
import static org.apache.hadoop.hive.ql.exec.repl.OptimisedBootstrapUtils.prepareTableDiffFile;
import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_METADATA;
import static org.apache.hadoop.hive.ql.exec.repl.ReplExternalTables.getExternalTableBaseDir;
import static org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.LoadDatabase.AlterDatabase;
import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT;
import static org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils.RANGER_AUTHORIZER;
@@ -549,8 +548,8 @@ private void createReplLoadCompleteAckTask() {
@Override
public void run() throws SemanticException {
try {
HiveMetaStoreClient metaStoreClient = new HiveMetaStoreClient(conf);
long currentNotificationID = metaStoreClient.getCurrentNotificationEventId().getEventId();
IMetaStoreClient client = getHive().getMSC();
long currentNotificationID = client.getCurrentNotificationEventId().getEventId();
Path loadMetadataFilePath = new Path(work.dumpDirectory, LOAD_METADATA.toString());
Utils.writeOutput(String.valueOf(currentNotificationID), loadMetadataFilePath, conf);
LOG.info("Created LOAD Metadata file : {} with NotificationID : {}",

0 comments on commit ad92d0d

Please sign in to comment.