Skip to content

Commit

Permalink
HIVE-27268: synthesize empty id lists if no session available (#4303)…
Browse files Browse the repository at this point in the history
… (Henri Biestro, reviewed by Sai Hemanth G)
  • Loading branch information
henrib committed May 9, 2023
1 parent 76bc585 commit 2e32367
Show file tree
Hide file tree
Showing 3 changed files with 39 additions and 2 deletions.
7 changes: 6 additions & 1 deletion ql/src/java/org/apache/hadoop/hive/ql/Context.java
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@
import org.apache.hadoop.hive.common.BlobStorageUtils;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.cleanup.CleanupService;
import org.apache.hadoop.hive.ql.cleanup.SyncCleanupService;
import org.apache.hadoop.hive.ql.exec.TaskRunner;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
Expand Down Expand Up @@ -741,7 +743,10 @@ public void removeScratchDir() {
// because that will be taken care by removeResultCacheDir
FileSystem fs = p.getFileSystem(conf);
LOG.info("Deleting scratch dir: {}", p);
sessionState.getCleanupService().deleteRecursive(p, fs);
CleanupService cleanupService = sessionState != null
? sessionState.getCleanupService()
: SyncCleanupService.INSTANCE;
cleanupService.deleteRecursive(p, fs);
}
} catch (Exception e) {
LOG.warn("Error Removing Scratch", e);
Expand Down
7 changes: 6 additions & 1 deletion ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
Original file line number Diff line number Diff line change
Expand Up @@ -1773,9 +1773,14 @@ public Table getTable(final String dbName, final String tableName, String metaTa
*/
private ValidWriteIdList getValidWriteIdList(String dbName, String tableName) throws LockException {
  // HIVE-27268: a SessionState (and thus a txn manager) may be absent, e.g. when
  // called from a thread with no attached session; guard every dereference.
  SessionState sessionState = SessionState.get();
  HiveTxnManager txnMgr = sessionState != null ? sessionState.getTxnMgr() : null;
  long txnId = txnMgr != null ? txnMgr.getCurrentTxnId() : 0;
  ValidWriteIdList validWriteIdList;
  if (txnId > 0) {
    // An open transaction exists: resolve the write-id list against its txn list.
    validWriteIdList = AcidUtils.getTableValidWriteIdListWithTxnList(conf, dbName, tableName);
  } else {
    // No transaction/session: synthesize an empty write-id list (no open or aborted
    // write ids, high-watermark MAX_VALUE) so callers can proceed without a session.
    String fullTableName = getFullTableName(dbName, tableName);
    validWriteIdList = new ValidReaderWriteIdList(fullTableName, new long[0], new BitSet(), Long.MAX_VALUE);
  }
  return validWriteIdList;
}
Expand Down
27 changes: 27 additions & 0 deletions ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
import org.apache.hadoop.hive.ql.io.BucketCodec;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
Expand Down Expand Up @@ -909,5 +910,31 @@ public void testEmptyCompactionResult() throws Exception {
Assert.assertEquals(stringifyValues(data), rs);

}

/**
 * HIVE-27268: getPartitions must not NPE when no SessionState is attached.
 */
@Test
public void testGetPartitionsNoSession() throws Exception {
  // Keep the insert path simple: disable sorted dynamic partitioning.
  hiveConf.setIntVar(HiveConf.ConfVars.HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD, -1);
  runStatementOnDriver("drop table if exists T");
  runStatementOnDriver("create table T(a int, b int) partitioned by (p int, q int) " +
      "stored as orc TBLPROPERTIES ('transactional'='true')");

  // Column stats are computed up front; they are only recomputed after a major
  // compaction if they already existed.
  int[][] rows = {{4, 1, 1}, {4, 2, 2}, {4, 3, 1}, {4, 4, 2}};
  runStatementOnDriver("insert into T partition(p=1,q) " + makeValuesClause(rows));
  runStatementOnDriver("analyze table T partition(p=1) compute statistics for columns");

  Hive db = Hive.get();
  org.apache.hadoop.hive.ql.metadata.Table tbl = db.getTable("T");
  // Detach so that getValidWriteIdList (reached through getPartitions) must cope
  // with the absence of a session.
  SessionState.detachSession();
  List<org.apache.hadoop.hive.ql.metadata.Partition> parts = db.getPartitions(tbl);
  Assert.assertNotNull(parts);
  // Close the driver here so tearDown does not fail on the detached session.
  d.close();
  d = null;
}
}

0 comments on commit 2e32367

Please sign in to comment.