HIVE-27055: hive-exec typos part 3 #4035

Merged: 21 commits, Feb 24, 2023. The diff below shows changes from 19 commits.
@@ -310,7 +310,7 @@ public DiskRangeList createCacheChunk(
arrayOffset + offsetFromReadStart + extraDiskDataOffset,
smallSize, bb, cacheRanges, largeBufCount, chunkFrom + extraOffsetInChunk);
extraDiskDataOffset += smallSize;
extraOffsetInChunk += smallSize; // Not strictly necessary, noone will look at it.
extraOffsetInChunk += smallSize; // Not strictly necessary, no one will look at it.
if (newCacheData == null) {
newCacheData = smallBuffer;
} else {
10 changes: 5 additions & 5 deletions ql/src/java/org/apache/hadoop/hive/ql/HiveQueryLifeTimeHook.java
@@ -87,11 +87,11 @@ private void checkAndRollbackCTAS(QueryLifeTimeHookContext ctx) {
if (table != null) {
LOG.info("Performing cleanup as part of rollback: {}", table.getFullTableName().toString());
try {
CompactionRequest rqst = new CompactionRequest(table.getDbName(), table.getTableName(), CompactionType.MAJOR);
rqst.setRunas(TxnUtils.findUserToRunAs(tblPath.toString(), table.getTTable(), conf));
rqst.putToProperties(META_TABLE_LOCATION, tblPath.toString());
rqst.putToProperties(IF_PURGE, Boolean.toString(true));
boolean success = Hive.get(conf).getMSC().submitForCleanup(rqst, writeId,
CompactionRequest request = new CompactionRequest(table.getDbName(), table.getTableName(), CompactionType.MAJOR);
request.setRunas(TxnUtils.findUserToRunAs(tblPath.toString(), table.getTTable(), conf));
request.putToProperties(META_TABLE_LOCATION, tblPath.toString());
request.putToProperties(IF_PURGE, Boolean.toString(true));
boolean success = Hive.get(conf).getMSC().submitForCleanup(request, writeId,
pCtx.getQueryState().getTxnManager().getCurrentTxnId());
if (success) {
LOG.info("The cleanup request has been submitted");
2 changes: 1 addition & 1 deletion ql/src/java/org/apache/hadoop/hive/ql/IDriver.java
@@ -31,7 +31,7 @@
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;

/**
* Hive query executer driver.
* Hive query executor driver.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
16 changes: 8 additions & 8 deletions ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java
@@ -47,7 +47,7 @@
public class QueryDisplay {

/**
* Preffered objectmapper for this class.
* Preferred objectMapper for this class.
*
* It must be used to have things work in shaded environment (and its also more performant).
*/
@@ -307,55 +307,55 @@ public synchronized void setExplainPlan(String explainPlan) {

/**
* @param phase phase of query
* @return map of HMS Client method-calls and duration in miliseconds, during given phase.
* @return map of HMS Client method-calls and duration in milliseconds, during given phase.
*/
public synchronized Map<String, Long> getHmsTimings(Phase phase) {
return hmsTimingMap.get(phase);
}

/**
* @param phase phase of query
* @param hmsTimings map of HMS Client method-calls and duration in miliseconds, during given phase.
* @param hmsTimings map of HMS Client method-calls and duration in milliseconds, during given phase.
*/
public synchronized void setHmsTimings(Phase phase, Map<String, Long> hmsTimings) {
hmsTimingMap.put(phase, hmsTimings);
}

/**
* @param phase phase of query
* @return map of PerfLogger call-trace name and start time in miliseconds, during given phase.
* @return map of PerfLogger call-trace name and start time in milliseconds, during given phase.
*/
public synchronized Map<String, Long> getPerfLogStarts(Phase phase) {
return perfLogStartMap.get(phase);
}

/**
* @param phase phase of query
* @param perfLogStarts map of PerfLogger call-trace name and start time in miliseconds, during given phase.
* @param perfLogStarts map of PerfLogger call-trace name and start time in milliseconds, during given phase.
*/
public synchronized void setPerfLogStarts(Phase phase, Map<String, Long> perfLogStarts) {
perfLogStartMap.put(phase, perfLogStarts);
}

/**
* @param phase phase of query
* @return map of PerfLogger call-trace name and end time in miliseconds, during given phase.
* @return map of PerfLogger call-trace name and end time in milliseconds, during given phase.
*/
public synchronized Map<String, Long> getPerfLogEnds(Phase phase) {
return perfLogEndMap.get(phase);
}

/**
* @param phase phase of query
* @param perfLogEnds map of PerfLogger call-trace name and end time in miliseconds, during given phase.
* @param perfLogEnds map of PerfLogger call-trace name and end time in milliseconds, during given phase.
*/
public synchronized void setPerfLogEnds(Phase phase, Map<String, Long> perfLogEnds) {
perfLogEndMap.put(phase, perfLogEnds);
}

/**
* @param phase phase of query
* @return map of PerfLogger call-trace name and duration in miliseconds, during given phase.
* @return map of PerfLogger call-trace name and duration in milliseconds, during given phase.
*/
public synchronized Map<String, Long> getPerfLogTimes(Phase phase) {
Map<String, Long> times = new HashMap<>();
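As context for the javadoc fixes above: these accessors expose per-phase maps pairing an HMS client method (or PerfLogger call-trace name) with a duration or timestamp in milliseconds. A minimal usage sketch follows; the phase constant and the way the QueryDisplay instance is obtained are assumptions, not taken from this PR.

```java
import java.util.Map;
import org.apache.hadoop.hive.ql.QueryDisplay;

// Hypothetical helper: print how long each HMS client call took in a given phase.
// QueryDisplay.Phase.EXECUTION is assumed here; use whichever phases the enum defines.
static void printHmsTimings(QueryDisplay display) {
  Map<String, Long> hmsTimings = display.getHmsTimings(QueryDisplay.Phase.EXECUTION);
  if (hmsTimings != null) {
    hmsTimings.forEach((method, millis) -> System.out.println(method + ": " + millis + " ms"));
  }
}
```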
@@ -44,7 +44,6 @@
import org.apache.hadoop.hive.ql.ddl.ShowUtils;
import org.apache.hadoop.hive.ql.ddl.table.info.desc.formatter.DescTableFormatter;
import org.apache.hadoop.hive.ql.exec.ColumnInfo;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.ddl.DDLOperation;
import org.apache.hadoop.hive.ql.metadata.Hive;
4 changes: 2 additions & 2 deletions ql/src/java/org/apache/hadoop/hive/ql/exec/BoundaryCache.java
@@ -97,15 +97,15 @@ public void clear() {
}

/**
* Returns entry corresponding to highest row index.
* Returns entry corresponding to the highest row index.
* @return max entry.
*/
public Map.Entry<Integer, Object> getMaxEntry() {
return floorEntry(Integer.MAX_VALUE);
}

/**
* Removes eldest entry from the boundary cache.
* Removes the eldest entry from the boundary cache.
*/
public void evictOne() {
if (queue.isEmpty()) {
@@ -1136,8 +1136,8 @@ public void process(Object row, int tag) throws HiveException {
}

rowOutWriters = fpaths.outWriters;
// check if all record writers implement statistics. if atleast one RW
// doesn't implement stats interface we will fallback to conventional way
// check if all record writers implement statistics. if at least one RW
// doesn't implement stats interface we will fall back to conventional way
// of gathering stats
isCollectRWStats = areAllTrue(statsFromRecordWriter);
if (conf.isGatherStats() && !isCollectRWStats) {
@@ -1637,7 +1637,7 @@ public void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException
}
}
if (conf.getTableInfo().isNonNative()) {
//check the ouput specs only if it is a storage handler (native tables's outputformats does
//check the output specs only if it is a storage handler (native tables's outputformats does
//not set the job's output properties correctly)
try {
hiveOutputFormat.checkOutputSpecs(ignored, job);
@@ -1129,7 +1129,7 @@ public static boolean shouldEmitSummaryRow(GroupByDesc desc) {
int groupingSetPosition = desc.getGroupingSetPosition();
List<Long> listGroupingSets = desc.getListGroupingSets();
// groupingSets are known at map/reducer side; but have to do real processing
// hence grouppingSetsPresent is true only at map side
// hence groupingSetsPresent is true only at map side
if (groupingSetPosition >= 0 && listGroupingSets != null) {
Long emptyGrouping = (1L << groupingSetPosition) - 1;
if (listGroupingSets.contains(emptyGrouping)) {
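To make the bit arithmetic above concrete: the grouping-set id is treated as a bitmask over the grouping keys, and the mask with every key bit set is used as the id of the empty grouping set, i.e. the grand-total (summary) row. A small worked example, as an illustration only, not code from this file:

```java
// With 3 grouping keys, groupingSetPosition == 3.
int groupingSetPosition = 3;
long emptyGrouping = (1L << groupingSetPosition) - 1;  // 0b111 == 7
// If listGroupingSets contains 7, the query asked for the grouping set in which
// every key is rolled up, so shouldEmitSummaryRow() reports that a summary row is needed.
```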
@@ -18,7 +18,6 @@

package org.apache.hadoop.hive.ql.exec;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -181,7 +180,7 @@ void endGroup() throws IOException, HiveException {
RowContainer<ArrayList<Object>> bigKey = (RowContainer)joinOp.storage[currBigKeyTag];
Path outputPath = getOperatorOutputPath(specPath);
FileSystem destFs = outputPath.getFileSystem(hconf);
bigKey.copyToDFSDirecory(destFs, outputPath);
bigKey.copyToDFSDirectory(destFs, outputPath);

for (int i = 0; i < numAliases; i++) {
if (((byte) i) == currBigKeyTag) {
@@ -191,7 +190,7 @@ void endGroup() throws IOException, HiveException {
if (values != null) {
specPath = conf.getSmallKeysDirMap().get((byte) currBigKeyTag).get(
(byte) i);
values.copyToDFSDirecory(destFs, getOperatorOutputPath(specPath));
values.copyToDFSDirectory(destFs, getOperatorOutputPath(specPath));
}
}
}
@@ -468,7 +468,7 @@ private long refreshMemoryUsed() {
if (hp.hashMap != null) {
memUsed += hp.hashMap.memorySize();
} else {
// also include the still-in-memory sidefile, before it has been truely spilled
// also include the still-in-memory sidefile, before it has been truly spilled
if (hp.sidefileKVContainer != null) {
memUsed += hp.sidefileKVContainer.numRowsInReadBuffer() * tableRowSize;
}
@@ -627,7 +627,7 @@ private int biggestPartition() {
}
}

// It can happen that although there're some partitions in memory, but their sizes are all 0.
// It can happen that although there are some partitions in memory, but their sizes are all 0.
// In that case we just pick one and spill.
if (res == -1) {
for (int i = 0; i < hashPartitions.length; i++) {
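A simplified, self-contained sketch of the selection logic the comment above describes; the method name and array parameters are illustrative, not the actual HybridHashTableContainer fields. It picks the largest in-memory partition and, when every in-memory partition reports size 0, falls back to the first one that has not been spilled.

```java
// sizes[i]    - estimated in-memory size of partition i (may legitimately be 0)
// inMemory[i] - true while partition i has not been spilled yet
// Returns the partition index to spill next, or -1 if nothing is left in memory.
static int pickPartitionToSpill(long[] sizes, boolean[] inMemory) {
  int res = -1;
  long max = 0;
  for (int i = 0; i < sizes.length; i++) {
    if (inMemory[i] && sizes[i] > max) {
      max = sizes[i];
      res = i;
    }
  }
  if (res == -1) {
    // All in-memory partitions have size 0; pick any of them so spilling can proceed.
    for (int i = 0; i < sizes.length; i++) {
      if (inMemory[i]) {
        return i;
      }
    }
  }
  return res;
}
```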
@@ -295,7 +295,7 @@ private void removeKeys(ROW ret) {
}

private final ArrayList<Object> row = new ArrayList<Object>(2);

private void spillBlock(ROW[] block, int length) throws HiveException {
try {
if (tmpFile == null) {
@@ -405,7 +405,7 @@ protected boolean nextBlock(int readIntoOffset) throws HiveException {
}
}

public void copyToDFSDirecory(FileSystem destFs, Path destPath) throws IOException, HiveException {
public void copyToDFSDirectory(FileSystem destFs, Path destPath) throws IOException, HiveException {
if (addCursor > 0) {
this.spillBlock(this.currentWriteBlock, addCursor);
}
@@ -798,7 +798,7 @@ && shouldBootstrapDumpAcidTable(table.getTableName())) {
return !ReplUtils.tableIncludedInReplScope(work.oldReplScope, table.getTableName());
}

private boolean isTableSatifiesConfig(Table table) {
private boolean isTableSatisfiesConfig(Table table) {
Reviewer comment (Contributor): if we're about grammar, and this is a private method (we won't break anything), isn't it "doesTableSatisfyConfig" instead?

if (table == null) {
return false;
}
@@ -1095,7 +1095,7 @@ private Long incrementalDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive
dumpTable(exportService, matchedDbName, tableName, validTxnList, dbRootMetadata, dbRootData, bootDumpBeginReplId,
hiveDb, tableTuple, managedTblList, dataCopyAtLoad);
}
if (tableList != null && isTableSatifiesConfig(table)) {
if (tableList != null && isTableSatisfiesConfig(table)) {
tableList.add(tableName);
}
} catch (InvalidTableException te) {
@@ -1418,7 +1418,7 @@ Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive hiveDb)
LOG.debug(te.getMessage());
}
dumpConstraintMetadata(dbName, tblName, dbRoot, hiveDb, table != null ? table.getTTable().getId() : -1);
if (tableList != null && isTableSatifiesConfig(table)) {
if (tableList != null && isTableSatisfiesConfig(table)) {
tableList.add(tblName);
}
}
@@ -1650,7 +1650,7 @@ String getValidTxnListForReplDump(Hive hiveDb, long waitUntilTime) throws HiveEx
// phase won't be able to replicate those txns. So, the logic is to wait for the given amount
// of time to see if all open txns < current txn is getting aborted/committed. If not, then
// we forcefully abort those txns just like AcidHouseKeeperService.
//Exclude readonly and repl created tranasactions
//Exclude readonly and repl created transactions
HiveTxnManager hiveTxnManager = getTxnMgr();
ValidTxnList validTxnList = hiveTxnManager.getValidTxns(excludedTxns);
while (System.currentTimeMillis() < waitUntilTime) {
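A schematic sketch of the wait-then-abort pattern the comment describes. The two callbacks stand in for the checks the real code performs through the transaction manager; they are placeholders, not Hive APIs.

```java
import java.util.function.LongConsumer;
import java.util.function.LongPredicate;

// Poll until either no relevant open transactions remain below currentTxnId or the
// deadline passes, then force-abort whatever is still open (the real code excludes
// read-only and repl-created transactions from this check).
static void waitThenAbort(long waitUntilTime, long currentTxnId,
    LongPredicate hasOpenTxnsBelow, LongConsumer forceAbortBelow) throws InterruptedException {
  while (System.currentTimeMillis() < waitUntilTime) {
    if (!hasOpenTxnsBelow.test(currentTxnId)) {
      return;  // all earlier transactions committed or aborted in time
    }
    Thread.sleep(500);  // poll interval; arbitrary in this sketch
  }
  forceAbortBelow.accept(currentTxnId);  // mirrors what AcidHouseKeeperService would do
}
```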
@@ -146,8 +146,8 @@ void setEventFrom(long eventId) {
void overrideLastEventToDump(Hive fromDb, long bootstrapLastId, long failoverEventId) throws Exception {
// If we are bootstrapping ACID tables, we need to dump all the events upto the event id at
// the beginning of the bootstrap dump and also not dump any event after that. So we override
// both, the last event as well as any user specified limit on the number of events. See
// bootstrampDump() for more details.
// both, the last event and any user specified limit on the number of events. See
// bootstrapDump() for more details.
if (failoverEventId > 0) {
LOG.info("eventTo : {} marked as failover eventId.", eventTo);
eventTo = failoverEventId;
@@ -84,7 +84,7 @@ void dataLocationDump(Table table, FileList fileList, HashMap<String, Boolean> s
}
if (!TableType.EXTERNAL_TABLE.equals(table.getTableType())) {
throw new IllegalArgumentException(
"only External tables can be writen via this writer, provided table is " + table
"only External tables can be written via this writer, provided table is " + table
.getTableType());
}
Path fullyQualifiedDataLocation = PathBuilder.fullyQualifiedHDFSUri(table.getDataLocation(), FileSystem.get(hiveConf));
@@ -31,7 +31,6 @@
import org.apache.hadoop.hive.ql.exec.repl.util.SnapshotUtils;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.parse.repl.load.log.IncrementalLoadLogger;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status;
import org.apache.thrift.TException;
import com.google.common.collect.Collections2;
@@ -164,7 +163,7 @@ public int execute() {
addAtlasLoadTask();
}
if (conf.getBoolVar(HiveConf.ConfVars.REPL_RANGER_HANDLE_DENY_POLICY_TARGET)) {
initiateRangerDenytask();
initiateRangerDenyTask();
}
if (shouldLoadAuthorizationMetadata()) {
initiateAuthorizationLoadTask();
@@ -203,7 +202,7 @@ private boolean shouldLoadAuthorizationMetadata() {
return conf.getBoolVar(HiveConf.ConfVars.REPL_INCLUDE_AUTHORIZATION_METADATA);
}

private void initiateRangerDenytask() throws SemanticException {
private void initiateRangerDenyTask() throws SemanticException {
if (RANGER_AUTHORIZER.equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.REPL_AUTHORIZATION_PROVIDER_SERVICE))) {
LOG.info("Adding Ranger Deny Policy Task for {} ", work.dbNameToLoadIn);
RangerDenyWork rangerDenyWork = new RangerDenyWork(new Path(work.getDumpDirectory()), work.getSourceDbName(),
@@ -669,7 +668,7 @@ public void run() throws SemanticException {

db.setParameters(params);
hiveDb.alterDatabase(work.getTargetDatabase(), db);
LOG.debug("Database {} poperties after removal {}", work.getTargetDatabase(), params);
LOG.debug("Database {} properties after removal {}", work.getTargetDatabase(), params);
} catch (HiveException e) {
throw new SemanticException(e);
}
@@ -847,7 +846,7 @@ private int executeIncrementalLoad(long loadStartTime) throws Exception {

Hive db = getHive();
for (String table : work.tablesToDrop) {
LOG.info("Dropping table {} for optimised bootstarap", work.dbNameToLoadIn + "." + table);
LOG.info("Dropping table {} for optimised bootstrap", work.dbNameToLoadIn + "." + table);
db.dropTable(work.dbNameToLoadIn + "." + table, true);
}
Database sourceDb = getSourceDbMetadata(); //This sourceDb was the actual target prior to failover.
@@ -218,7 +218,7 @@ private ObjectName initializeMetricsMBeans(HiveConf hiveConf, String dbNameToLoa
return null;
}

// Unregisters MBeans by forming the Metrics same as how the Hadoop code forms during MBean registeration.
// Unregisters MBeans by forming the Metrics same as how the Hadoop code forms during MBean registration.
private void unRegisterMBeanIfRegistered(String serviceName, String nameName,
Map<String, String> additionalParameters) {

@@ -284,7 +284,7 @@ public Task<?> getRootTask() {

@Override
public String getDumpDirectory() {return dumpDirectory;}

public void setRootTask(Task<?> rootTask) {
this.rootTask = rootTask;
}
@@ -40,7 +40,7 @@ public VectorGroupKeyHelper(int keyCount) {

void init(VectorExpression[] keyExpressions) throws HiveException {

// NOTE: To support pruning the grouping set id dummy key by VectorGroupbyOpeator MERGE_PARTIAL
// NOTE: To support pruning the grouping set id dummy key by VectorGroupByOperator MERGE_PARTIAL
// case, we use the keyCount passed to the constructor and not keyExpressions.length.

// Inspect the output type of each key expression. And, remember the output columns.
@@ -62,7 +62,7 @@ void init(VectorExpression[] keyExpressions) throws HiveException {
/*
* This helper method copies the group keys from one vectorized row batch to another,
* but does not increment the outputBatch.size (i.e. the next output position).
*
*
* It was designed for VectorGroupByOperator's sorted reduce group batch processing mode
* to copy the group keys at startGroup.
*/
@@ -75,7 +75,7 @@ public void copyGroupKey(VectorizedRowBatch inputBatch, VectorizedRowBatch outpu
LongColumnVector inputColumnVector = (LongColumnVector) inputBatch.cols[inputColumnNum];
LongColumnVector outputColumnVector = (LongColumnVector) outputBatch.cols[outputColumnNum];

// This vectorized code pattern says:
// This vectorized code pattern says:
// If the input batch has no nulls at all (noNulls is true) OR
// the input row is NOT NULL, copy the value.
//
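The comment block above describes a standard vectorized null-handling idiom. Below is a minimal single-row sketch of that pattern using the public LongColumnVector fields (noNulls, isNull, vector); it is an illustration, not the actual body of copyGroupKey.

```java
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

// Copy one key value from an input column to an output column, propagating NULLs:
// copy when the whole column has no nulls or this row is not null, otherwise mark
// the output row as null and clear the output column's noNulls flag.
static void copyLongKey(LongColumnVector in, int inRow, LongColumnVector out, int outRow) {
  if (in.noNulls || !in.isNull[inRow]) {
    out.isNull[outRow] = false;
    out.vector[outRow] = in.vector[inRow];
  } else {
    out.noNulls = false;
    out.isNull[outRow] = true;
  }
}
```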
@@ -210,7 +210,7 @@ public void evaluate(VectorizedRowBatch batch) throws HiveException {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/*
* Same LIST for entire batch. Still need to validate the LIST upper limit against varing
* Same LIST for entire batch. Still need to validate the LIST upper limit against varying
* INDEX.
*
* (Repeated INDEX case handled above).
@@ -402,7 +402,7 @@ private static void propagateNullsCombine(boolean selectedInUse, int n, int[] se
* @param sel selected value position array
* @param n number of qualifying rows
* @param inV input vector
* @param outV ouput vector
* @param outV output vector
*/
private static void propagateNulls(boolean selectedInUse, int n, int[] sel, ColumnVector inV,
ColumnVector outV) {