HIVE-27055: hive-exec typos part 3 (apache#4035) (Michal Lorek reviewed by Laszlo Bodor)
mlorek authored and yeahyung committed Jul 20, 2023
1 parent 8344e92 commit ce49f14
Showing 86 changed files with 290 additions and 292 deletions.
@@ -310,7 +310,7 @@ public DiskRangeList createCacheChunk(
arrayOffset + offsetFromReadStart + extraDiskDataOffset,
smallSize, bb, cacheRanges, largeBufCount, chunkFrom + extraOffsetInChunk);
extraDiskDataOffset += smallSize;
extraOffsetInChunk += smallSize; // Not strictly necessary, noone will look at it.
extraOffsetInChunk += smallSize; // Not strictly necessary, no one will look at it.
if (newCacheData == null) {
newCacheData = smallBuffer;
} else {
2 changes: 1 addition & 1 deletion ql/src/java/org/apache/hadoop/hive/ql/Compiler.java
@@ -89,7 +89,7 @@ public Compiler(Context context, DriverContext driverContext, DriverState driver

/**
* @param deferClose indicates if the close/destroy should be deferred when the process has been interrupted
* it should be set to true if the compile is called within another method like runInternal,
* it should be set to true if the compile method is called within another method like runInternal,
* which defers the close to the called in that method.
*/
public QueryPlan compile(String rawCommand, boolean deferClose) throws CommandProcessorException {
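A minimal, generic sketch of the defer-close convention the Javadoc above describes; the names below are illustrative, not Hive's. The idea: when an enclosing caller such as runInternal owns the session lifecycle, the callee skips cleanup on interruption and leaves it to that caller.

    // Illustrative sketch only, not part of the commit.
    public class DeferCloseSketch {
      static void compileStep(AutoCloseable sharedResource, boolean deferClose) throws Exception {
        try {
          // ... compilation work that may be interrupted ...
        } finally {
          if (!deferClose) {
            // Close only when no enclosing caller (e.g. runInternal) will clean up later.
            sharedResource.close();
          }
        }
      }
    }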
10 changes: 5 additions & 5 deletions ql/src/java/org/apache/hadoop/hive/ql/HiveQueryLifeTimeHook.java
@@ -87,11 +87,11 @@ private void checkAndRollbackCTAS(QueryLifeTimeHookContext ctx) {
if (table != null) {
LOG.info("Performing cleanup as part of rollback: {}", table.getFullTableName().toString());
try {
CompactionRequest rqst = new CompactionRequest(table.getDbName(), table.getTableName(), CompactionType.MAJOR);
rqst.setRunas(TxnUtils.findUserToRunAs(tblPath.toString(), table.getTTable(), conf));
rqst.putToProperties(META_TABLE_LOCATION, tblPath.toString());
rqst.putToProperties(IF_PURGE, Boolean.toString(true));
boolean success = Hive.get(conf).getMSC().submitForCleanup(rqst, writeId,
CompactionRequest request = new CompactionRequest(table.getDbName(), table.getTableName(), CompactionType.MAJOR);
request.setRunas(TxnUtils.findUserToRunAs(tblPath.toString(), table.getTTable(), conf));
request.putToProperties(META_TABLE_LOCATION, tblPath.toString());
request.putToProperties(IF_PURGE, Boolean.toString(true));
boolean success = Hive.get(conf).getMSC().submitForCleanup(request, writeId,
pCtx.getQueryState().getTxnManager().getCurrentTxnId());
if (success) {
LOG.info("The cleanup request has been submitted");
2 changes: 1 addition & 1 deletion ql/src/java/org/apache/hadoop/hive/ql/IDriver.java
@@ -31,7 +31,7 @@
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;

/**
* Hive query executer driver.
* Hive query executor driver.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
16 changes: 8 additions & 8 deletions ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java
@@ -47,7 +47,7 @@
public class QueryDisplay {

/**
* Preffered objectmapper for this class.
* Preferred objectMapper for this class.
*
* It must be used to have things work in shaded environment (and its also more performant).
*/
@@ -307,55 +307,55 @@ public synchronized void setExplainPlan(String explainPlan) {

/**
* @param phase phase of query
* @return map of HMS Client method-calls and duration in miliseconds, during given phase.
* @return map of HMS Client method-calls and duration in milliseconds, during given phase.
*/
public synchronized Map<String, Long> getHmsTimings(Phase phase) {
return hmsTimingMap.get(phase);
}

/**
* @param phase phase of query
* @param hmsTimings map of HMS Client method-calls and duration in miliseconds, during given phase.
* @param hmsTimings map of HMS Client method-calls and duration in milliseconds, during given phase.
*/
public synchronized void setHmsTimings(Phase phase, Map<String, Long> hmsTimings) {
hmsTimingMap.put(phase, hmsTimings);
}

/**
* @param phase phase of query
* @return map of PerfLogger call-trace name and start time in miliseconds, during given phase.
* @return map of PerfLogger call-trace name and start time in milliseconds, during given phase.
*/
public synchronized Map<String, Long> getPerfLogStarts(Phase phase) {
return perfLogStartMap.get(phase);
}

/**
* @param phase phase of query
* @param perfLogStarts map of PerfLogger call-trace name and start time in miliseconds, during given phase.
* @param perfLogStarts map of PerfLogger call-trace name and start time in milliseconds, during given phase.
*/
public synchronized void setPerfLogStarts(Phase phase, Map<String, Long> perfLogStarts) {
perfLogStartMap.put(phase, perfLogStarts);
}

/**
* @param phase phase of query
* @return map of PerfLogger call-trace name and end time in miliseconds, during given phase.
* @return map of PerfLogger call-trace name and end time in milliseconds, during given phase.
*/
public synchronized Map<String, Long> getPerfLogEnds(Phase phase) {
return perfLogEndMap.get(phase);
}

/**
* @param phase phase of query
* @param perfLogEnds map of PerfLogger call-trace name and end time in miliseconds, during given phase.
* @param perfLogEnds map of PerfLogger call-trace name and end time in milliseconds, during given phase.
*/
public synchronized void setPerfLogEnds(Phase phase, Map<String, Long> perfLogEnds) {
perfLogEndMap.put(phase, perfLogEnds);
}

/**
* @param phase phase of query
* @return map of PerfLogger call-trace name and duration in miliseconds, during given phase.
* @return map of PerfLogger call-trace name and duration in milliseconds, during given phase.
*/
public synchronized Map<String, Long> getPerfLogTimes(Phase phase) {
Map<String, Long> times = new HashMap<>();
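A self-contained sketch of how per-phase durations can be derived from the matching start/end maps documented above; it assumes a simple end-minus-start computation and is an illustration, not the actual getPerfLogTimes implementation.

    import java.util.HashMap;
    import java.util.Map;

    public class PerfLogTimesSketch {
      public static void main(String[] args) {
        Map<String, Long> starts = new HashMap<>();   // call-trace name -> start time (ms)
        Map<String, Long> ends = new HashMap<>();     // call-trace name -> end time (ms)
        starts.put("compile", 1_000L);
        ends.put("compile", 1_450L);

        Map<String, Long> times = new HashMap<>();
        for (Map.Entry<String, Long> e : ends.entrySet()) {
          Long start = starts.get(e.getKey());
          if (start != null) {
            times.put(e.getKey(), e.getValue() - start); // duration in milliseconds
          }
        }
        System.out.println(times); // {compile=450}
      }
    }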
@@ -44,7 +44,6 @@
import org.apache.hadoop.hive.ql.ddl.ShowUtils;
import org.apache.hadoop.hive.ql.ddl.table.info.desc.formatter.DescTableFormatter;
import org.apache.hadoop.hive.ql.exec.ColumnInfo;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.ddl.DDLOperation;
import org.apache.hadoop.hive.ql.metadata.Hive;
6 changes: 3 additions & 3 deletions ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
@@ -234,10 +234,10 @@ public static String getPartialName(Partition p, int level) throws HiveException

/**
* Determines if one can insert into partition(s), or there's a conflict with
* archive. It can be because partition is itself archived or it is to be
* archive. It can be because partition is itself archived, or it is to be
* created inside existing archive. The second case is when partition doesn't
* exist yet, but it would be inside of an archive if it existed. This one is
* quite tricky to check, we need to find at least one partition inside of
* exist yet, but it would be inside an archive if it existed. This one is
* quite tricky to check, we need to find at least one partition inside
* the parent directory. If it is archived and archiving level tells that
* the archival was done of directory partition is in it means we cannot
* insert; otherwise we can.
4 changes: 2 additions & 2 deletions ql/src/java/org/apache/hadoop/hive/ql/exec/BoundaryCache.java
@@ -97,15 +97,15 @@ public void clear() {
}

/**
* Returns entry corresponding to highest row index.
* Returns entry corresponding to the highest row index.
* @return max entry.
*/
public Map.Entry<Integer, Object> getMaxEntry() {
return floorEntry(Integer.MAX_VALUE);
}

/**
* Removes eldest entry from the boundary cache.
* Removes the eldest entry from the boundary cache.
*/
public void evictOne() {
if (queue.isEmpty()) {
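getMaxEntry above resolves the highest row index via a floor lookup; a tiny sketch, assuming the boundary cache is backed by a TreeMap keyed by row index (the class name and sample keys are invented):

    import java.util.Map;
    import java.util.TreeMap;

    public class MaxEntrySketch {
      public static void main(String[] args) {
        TreeMap<Integer, Object> boundaries = new TreeMap<>();
        boundaries.put(10, "boundary@10");
        boundaries.put(250, "boundary@250");
        boundaries.put(42, "boundary@42");

        // floorEntry(Integer.MAX_VALUE) yields the entry with the greatest key,
        // i.e. the boundary for the highest cached row index.
        Map.Entry<Integer, Object> max = boundaries.floorEntry(Integer.MAX_VALUE);
        System.out.println(max.getKey() + " -> " + max.getValue()); // 250 -> boundary@250
      }
    }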
@@ -1136,8 +1136,8 @@ public void process(Object row, int tag) throws HiveException {
}

rowOutWriters = fpaths.outWriters;
// check if all record writers implement statistics. if atleast one RW
// doesn't implement stats interface we will fallback to conventional way
// check if all record writers implement statistics. if at least one RW
// doesn't implement stats interface we will fall back to conventional way
// of gathering stats
isCollectRWStats = areAllTrue(statsFromRecordWriter);
if (conf.isGatherStats() && !isCollectRWStats) {
@@ -1637,7 +1637,7 @@ public void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException
}
}
if (conf.getTableInfo().isNonNative()) {
//check the ouput specs only if it is a storage handler (native tables's outputformats does
//check the output specs only if it is a storage handler (native tables's outputformats does
//not set the job's output properties correctly)
try {
hiveOutputFormat.checkOutputSpecs(ignored, job);
@@ -1129,7 +1129,7 @@ public static boolean shouldEmitSummaryRow(GroupByDesc desc) {
int groupingSetPosition = desc.getGroupingSetPosition();
List<Long> listGroupingSets = desc.getListGroupingSets();
// groupingSets are known at map/reducer side; but have to do real processing
// hence grouppingSetsPresent is true only at map side
// hence groupingSetsPresent is true only at map side
if (groupingSetPosition >= 0 && listGroupingSets != null) {
Long emptyGrouping = (1L << groupingSetPosition) - 1;
if (listGroupingSets.contains(emptyGrouping)) {
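The emptyGrouping test above is a bitmask check; a short worked example with a hypothetical groupingSetPosition of 3 (the grouping-set ids below are invented):

    import java.util.List;

    public class GroupingMaskSketch {
      public static void main(String[] args) {
        int groupingSetPosition = 3;                          // hypothetical value
        long emptyGrouping = (1L << groupingSetPosition) - 1; // low bits all set: 0b111 == 7
        List<Long> listGroupingSets = List.of(3L, 7L);        // invented grouping-set ids
        System.out.println(Long.toBinaryString(emptyGrouping));       // 111
        System.out.println(listGroupingSets.contains(emptyGrouping)); // true -> summary row emitted
      }
    }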
2 changes: 1 addition & 1 deletion ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java
@@ -426,7 +426,7 @@ private static ObjectInspector createStructFromFields(List<StructField> fields,
private static ObjectInspector unflattenObjInspector(ObjectInspector oi) {
if (oi instanceof StructObjectInspector) {
// Check if all fields start with "key." or "value."
// If so, then unflatten by adding an additional level of nested key and value structs
// If so, then unflatten by adding a level of nested key and value structs
// Example: { "key.reducesinkkey0":int, "key.reducesinkkey1": int, "value._col6":int }
// Becomes
// { "key": { "reducesinkkey0":int, "reducesinkkey1":int }, "value": { "_col6":int } }
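A stand-alone sketch of the unflattening idea spelled out in the comment above, grouping "key."/"value."-prefixed names under nested entries; it mimics the resulting shape only and is not Hive's ObjectInspector code.

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class UnflattenSketch {
      public static void main(String[] args) {
        // Flattened field names as in the example comment: "key.*" and "value.*".
        List<String> flat = List.of("key.reducesinkkey0", "key.reducesinkkey1", "value._col6");

        Map<String, Map<String, String>> nested = new LinkedHashMap<>();
        for (String name : flat) {
          int dot = name.indexOf('.');
          String prefix = name.substring(0, dot);
          String suffix = name.substring(dot + 1);
          nested.computeIfAbsent(prefix, p -> new LinkedHashMap<>()).put(suffix, "int");
        }
        System.out.println(nested);
        // {key={reducesinkkey0=int, reducesinkkey1=int}, value={_col6=int}}
      }
    }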
@@ -18,7 +18,6 @@

package org.apache.hadoop.hive.ql.exec;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -181,7 +180,7 @@ void endGroup() throws IOException, HiveException {
RowContainer<ArrayList<Object>> bigKey = (RowContainer)joinOp.storage[currBigKeyTag];
Path outputPath = getOperatorOutputPath(specPath);
FileSystem destFs = outputPath.getFileSystem(hconf);
bigKey.copyToDFSDirecory(destFs, outputPath);
bigKey.copyToDFSDirectory(destFs, outputPath);

for (int i = 0; i < numAliases; i++) {
if (((byte) i) == currBigKeyTag) {
@@ -191,7 +190,7 @@ void endGroup() throws IOException, HiveException {
if (values != null) {
specPath = conf.getSmallKeysDirMap().get((byte) currBigKeyTag).get(
(byte) i);
values.copyToDFSDirecory(destFs, getOperatorOutputPath(specPath));
values.copyToDFSDirectory(destFs, getOperatorOutputPath(specPath));
}
}
}
@@ -468,7 +468,7 @@ private long refreshMemoryUsed() {
if (hp.hashMap != null) {
memUsed += hp.hashMap.memorySize();
} else {
// also include the still-in-memory sidefile, before it has been truely spilled
// also include the still-in-memory sidefile, before it has been truly spilled
if (hp.sidefileKVContainer != null) {
memUsed += hp.sidefileKVContainer.numRowsInReadBuffer() * tableRowSize;
}
@@ -627,7 +627,7 @@ private int biggestPartition() {
}
}

// It can happen that although there're some partitions in memory, but their sizes are all 0.
// It can happen that although there are some partitions in memory, but their sizes are all 0.
// In that case we just pick one and spill.
if (res == -1) {
for (int i = 0; i < hashPartitions.length; i++) {
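A toy illustration of the accounting described in refreshMemoryUsed above, where a spilled partition still charges the rows buffered in its in-memory sidefile (all numbers are invented):

    public class SidefileMemorySketch {
      public static void main(String[] args) {
        long hashMapBytes = 0L;            // hash map already spilled for this partition
        long rowsInReadBuffer = 128L;      // sidefile rows still held in memory
        long tableRowSize = 96L;           // estimated bytes per row
        long memUsed = hashMapBytes + rowsInReadBuffer * tableRowSize;
        System.out.println(memUsed);       // 12288
      }
    }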
@@ -295,7 +295,7 @@ private void removeKeys(ROW ret) {
}

private final ArrayList<Object> row = new ArrayList<Object>(2);

private void spillBlock(ROW[] block, int length) throws HiveException {
try {
if (tmpFile == null) {
@@ -405,7 +405,7 @@ protected boolean nextBlock(int readIntoOffset) throws HiveException {
}
}

public void copyToDFSDirecory(FileSystem destFs, Path destPath) throws IOException, HiveException {
public void copyToDFSDirectory(FileSystem destFs, Path destPath) throws IOException, HiveException {
if (addCursor > 0) {
this.spillBlock(this.currentWriteBlock, addCursor);
}
@@ -109,7 +109,6 @@
import java.io.IOException;
import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Set;
import java.util.HashSet;
import java.util.List;
@@ -808,7 +807,7 @@ && shouldBootstrapDumpAcidTable(table.getTableName())) {
return !ReplUtils.tableIncludedInReplScope(work.oldReplScope, table.getTableName());
}

private boolean isTableSatifiesConfig(Table table) {
private boolean doesTableSatisfyConfig(Table table) {
if (table == null) {
return false;
}
@@ -1105,7 +1104,7 @@ private Long incrementalDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive
dumpTable(exportService, matchedDbName, tableName, validTxnList, dbRootMetadata, dbRootData, bootDumpBeginReplId,
hiveDb, tableTuple, managedTblList, dataCopyAtLoad);
}
if (tableList != null && isTableSatifiesConfig(table)) {
if (tableList != null && doesTableSatisfyConfig(table)) {
tableList.add(tableName);
}
} catch (InvalidTableException te) {
@@ -1428,7 +1427,7 @@ Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive hiveDb)
LOG.debug(te.getMessage());
}
dumpConstraintMetadata(dbName, tblName, dbRoot, hiveDb, table != null ? table.getTTable().getId() : -1);
if (tableList != null && isTableSatifiesConfig(table)) {
if (tableList != null && doesTableSatisfyConfig(table)) {
tableList.add(tblName);
}
}
@@ -1660,7 +1659,7 @@ String getValidTxnListForReplDump(Hive hiveDb, long waitUntilTime) throws HiveEx
// phase won't be able to replicate those txns. So, the logic is to wait for the given amount
// of time to see if all open txns < current txn is getting aborted/committed. If not, then
// we forcefully abort those txns just like AcidHouseKeeperService.
//Exclude readonly and repl created tranasactions
//Exclude readonly and repl created transactions
HiveTxnManager hiveTxnManager = getTxnMgr();
ValidTxnList validTxnList = hiveTxnManager.getValidTxns(excludedTxns);
while (System.currentTimeMillis() < waitUntilTime) {
@@ -146,8 +146,8 @@ void setEventFrom(long eventId) {
void overrideLastEventToDump(Hive fromDb, long bootstrapLastId, long failoverEventId) throws Exception {
// If we are bootstrapping ACID tables, we need to dump all the events upto the event id at
// the beginning of the bootstrap dump and also not dump any event after that. So we override
// both, the last event as well as any user specified limit on the number of events. See
// bootstrampDump() for more details.
// both, the last event and any user specified limit on the number of events. See
// bootstrapDump() for more details.
if (failoverEventId > 0) {
LOG.info("eventTo : {} marked as failover eventId.", eventTo);
eventTo = failoverEventId;
@@ -84,7 +84,7 @@ void dataLocationDump(Table table, FileList fileList, HashMap<String, Boolean> s
}
if (!TableType.EXTERNAL_TABLE.equals(table.getTableType())) {
throw new IllegalArgumentException(
"only External tables can be writen via this writer, provided table is " + table
"only External tables can be written via this writer, provided table is " + table
.getTableType());
}
Path fullyQualifiedDataLocation = PathBuilder.fullyQualifiedHDFSUri(table.getDataLocation(), FileSystem.get(hiveConf));
@@ -31,7 +31,6 @@
import org.apache.hadoop.hive.ql.exec.repl.util.SnapshotUtils;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.parse.repl.load.log.IncrementalLoadLogger;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status;
import org.apache.thrift.TException;
import com.google.common.collect.Collections2;
@@ -164,7 +163,7 @@ public int execute() {
addAtlasLoadTask();
}
if (conf.getBoolVar(HiveConf.ConfVars.REPL_RANGER_HANDLE_DENY_POLICY_TARGET)) {
initiateRangerDenytask();
initiateRangerDenyTask();
}
if (shouldLoadAuthorizationMetadata()) {
initiateAuthorizationLoadTask();
@@ -203,7 +202,7 @@ private boolean shouldLoadAuthorizationMetadata() {
return conf.getBoolVar(HiveConf.ConfVars.REPL_INCLUDE_AUTHORIZATION_METADATA);
}

private void initiateRangerDenytask() throws SemanticException {
private void initiateRangerDenyTask() throws SemanticException {
if (RANGER_AUTHORIZER.equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.REPL_AUTHORIZATION_PROVIDER_SERVICE))) {
LOG.info("Adding Ranger Deny Policy Task for {} ", work.dbNameToLoadIn);
RangerDenyWork rangerDenyWork = new RangerDenyWork(new Path(work.getDumpDirectory()), work.getSourceDbName(),
@@ -669,7 +668,7 @@ public void run() throws SemanticException {

db.setParameters(params);
hiveDb.alterDatabase(work.getTargetDatabase(), db);
LOG.debug("Database {} poperties after removal {}", work.getTargetDatabase(), params);
LOG.debug("Database {} properties after removal {}", work.getTargetDatabase(), params);
} catch (HiveException e) {
throw new SemanticException(e);
}
@@ -851,7 +850,7 @@ private int executeIncrementalLoad(long loadStartTime) throws Exception {

Hive db = getHive();
for (String table : work.tablesToDrop) {
LOG.info("Dropping table {} for optimised bootstarap", work.dbNameToLoadIn + "." + table);
LOG.info("Dropping table {} for optimised bootstrap", work.dbNameToLoadIn + "." + table);
db.dropTable(work.dbNameToLoadIn + "." + table, true);
}
Database sourceDb = getSourceDbMetadata(); //This sourceDb was the actual target prior to failover.
@@ -218,7 +218,7 @@ private ObjectName initializeMetricsMBeans(HiveConf hiveConf, String dbNameToLoa
return null;
}

// Unregisters MBeans by forming the Metrics same as how the Hadoop code forms during MBean registeration.
// Unregisters MBeans by forming the Metrics same as how the Hadoop code forms during MBean registration.
private void unRegisterMBeanIfRegistered(String serviceName, String nameName,
Map<String, String> additionalParameters) {

@@ -284,7 +284,7 @@ public Task<?> getRootTask() {

@Override
public String getDumpDirectory() {return dumpDirectory;}

public void setRootTask(Task<?> rootTask) {
this.rootTask = rootTask;
}
