diff --git a/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java b/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java index 6e9b6beacd62..28fa415b43eb 100644 --- a/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java +++ b/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java @@ -310,7 +310,7 @@ public DiskRangeList createCacheChunk( arrayOffset + offsetFromReadStart + extraDiskDataOffset, smallSize, bb, cacheRanges, largeBufCount, chunkFrom + extraOffsetInChunk); extraDiskDataOffset += smallSize; - extraOffsetInChunk += smallSize; // Not strictly necessary, noone will look at it. + extraOffsetInChunk += smallSize; // Not strictly necessary, no one will look at it. if (newCacheData == null) { newCacheData = smallBuffer; } else { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Compiler.java b/ql/src/java/org/apache/hadoop/hive/ql/Compiler.java index b6c955485fe8..26377ed9374f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Compiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Compiler.java @@ -89,7 +89,7 @@ public Compiler(Context context, DriverContext driverContext, DriverState driver /** * @param deferClose indicates if the close/destroy should be deferred when the process has been interrupted - * it should be set to true if the compile is called within another method like runInternal, + * it should be set to true if the compile method is called within another method like runInternal, * which defers the close to the called in that method. */ public QueryPlan compile(String rawCommand, boolean deferClose) throws CommandProcessorException { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/HiveQueryLifeTimeHook.java b/ql/src/java/org/apache/hadoop/hive/ql/HiveQueryLifeTimeHook.java index 3e125da08454..9e21b586e0ff 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/HiveQueryLifeTimeHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/HiveQueryLifeTimeHook.java @@ -87,11 +87,11 @@ private void checkAndRollbackCTAS(QueryLifeTimeHookContext ctx) { if (table != null) { LOG.info("Performing cleanup as part of rollback: {}", table.getFullTableName().toString()); try { - CompactionRequest rqst = new CompactionRequest(table.getDbName(), table.getTableName(), CompactionType.MAJOR); - rqst.setRunas(TxnUtils.findUserToRunAs(tblPath.toString(), table.getTTable(), conf)); - rqst.putToProperties(META_TABLE_LOCATION, tblPath.toString()); - rqst.putToProperties(IF_PURGE, Boolean.toString(true)); - boolean success = Hive.get(conf).getMSC().submitForCleanup(rqst, writeId, + CompactionRequest request = new CompactionRequest(table.getDbName(), table.getTableName(), CompactionType.MAJOR); + request.setRunas(TxnUtils.findUserToRunAs(tblPath.toString(), table.getTTable(), conf)); + request.putToProperties(META_TABLE_LOCATION, tblPath.toString()); + request.putToProperties(IF_PURGE, Boolean.toString(true)); + boolean success = Hive.get(conf).getMSC().submitForCleanup(request, writeId, pCtx.getQueryState().getTxnManager().getCurrentTxnId()); if (success) { LOG.info("The cleanup request has been submitted"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/IDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/IDriver.java index 73bca0bbebdf..efa1223db5ae 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/IDriver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/IDriver.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; /** - * Hive query executer driver. + * Hive query executor driver. 
*/ @InterfaceAudience.Private @InterfaceStability.Unstable diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java index cdba54c5b652..35eccd2909a1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java @@ -47,7 +47,7 @@ public class QueryDisplay { /** - * Preffered objectmapper for this class. + * Preferred objectMapper for this class. * * It must be used to have things work in shaded environment (and its also more performant). */ @@ -307,7 +307,7 @@ public synchronized void setExplainPlan(String explainPlan) { /** * @param phase phase of query - * @return map of HMS Client method-calls and duration in miliseconds, during given phase. + * @return map of HMS Client method-calls and duration in milliseconds, during given phase. */ public synchronized Map getHmsTimings(Phase phase) { return hmsTimingMap.get(phase); @@ -315,7 +315,7 @@ public synchronized Map getHmsTimings(Phase phase) { /** * @param phase phase of query - * @param hmsTimings map of HMS Client method-calls and duration in miliseconds, during given phase. + * @param hmsTimings map of HMS Client method-calls and duration in milliseconds, during given phase. */ public synchronized void setHmsTimings(Phase phase, Map hmsTimings) { hmsTimingMap.put(phase, hmsTimings); @@ -323,7 +323,7 @@ public synchronized void setHmsTimings(Phase phase, Map hmsTimings /** * @param phase phase of query - * @return map of PerfLogger call-trace name and start time in miliseconds, during given phase. + * @return map of PerfLogger call-trace name and start time in milliseconds, during given phase. */ public synchronized Map getPerfLogStarts(Phase phase) { return perfLogStartMap.get(phase); @@ -331,7 +331,7 @@ public synchronized Map getPerfLogStarts(Phase phase) { /** * @param phase phase of query - * @param perfLogStarts map of PerfLogger call-trace name and start time in miliseconds, during given phase. + * @param perfLogStarts map of PerfLogger call-trace name and start time in milliseconds, during given phase. */ public synchronized void setPerfLogStarts(Phase phase, Map perfLogStarts) { perfLogStartMap.put(phase, perfLogStarts); @@ -339,7 +339,7 @@ public synchronized void setPerfLogStarts(Phase phase, Map perfLog /** * @param phase phase of query - * @return map of PerfLogger call-trace name and end time in miliseconds, during given phase. + * @return map of PerfLogger call-trace name and end time in milliseconds, during given phase. */ public synchronized Map getPerfLogEnds(Phase phase) { return perfLogEndMap.get(phase); @@ -347,7 +347,7 @@ public synchronized Map getPerfLogEnds(Phase phase) { /** * @param phase phase of query - * @param perfLogEnds map of PerfLogger call-trace name and end time in miliseconds, during given phase. + * @param perfLogEnds map of PerfLogger call-trace name and end time in milliseconds, during given phase. */ public synchronized void setPerfLogEnds(Phase phase, Map perfLogEnds) { perfLogEndMap.put(phase, perfLogEnds); @@ -355,7 +355,7 @@ public synchronized void setPerfLogEnds(Phase phase, Map perfLogEn /** * @param phase phase of query - * @return map of PerfLogger call-trace name and duration in miliseconds, during given phase. + * @return map of PerfLogger call-trace name and duration in milliseconds, during given phase. 
*/ public synchronized Map getPerfLogTimes(Phase phase) { Map times = new HashMap<>(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java index 5e4f9b23425c..53db08b8a085 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hive.ql.ddl.ShowUtils; import org.apache.hadoop.hive.ql.ddl.table.info.desc.formatter.DescTableFormatter; import org.apache.hadoop.hive.ql.exec.ColumnInfo; -import org.apache.hadoop.hive.ql.lockmgr.LockException; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.metadata.Hive; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java index 6ad0556b5528..ebe8f2f52775 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java @@ -234,10 +234,10 @@ public static String getPartialName(Partition p, int level) throws HiveException /** * Determines if one can insert into partition(s), or there's a conflict with - * archive. It can be because partition is itself archived or it is to be + * archive. It can be because partition is itself archived, or it is to be * created inside existing archive. The second case is when partition doesn't - * exist yet, but it would be inside of an archive if it existed. This one is - * quite tricky to check, we need to find at least one partition inside of + * exist yet, but it would be inside an archive if it existed. This one is + * quite tricky to check, we need to find at least one partition inside * the parent directory. If it is archived and archiving level tells that * the archival was done of directory partition is in it means we cannot * insert; otherwise we can. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/BoundaryCache.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/BoundaryCache.java index 7cf278ca05ec..3c8b7c567cf9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/BoundaryCache.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/BoundaryCache.java @@ -97,7 +97,7 @@ public void clear() { } /** - * Returns entry corresponding to highest row index. + * Returns entry corresponding to the highest row index. * @return max entry. */ public Map.Entry getMaxEntry() { @@ -105,7 +105,7 @@ public Map.Entry getMaxEntry() { } /** - * Removes eldest entry from the boundary cache. + * Removes the eldest entry from the boundary cache. */ public void evictOne() { if (queue.isEmpty()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 9d30093c2398..03b0c76b40f9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -1136,8 +1136,8 @@ public void process(Object row, int tag) throws HiveException { } rowOutWriters = fpaths.outWriters; - // check if all record writers implement statistics. if atleast one RW - // doesn't implement stats interface we will fallback to conventional way + // check if all record writers implement statistics. 
if at least one RW + // doesn't implement stats interface we will fall back to conventional way // of gathering stats isCollectRWStats = areAllTrue(statsFromRecordWriter); if (conf.isGatherStats() && !isCollectRWStats) { @@ -1637,7 +1637,7 @@ public void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException } } if (conf.getTableInfo().isNonNative()) { - //check the ouput specs only if it is a storage handler (native tables's outputformats does + //check the output specs only if it is a storage handler (native tables's outputformats does //not set the job's output properties correctly) try { hiveOutputFormat.checkOutputSpecs(ignored, job); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java index f118aa19bb3e..f548afd52401 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java @@ -1129,7 +1129,7 @@ public static boolean shouldEmitSummaryRow(GroupByDesc desc) { int groupingSetPosition = desc.getGroupingSetPosition(); List listGroupingSets = desc.getListGroupingSets(); // groupingSets are known at map/reducer side; but have to do real processing - // hence grouppingSetsPresent is true only at map side + // hence groupingSetsPresent is true only at map side if (groupingSetPosition >= 0 && listGroupingSets != null) { Long emptyGrouping = (1L << groupingSetPosition) - 1; if (listGroupingSets.contains(emptyGrouping)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java index e9920d65665f..cda449be2837 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java @@ -426,7 +426,7 @@ private static ObjectInspector createStructFromFields(List fields, private static ObjectInspector unflattenObjInspector(ObjectInspector oi) { if (oi instanceof StructObjectInspector) { // Check if all fields start with "key." or "value." 
- // If so, then unflatten by adding an additional level of nested key and value structs + // If so, then unflatten by adding a level of nested key and value structs // Example: { "key.reducesinkkey0":int, "key.reducesinkkey1": int, "value._col6":int } // Becomes // { "key": { "reducesinkkey0":int, "reducesinkkey1":int }, "value": { "_col6":int } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java index 94b63f2e2633..d18e53ef3d5c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hive.ql.exec; -import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -181,7 +180,7 @@ void endGroup() throws IOException, HiveException { RowContainer> bigKey = (RowContainer)joinOp.storage[currBigKeyTag]; Path outputPath = getOperatorOutputPath(specPath); FileSystem destFs = outputPath.getFileSystem(hconf); - bigKey.copyToDFSDirecory(destFs, outputPath); + bigKey.copyToDFSDirectory(destFs, outputPath); for (int i = 0; i < numAliases; i++) { if (((byte) i) == currBigKeyTag) { @@ -191,7 +190,7 @@ void endGroup() throws IOException, HiveException { if (values != null) { specPath = conf.getSmallKeysDirMap().get((byte) currBigKeyTag).get( (byte) i); - values.copyToDFSDirecory(destFs, getOperatorOutputPath(specPath)); + values.copyToDFSDirectory(destFs, getOperatorOutputPath(specPath)); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java index 545a7296526d..e66977f758a8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java @@ -468,7 +468,7 @@ private long refreshMemoryUsed() { if (hp.hashMap != null) { memUsed += hp.hashMap.memorySize(); } else { - // also include the still-in-memory sidefile, before it has been truely spilled + // also include the still-in-memory sidefile, before it has been truly spilled if (hp.sidefileKVContainer != null) { memUsed += hp.sidefileKVContainer.numRowsInReadBuffer() * tableRowSize; } @@ -627,7 +627,7 @@ private int biggestPartition() { } } - // It can happen that although there're some partitions in memory, but their sizes are all 0. + // It can happen that although there are some partitions in memory, but their sizes are all 0. // In that case we just pick one and spill. 
if (res == -1) { for (int i = 0; i < hashPartitions.length; i++) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java index 4a47ca0278bc..67ab1e979456 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java @@ -295,7 +295,7 @@ private void removeKeys(ROW ret) { } private final ArrayList row = new ArrayList(2); - + private void spillBlock(ROW[] block, int length) throws HiveException { try { if (tmpFile == null) { @@ -405,7 +405,7 @@ protected boolean nextBlock(int readIntoOffset) throws HiveException { } } - public void copyToDFSDirecory(FileSystem destFs, Path destPath) throws IOException, HiveException { + public void copyToDFSDirectory(FileSystem destFs, Path destPath) throws IOException, HiveException { if (addCursor > 0) { this.spillBlock(this.currentWriteBlock, addCursor); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java index e0b58a64493e..0f69a4ee9eb5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java @@ -109,7 +109,6 @@ import java.io.IOException; import java.io.Serializable; import java.nio.charset.StandardCharsets; -import java.util.LinkedHashMap; import java.util.Set; import java.util.HashSet; import java.util.List; @@ -798,7 +797,7 @@ && shouldBootstrapDumpAcidTable(table.getTableName())) { return !ReplUtils.tableIncludedInReplScope(work.oldReplScope, table.getTableName()); } - private boolean isTableSatifiesConfig(Table table) { + private boolean doesTableSatisfyConfig(Table table) { if (table == null) { return false; } @@ -1095,7 +1094,7 @@ private Long incrementalDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive dumpTable(exportService, matchedDbName, tableName, validTxnList, dbRootMetadata, dbRootData, bootDumpBeginReplId, hiveDb, tableTuple, managedTblList, dataCopyAtLoad); } - if (tableList != null && isTableSatifiesConfig(table)) { + if (tableList != null && doesTableSatisfyConfig(table)) { tableList.add(tableName); } } catch (InvalidTableException te) { @@ -1418,7 +1417,7 @@ Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive hiveDb) LOG.debug(te.getMessage()); } dumpConstraintMetadata(dbName, tblName, dbRoot, hiveDb, table != null ? table.getTTable().getId() : -1); - if (tableList != null && isTableSatifiesConfig(table)) { + if (tableList != null && doesTableSatisfyConfig(table)) { tableList.add(tblName); } } @@ -1650,7 +1649,7 @@ String getValidTxnListForReplDump(Hive hiveDb, long waitUntilTime) throws HiveEx // phase won't be able to replicate those txns. So, the logic is to wait for the given amount // of time to see if all open txns < current txn is getting aborted/committed. If not, then // we forcefully abort those txns just like AcidHouseKeeperService. 
- //Exclude readonly and repl created tranasactions + //Exclude readonly and repl created transactions HiveTxnManager hiveTxnManager = getTxnMgr(); ValidTxnList validTxnList = hiveTxnManager.getValidTxns(excludedTxns); while (System.currentTimeMillis() < waitUntilTime) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java index 65d9c17a6756..2e626be7ebc0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java @@ -146,8 +146,8 @@ void setEventFrom(long eventId) { void overrideLastEventToDump(Hive fromDb, long bootstrapLastId, long failoverEventId) throws Exception { // If we are bootstrapping ACID tables, we need to dump all the events upto the event id at // the beginning of the bootstrap dump and also not dump any event after that. So we override - // both, the last event as well as any user specified limit on the number of events. See - // bootstrampDump() for more details. + // both, the last event and any user specified limit on the number of events. See + // bootstrapDump() for more details. if (failoverEventId > 0) { LOG.info("eventTo : {} marked as failover eventId.", eventTo); eventTo = failoverEventId; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplExternalTables.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplExternalTables.java index 14e3b59cff9d..8f48a6ddda18 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplExternalTables.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplExternalTables.java @@ -84,7 +84,7 @@ void dataLocationDump(Table table, FileList fileList, HashMap s } if (!TableType.EXTERNAL_TABLE.equals(table.getTableType())) { throw new IllegalArgumentException( - "only External tables can be writen via this writer, provided table is " + table + "only External tables can be written via this writer, provided table is " + table .getTableType()); } Path fullyQualifiedDataLocation = PathBuilder.fullyQualifiedHDFSUri(table.getDataLocation(), FileSystem.get(hiveConf)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java index 82f30d1f26a2..0b1e203e1e40 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hive.ql.exec.repl.util.SnapshotUtils; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import org.apache.hadoop.hive.ql.parse.repl.load.log.IncrementalLoadLogger; -import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.thrift.TException; import com.google.common.collect.Collections2; @@ -164,7 +163,7 @@ public int execute() { addAtlasLoadTask(); } if (conf.getBoolVar(HiveConf.ConfVars.REPL_RANGER_HANDLE_DENY_POLICY_TARGET)) { - initiateRangerDenytask(); + initiateRangerDenyTask(); } if (shouldLoadAuthorizationMetadata()) { initiateAuthorizationLoadTask(); @@ -203,7 +202,7 @@ private boolean shouldLoadAuthorizationMetadata() { return conf.getBoolVar(HiveConf.ConfVars.REPL_INCLUDE_AUTHORIZATION_METADATA); } - private void initiateRangerDenytask() throws SemanticException { + private void initiateRangerDenyTask() throws SemanticException { if 
(RANGER_AUTHORIZER.equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.REPL_AUTHORIZATION_PROVIDER_SERVICE))) { LOG.info("Adding Ranger Deny Policy Task for {} ", work.dbNameToLoadIn); RangerDenyWork rangerDenyWork = new RangerDenyWork(new Path(work.getDumpDirectory()), work.getSourceDbName(), @@ -669,7 +668,7 @@ public void run() throws SemanticException { db.setParameters(params); hiveDb.alterDatabase(work.getTargetDatabase(), db); - LOG.debug("Database {} poperties after removal {}", work.getTargetDatabase(), params); + LOG.debug("Database {} properties after removal {}", work.getTargetDatabase(), params); } catch (HiveException e) { throw new SemanticException(e); } @@ -847,7 +846,7 @@ private int executeIncrementalLoad(long loadStartTime) throws Exception { Hive db = getHive(); for (String table : work.tablesToDrop) { - LOG.info("Dropping table {} for optimised bootstarap", work.dbNameToLoadIn + "." + table); + LOG.info("Dropping table {} for optimised bootstrap", work.dbNameToLoadIn + "." + table); db.dropTable(work.dbNameToLoadIn + "." + table, true); } Database sourceDb = getSourceDbMetadata(); //This sourceDb was the actual target prior to failover. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java index ea5c0fd15031..6ef95ea55ede 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java @@ -218,7 +218,7 @@ private ObjectName initializeMetricsMBeans(HiveConf hiveConf, String dbNameToLoa return null; } - // Unregisters MBeans by forming the Metrics same as how the Hadoop code forms during MBean registeration. + // Unregisters MBeans by forming the Metrics same as how the Hadoop code forms during MBean registration. private void unRegisterMBeanIfRegistered(String serviceName, String nameName, Map additionalParameters) { @@ -284,7 +284,7 @@ public Task getRootTask() { @Override public String getDumpDirectory() {return dumpDirectory;} - + public void setRootTask(Task rootTask) { this.rootTask = rootTask; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java index 82dc4a704e16..4a9758808e4f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java @@ -40,7 +40,7 @@ public VectorGroupKeyHelper(int keyCount) { void init(VectorExpression[] keyExpressions) throws HiveException { - // NOTE: To support pruning the grouping set id dummy key by VectorGroupbyOpeator MERGE_PARTIAL + // NOTE: To support pruning the grouping set id dummy key by VectorGroupByOperator MERGE_PARTIAL // case, we use the keyCount passed to the constructor and not keyExpressions.length. // Inspect the output type of each key expression. And, remember the output columns. @@ -62,7 +62,7 @@ void init(VectorExpression[] keyExpressions) throws HiveException { /* * This helper method copies the group keys from one vectorized row batch to another, * but does not increment the outputBatch.size (i.e. the next output position). - * + * * It was designed for VectorGroupByOperator's sorted reduce group batch processing mode * to copy the group keys at startGroup. 
*/ @@ -75,7 +75,7 @@ public void copyGroupKey(VectorizedRowBatch inputBatch, VectorizedRowBatch outpu LongColumnVector inputColumnVector = (LongColumnVector) inputBatch.cols[inputColumnNum]; LongColumnVector outputColumnVector = (LongColumnVector) outputBatch.cols[outputColumnNum]; - // This vectorized code pattern says: + // This vectorized code pattern says: // If the input batch has no nulls at all (noNulls is true) OR // the input row is NOT NULL, copy the value. // diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ListIndexColColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ListIndexColColumn.java index 9d2cdeffd178..6f5b176b77f1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ListIndexColColumn.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ListIndexColColumn.java @@ -210,7 +210,7 @@ public void evaluate(VectorizedRowBatch batch) throws HiveException { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /* - * Same LIST for entire batch. Still need to validate the LIST upper limit against varing + * Same LIST for entire batch. Still need to validate the LIST upper limit against varying * INDEX. * * (Repeated INDEX case handled above). diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupConcatColCol.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupConcatColCol.java index 35937deeeba3..19be05691dbe 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupConcatColCol.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupConcatColCol.java @@ -402,7 +402,7 @@ private static void propagateNullsCombine(boolean selectedInUse, int n, int[] se * @param sel selected value position array * @param n number of qualifying rows * @param inV input vector - * @param outV ouput vector + * @param outV output vector */ private static void propagateNulls(boolean selectedInUse, int n, int[] sel, ColumnVector inV, ColumnVector outV) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java index 904dd4bebded..618c7108f677 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java @@ -465,11 +465,11 @@ private String getUser(HookContext hookContext) { } private String getRequestUser(HookContext hookContext) { - String requestuser = hookContext.getUserName(); - if (requestuser == null) { - requestuser = hookContext.getUgi().getUserName(); + String requestUser = hookContext.getUserName(); + if (requestUser == null) { + requestUser = hookContext.getUgi().getUserName(); } - return requestuser; + return requestUser; } private String getQueueName(ExecutionMode mode, HiveConf conf) { @@ -555,7 +555,7 @@ public void run(HookContext hookContext) throws Exception { EventLogger logger = EventLogger.getInstance(hookContext.getConf()); logger.handle(hookContext); } catch (Exception e) { - LOG.error("Got exceptoin while processing event: ", e); + LOG.error("Got exception while processing event: ", e); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java index 3a86d24180ed..cd23b247063a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java @@ -62,7 +62,7 @@ static public enum HookType { DRIVER_RUN_HOOKS(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS, HiveDriverRunHook.class, "Hooks that Will be run at the beginning and end of Driver.run"), QUERY_REDACTOR_HOOKS(HiveConf.ConfVars.QUERYREDACTORHOOKS, Redactor.class, - "Hooks to be invoked for each query which can tranform the query before it's placed in the job.xml file"), + "Hooks to be invoked for each query which can transform the query before it's placed in the job.xml file"), // The HiveSessionHook.class cannot access, use Hook.class instead HIVE_SERVER2_SESSION_HOOK(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK, Hook.class, "Hooks to be executed when session manager starts a new session"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java index f4e285c8b75e..be64185f7a9f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java @@ -125,11 +125,11 @@ public WriteEntity(DummyPartition p, WriteType type, boolean complete) { * * @param d * The name of the directory that is being written to. - * @param islocal + * @param isLocal * Flag to decide whether this directory is local or in dfs. */ - public WriteEntity(Path d, boolean islocal) { - this(d, islocal, false); + public WriteEntity(Path d, boolean isLocal) { + this(d, isLocal, false); } /** @@ -137,13 +137,13 @@ public WriteEntity(Path d, boolean islocal) { * * @param d * The name of the directory that is being written to. - * @param islocal + * @param isLocal * Flag to decide whether this directory is local or in dfs. * @param isTemp * True if this is a temporary location such as scratch dir */ - public WriteEntity(Path d, boolean islocal, boolean isTemp) { - super(d, islocal, true); + public WriteEntity(Path d, boolean isLocal, boolean isTemp) { + super(d, isLocal, true); this.isTempURI = isTemp; this.writeType = WriteType.PATH_WRITE; } @@ -222,7 +222,7 @@ public static WriteType determineAlterTableWriteType(AlterTableType op, Table ta // Not used, @see org.apache.hadoop.hive.ql.ddl.table.storage.skewed.AlterTableSkewedByAnalyzer // alter table {table_name} skewed by (col_name1, col_name2, ...) // on ([(col_name1_value, col_name2_value, ...) [, (col_name1_value, col_name2_value), ...] [stored as directories] - case SET_SKEWED_LOCATION: + case SET_SKEWED_LOCATION: // alter table {table_name} set skewed location (col_name1="location1" [, col_name2="location2", ...] ) case INTO_BUCKETS: // Not used, @see org.apache.hadoop.hive.ql.ddl.table.storage.cluster.AlterTableIntoBucketsAnalyzer @@ -241,16 +241,16 @@ public static WriteType determineAlterTableWriteType(AlterTableType op, Table ta } else { return WriteType.DDL_EXCLUSIVE; } - + case CLUSTERED_BY: - // alter table {table_name} clustered by (col_name, col_name, ...) [sorted by (col_name, ...)] + // alter table {table_name} clustered by (col_name, col_name, ...) 
[sorted by (col_name, ...)] // into {num_buckets} buckets; case NOT_SORTED: case NOT_CLUSTERED: case SET_FILE_FORMAT: // alter table {table_name} [partition ({partition_spec})] set fileformat {file_format} case SET_SERDE: - // alter table {table_name} [PARTITION ({partition_spec})] set serde '{serde_class_name}' + // alter table {table_name} [PARTITION ({partition_spec})] set serde '{serde_class_name}' case ADDCOLS: case REPLACE_COLUMNS: // alter table {table_name} [partition ({partition_spec})] add/replace columns ({col_name} {data_type}) @@ -261,10 +261,10 @@ public static WriteType determineAlterTableWriteType(AlterTableType op, Table ta case OWNER: case RENAME: // alter table {table_name} rename to {new_table_name} - case DROPPROPS: - return AcidUtils.isLocklessReadsEnabled(table, conf) ? + case DROPPROPS: + return AcidUtils.isLocklessReadsEnabled(table, conf) ? WriteType.DDL_EXCL_WRITE : WriteType.DDL_EXCLUSIVE; - + case ADDPARTITION: // Not used: @see org.apache.hadoop.hive.ql.ddl.table.partition.add.AbstractAddPartitionAnalyzer // alter table {table_name} add [if not exists] partition ({partition_spec}) [location '{location}'] @@ -273,19 +273,19 @@ public static WriteType determineAlterTableWriteType(AlterTableType op, Table ta case ADDPROPS: case UPDATESTATS: return WriteType.DDL_SHARED; - + case COMPACT: - // alter table {table_name} [partition (partition_key = 'partition_value' [, ...])] + // alter table {table_name} [partition (partition_key = 'partition_value' [, ...])] // compact 'compaction_type'[and wait] [with overwrite tblproperties ("property"="value" [, ...])]; case TOUCH: // alter table {table_name} touch [partition ({partition_spec})] return WriteType.DDL_NO_LOCK; - + default: throw new RuntimeException("Unknown operation " + op.toString()); } } - + public boolean isDynamicPartitionWrite() { return isDynamicPartitionWrite; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 9853818f09b8..50f642841f43 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -1742,8 +1742,8 @@ public String toString() { sb.append("Path: " + dirPath); sb.append("; "); sb.append("Files: { "); - for (FileStatus fstatus : files) { - sb.append(fstatus); + for (FileStatus fStatus : files) { + sb.append(fStatus); sb.append(", "); } sb.append(" }"); @@ -2608,7 +2608,7 @@ public static final class OrcAcidVersion { */ public static final int ORC_ACID_VERSION = 2; /** - * Inlucde current acid version in file footer. + * Include current acid version in file footer. * @param writer - file written */ public static void setAcidVersionInDataFile(Writer writer) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java b/ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java index c6f39a157a96..9299699608dc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java @@ -53,26 +53,26 @@ public int encode(AcidOutputFormat.Options options) { * next 4 bits reserved for future * remaining 12 bits - the statement ID - 0-based numbering of all statements within a * transaction. Each leg of a multi-insert statement gets a separate statement ID. - * The reserved bits align it so that it easier to interpret it in Hex. - * + * The reserved bits align it so that it's easier to interpret it in Hex. 
+ * * Constructs like Merge and Multi-Insert may have multiple tasks writing data that belongs to * the same physical bucket file. For example, a Merge stmt with update and insert clauses, * (and split update enabled - should be the default in 3.0). A task on behalf of insert may * be writing a row into bucket 0 and another task in the update branch may be writing an insert - * event into bucket 0. Each of these task are writing to different delta directory - distinguished + * event into bucket 0. Each of these tasks are writing to different delta directory - distinguished * by statement ID. By including both bucket ID and statement ID in {@link RecordIdentifier} * we ensure that {@link RecordIdentifier} is unique. - * + * * The intent is that sorting rows by {@link RecordIdentifier} groups rows in the same physical * bucket next to each other. * For any row created by a given version of Hive, top 3 bits are constant. The next * most significant bits are the bucket ID, then the statement ID. This ensures that * {@link org.apache.hadoop.hive.ql.optimizer.SortedDynPartitionOptimizer} works which is * designed so that each task only needs to keep 1 writer opened at a time. It could be - * configured such that a single writer sees data for multiple buckets so it must "group" data + * configured such that a single writer sees data for multiple buckets, so it must "group" data * by bucket ID (and then sort within each bucket as required) which is achieved via sorting * by {@link RecordIdentifier} which includes the {@link RecordIdentifier#getBucketProperty()} - * which has the actual bucket ID in the high order bits. This scheme also ensures that + * which has the actual bucket ID in the high order bits. This scheme also ensures that * {@link org.apache.hadoop.hive.ql.exec.FileSinkOperator#process(Object, int)} works in case * there numBuckets > numReducers. (The later could be fixed by changing how writers are * initialized in "if (fpaths.acidLastBucket != bucketNum) {") diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java index 746bd32b937b..c6bbe1ab06a3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java @@ -212,7 +212,7 @@ public class FlatFileRecordReader implements * (potentially decompressed) and creates the deserializer. * * @param conf - * the jobconf + * the JobConf * @param split * the split for this file */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/SchemaAwareCompressionOutputStream.java b/ql/src/java/org/apache/hadoop/hive/ql/io/SchemaAwareCompressionOutputStream.java index f91b0702b123..7e3b67045669 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/SchemaAwareCompressionOutputStream.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/SchemaAwareCompressionOutputStream.java @@ -24,7 +24,7 @@ /** * - * SchemaAwareCompressionOutputStream adds the ability to inform the comression stream + * SchemaAwareCompressionOutputStream adds the ability to inform the compression stream * the current column being compressed. 
* */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/esriJson/UnenclosedBaseJsonRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/esriJson/UnenclosedBaseJsonRecordReader.java index 797cd82ba78b..6e43dab71023 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/esriJson/UnenclosedBaseJsonRecordReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/esriJson/UnenclosedBaseJsonRecordReader.java @@ -155,7 +155,7 @@ public boolean next(LongWritable key, Text value) throws IOException { if (chr < 0) { if (first_brace_found) { // last record was invalid - LOG.error("Parsing error : EOF occured before record ended"); + LOG.error("Parsing error : EOF occurred before record ended"); } return false; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java index 1ebad2d7f6e2..775005ac0f93 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java @@ -1454,7 +1454,7 @@ private void copyAndReplaceUncompressedChunks(UncompressedCacheChunk candidateCa BufferChunk chunk = (i == 0) ? candidateCached.getChunk() : (BufferChunk)next; dest.put(chunk.getData()); if (isValid) { - trace.logValidUncompresseedChunk(startLim - startPos, chunk); + trace.logValidUncompressedChunk(startLim - startPos, chunk); } next = chunk.next; if (i == 0) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/IoTrace.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/IoTrace.java index 63af0647689e..141df96de302 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/IoTrace.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/IoTrace.java @@ -60,7 +60,7 @@ public IoTrace(int byteSize, boolean isAlwaysDump) { SARG_RESULT = 4, RANGES = 5, COLUMN_READ = 6, SKIP_STREAM = 7, ADD_STREAM = 8, START_RG = 9, START_COL = 10, START_STRIPE_STREAM = 11, START_STREAM = 12, START_READ = 13, UNCOMPRESSED_DATA = 14, - PARTIAL_UNCOMPRESSED_DATA = 15, VALID_UNCOMPRESSEED_CHUNK = 16, CACHE_COLLISION = 17, + PARTIAL_UNCOMPRESSED_DATA = 15, VALID_UNCOMPRESSED_CHUNK = 16, CACHE_COLLISION = 17, ORC_CB = 18, INVALID_ORC_CB = 19, PARTIAL_CB = 20, COMPOSITE_ORC_CB = 21, SARG_RESULT2 = 22; public void reset() { @@ -183,7 +183,7 @@ private static int dumpOneLine(int ix, Logger logger, long[] log) { + offset + ", " + (offset + getSecondInt(log[ix])) + ")"); return ix + 2; } - case VALID_UNCOMPRESSEED_CHUNK: { + case VALID_UNCOMPRESSED_CHUNK: { logger.info(ix + ": Combining uncompressed data for cache buffer of length " + getSecondInt(log[ix]) + " from 0x" + Integer.toHexString((int)log[ix + 1])); return ix + 2; @@ -396,11 +396,11 @@ public void logPartialUncompressedData(long partOffset, long candidateEnd, boole this.offset += 2; } - public void logValidUncompresseedChunk(int totalLength, DiskRange chunk) { + public void logValidUncompressedChunk(int totalLength, DiskRange chunk) { if (log == null) return; int offset = this.offset; if (offset + 2 > log.length) return; - log[offset] = makeIntPair(VALID_UNCOMPRESSEED_CHUNK, totalLength); + log[offset] = makeIntPair(VALID_UNCOMPRESSED_CHUNK, totalLength); log[offset + 1] = chunk.hasData() ? 
System.identityHashCode(chunk.getData()) : 0; this.offset += 2; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDataColumnReaderFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDataColumnReaderFactory.java index 5e7a1ddfcfb4..6b44459b8064 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDataColumnReaderFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/ParquetDataColumnReaderFactory.java @@ -83,7 +83,7 @@ public static class DefaultParquetDataColumnReader implements ParquetDataColumnR protected Dictionary dict; // After the data is read in the parquet type, isValid will be set to true if the data can be - // returned in the type defined in HMS. Otherwise isValid is set to false. + // returned in the type defined in HMS. Otherwise, isValid is set to false. boolean isValid = true; protected int hivePrecision = 0; @@ -272,7 +272,7 @@ public Dictionary getDictionary() { } /** - * Enforce the max legnth of varchar or char. + * Enforce the max length of varchar or char. */ protected String enforceMaxLength(String value) { return HiveBaseChar.enforceMaxLength(value, length); @@ -1909,8 +1909,8 @@ private static ParquetDataColumnReader getDataColumnReaderByTypeHelper(boolean i case INT96: ZoneId targetZone = skipTimestampConversion ? ZoneOffset.UTC : firstNonNull(writerTimezone, TimeZone.getDefault().toZoneId()); - return isDictionary ? - new TypesFromInt96PageReader(dictionary, length, targetZone, legacyConversionEnabled) : + return isDictionary ? + new TypesFromInt96PageReader(dictionary, length, targetZone, legacyConversionEnabled) : new TypesFromInt96PageReader(valuesReader, length, targetZone, legacyConversionEnabled); case BOOLEAN: return isDictionary ? new TypesFromBooleanPageReader(dictionary, length) : new diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java index fe407c13a2a6..262427d2c861 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; /** - * Implentation of the Rule interface for Nodes Used in Node dispatching to dispatch + * Implementation of the Rule interface for Nodes Used in Node dispatching to dispatch * process/visitor functions for Nodes. The cost method returns 1 if there is an exact * match between the expression and the stack, otherwise -1. */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogInputFormat.java index 810788104a14..c7edb768aad2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogInputFormat.java @@ -273,7 +273,7 @@ public RecordReader getRecordReader(final InputSplit generic jobConf = projectionPusher.pushProjectionsAndFilters(job, finalPath.getParent()); } // textIF considers '\r' or '\n' as line ending but syslog uses '\r' for escaping new lines. 
So to read multi-line - // exceptions correctly we explictly use only '\n' + // exceptions correctly we explicitly use only '\n' jobConf.set("textinputformat.record.delimiter", "\n"); return super.getRecordReader(genericSplit, jobConf, reporter); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogParser.java b/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogParser.java index 66ed2e53f01a..d840f3f31d42 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogParser.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogParser.java @@ -295,13 +295,13 @@ public List readEvent() throws IOException { expect(' '); - byte[] appname = null; + byte[] appName = null; byte[] procId = null; byte[] msgId = null; Map structuredData = null; if (version >= 1) { - appname = readWordOrNil(48); + appName = readWordOrNil(48); expect(' '); procId = readWordOrNil(12); expect(' '); @@ -310,7 +310,7 @@ public List readEvent() throws IOException { structuredData = readAndParseStructuredData(); } else if (version == 0 && parseTag) { // Try to find a colon terminated tag. - appname = readTag(); + appName = readTag(); if (peek() == '[') { procId = readPid(); } @@ -323,7 +323,7 @@ public List readEvent() throws IOException { if (c != -1) { msg = readLine(); } - createEvent(version, priority, cal, hostname, appname, procId, msgId, structuredData, msg, row); + createEvent(version, priority, cal, hostname, appName, procId, msgId, structuredData, msg, row); return row; } @@ -339,25 +339,27 @@ private List unmatchedEvent(int c) throws IOException { /** * Create a log event from the given parameters. + * https://www.rfc-editor.org/rfc/rfc3164 + * https://www.rfc-editor.org/rfc/rfc5424 * * @param version the syslog version, 0 for RFC 3164 * @param priority the syslog priority, according to RFC 5424 * @param cal the timestamp of the message. Note that timezone matters * @param hostname the hostname - * @param appname the RFC 5424 appname + * @param appName the RFC 5424 app-name * @param procId the RFC 5424 proc-id * @param msgId the RFC 5424 msg-id * @param structuredData the RFC 5424 structured-data * @param body the message body */ private void createEvent(int version, int priority, Calendar cal, String hostname, - byte[] appname, byte[] procId, byte[] msgId, Map structuredData, byte[] body, List row) { + byte[] appName, byte[] procId, byte[] msgId, Map structuredData, byte[] body, List row) { row.add(FACILITIES[priority / 8]); row.add(getEventPriorityBySyslog(priority)); row.add(version == 0 ? "RFC3164" : "RFC5424"); row.add(Timestamp.ofEpochMilli(cal.getTimeInMillis())); row.add(hostname != null ? hostname : ""); - row.add(appname != null ? new String(appname) : ""); + row.add(appName != null ? new String(appName) : ""); row.add(procId != null ? new String(procId) : ""); row.add(msgId != null ? 
new String(msgId) : ""); row.add(structuredData); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java index 79c329d19b16..e31f9ef2d15c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java @@ -51,7 +51,7 @@ public Iterator iterator() { return new Iterator(){ private boolean initialized = false; - private Iterator ptnsIterator = null; + private Iterator partitionIterator = null; private Iterator partitionNamesIter = null; private Iterator batchIter = null; @@ -59,7 +59,7 @@ public Iterator iterator() { private void initialize(){ if(!initialized){ if (currType == Type.LIST_PROVIDED){ - ptnsIterator = ptnsProvided.iterator(); + partitionIterator = ptnsProvided.iterator(); } else { partitionNamesIter = partitionNames.iterator(); } @@ -71,7 +71,7 @@ private void initialize(){ public boolean hasNext() { initialize(); if (currType == Type.LIST_PROVIDED){ - return ptnsIterator.hasNext(); + return partitionIterator.hasNext(); } else { return ((batchIter != null) && batchIter.hasNext()) || partitionNamesIter.hasNext(); } @@ -81,7 +81,7 @@ public boolean hasNext() { public Partition next() { initialize(); if (currType == Type.LIST_PROVIDED){ - return ptnsIterator.next(); + return partitionIterator.next(); } if ((batchIter == null) || !batchIter.hasNext()){ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index 57e02cf3699f..a9ecad39acef 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -246,7 +246,7 @@ public void setTTable(org.apache.hadoop.hive.metastore.api.Table tTable) { // set create time t.setCreateTime((int) (System.currentTimeMillis() / 1000)); } - // Explictly set the bucketing version + // Explicitly set the bucketing version t.getParameters().put(hive_metastoreConstants.TABLE_BUCKETING_VERSION, "2"); return t; @@ -493,7 +493,7 @@ public void setProperty(String name, String value) { } // Please note : Be very careful in using this function. If not used carefully, - // you may end up overwriting all the existing properties. If the usecase is to + // you may end up overwriting all the existing properties. If the use case is to // add or update certain properties use setProperty() instead. public void setParameters(Map params) { tTable.setParameters(params); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java index 757195489094..27518928a4c0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java @@ -25,8 +25,8 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; /* - * This is a plug-able policy to chose the candidate map-join table for converting a join to a - * sort merge join. The policy can decide the big table position. Some of the existing policies + * This is a plug-able policy to choose the candidate map-join table for converting a join to a + * sort merge join. The policy can decide the big table position. Some existing policies * decide the big table based on size or position of the tables. 
*/ public interface BigTableSelectorForAutoSMJ { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java index f74ac2feeefc..84d29ee7ca03 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java @@ -477,7 +477,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, SMBJoinDesc smbJoinDesc = smbOp.getConf(); int posBigTable = smbJoinDesc.getPosBigTable(); - // join keys dont match the bucketing keys + // join keys don't match the bucketing keys List keysBigTable = smbJoinDesc.getKeys().get((byte) posBigTable); if (keysBigTable.size() != bucketPositions.size()) { return null; @@ -621,8 +621,8 @@ else if (op instanceof SelectOperator) { if (selectDesc.getColList().size() < bucketPositions.size() || selectDesc.getColList().size() != fsOp.getSchema().getSignature().size()) { // Some columns in select are pruned. This may happen if those are constants. - // TODO: the best solution is to hook the operator before fs with the select operator. - // See smb_mapjoin_20.q for more details. + // TODO: the best solution is to hook the operator before fs with the select operator. + // See smb_mapjoin_20.q for more details. return null; } // Only columns can be selected for both sorted and bucketed positions diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java index 3ed22721739f..f788ff1a9127 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java @@ -209,10 +209,10 @@ private boolean exprContainsOnlyPartitionColOrVirtualColOrConstants(ExprNodeDesc * Has atleast one subexpression containing a partition/virtualcolumn and has * exactly refer to a single table alias. * @param en Expression Node Descriptor - * @return true if there is atleast one subexpression with partition/virtual column + * @return true if there is at least one subexpression with partition/virtual column * and has exactly refer to a single table alias. If not, return false. */ - private boolean hasAtleastOneSubExprWithPartColOrVirtualColWithOneTableAlias(ExprNodeDesc en) { + private boolean hasAtLeastOneSubExprWithPartColOrVirtualColWithOneTableAlias(ExprNodeDesc en) { if (en == null || en.getChildren() == null) { return false; } @@ -362,19 +362,19 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, return null; } - // 3. See if the IN (STRUCT(EXP1, EXP2,..) has atleast one expression with partition + // 3. See if the IN (STRUCT(EXP1, EXP2,..) has at least one expression with partition // column with single table alias. If not bail out. // We might have expressions containing only partitioning columns, say, T1.A + T2.B // where T1.A and T2.B are both partitioning columns. // However, these expressions should not be considered as valid expressions for separation. - if (!hasAtleastOneSubExprWithPartColOrVirtualColWithOneTableAlias(children.get(0))) { + if (!hasAtLeastOneSubExprWithPartColOrVirtualColWithOneTableAlias(children.get(0))) { LOG.debug( "Partition columns not separated for {}, there are no expression containing partition columns in struct fields", fd); return null; } - // 4. 
See if all the field expressions of the left hand side of IN are expressions + // 4. See if all the field expressions of the left hand side of IN are expressions // containing constants or only partition columns coming from same table. // If so, we need not perform this optimization and we should bail out. if (hasAllSubExprWithConstOrPartColOrVirtualColWithOneTableAlias(children.get(0))) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptMaterializationValidator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptMaterializationValidator.java index d8f0a3041cde..cf419b170c13 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptMaterializationValidator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptMaterializationValidator.java @@ -59,7 +59,7 @@ * Checks the query plan for conditions that would make the plan unsuitable for * materialized views or query caching: * - References to temporary or external tables - * - References to non-determinisitc functions. + * - References to non-deterministic functions. */ public class HiveRelOptMaterializationValidator extends HiveRelShuttleImpl { static final Logger LOG = LoggerFactory.getLogger(HiveRelOptMaterializationValidator.class); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java index 385fe9afeccf..b1dd697b86f7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java @@ -275,7 +275,7 @@ private Pair, List> generateKeys() { final PrimaryKeyInfo primaryKeyInfo = hiveTblMetadata.getPrimaryKeyInfo(); final UniqueConstraint uniqueKeyInfo = hiveTblMetadata.getUniqueKeyInfo(); ImmutableList.Builder builder = ImmutableList.builder(); - ImmutableList.Builder nonNullbuilder = ImmutableList.builder(); + ImmutableList.Builder nonNullBuilder = ImmutableList.builder(); // First PK if (primaryKeyInfo != null && !primaryKeyInfo.getColNames().isEmpty()) { ImmutableBitSet.Builder keys = ImmutableBitSet.builder(); @@ -294,7 +294,7 @@ private Pair, List> generateKeys() { } ImmutableBitSet key = keys.build(); builder.add(key); - nonNullbuilder.add(key); + nonNullBuilder.add(key); } // Then UKs if (uniqueKeyInfo != null && !uniqueKeyInfo.getUniqueConstraints().isEmpty()) { @@ -321,11 +321,11 @@ private Pair, List> generateKeys() { ImmutableBitSet key = keys.build(); builder.add(key); if (isNonNullable) { - nonNullbuilder.add(key); + nonNullBuilder.add(key); } } } - return new Pair<>(builder.build(), nonNullbuilder.build()); + return new Pair<>(builder.build(), nonNullBuilder.build()); } private List generateReferentialConstraints() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java index a52c6978ada4..6b0753070259 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java @@ -23,7 +23,7 @@ /*** * NOTE:
- * 1. Hivecost normalizes cpu and io in to time.
+ * 1. HiveCost normalizes CPU and IO into time.
* 2. CPU, IO cost is added together to find the query latency.
* 3. If query latency is equal then row count is compared. */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectMergeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectMergeRule.java index 49303b29ae6b..880a052bea53 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectMergeRule.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectMergeRule.java @@ -51,7 +51,7 @@ private HiveProjectMergeRule(boolean force, RelBuilderFactory relBuilderFactory) @Override public boolean matches(RelOptRuleCall call) { - // Currently we do not support merging windowing functions with other + // Currently, we do not support merging windowing functions with other // windowing functions i.e. embedding windowing functions within each // other final Project topProject = call.rel(0); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java index 8fced8fd708f..e7dd8bf2b1d1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java @@ -386,7 +386,7 @@ private boolean isRexLiteral(final RexNode rexNode) { // Given a groupset this tries to find out if the cardinality of the grouping columns could have changed - // because if not and it consist of keys (unique + not null OR pk), we can safely remove rest of the columns + // because if not, and it consists of keys (unique + not null OR pk), we can safely remove rest of the columns // if those are columns are not being used further up private ImmutableBitSet generateGroupSetIfCardinalitySame(final Aggregate aggregate, final ImmutableBitSet originalGroupSet, final ImmutableBitSet fieldsUsed) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSemiJoinProjectTransposeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSemiJoinProjectTransposeRule.java index 22c0e0ff6a5a..079530fa442e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSemiJoinProjectTransposeRule.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSemiJoinProjectTransposeRule.java @@ -42,7 +42,7 @@ * This rule is similar to {@link org.apache.calcite.rel.rules.SemiJoinProjectTransposeRule}. * However, it works on Hive nodes rather than logical nodes. * - *

The rule pushes a Semijoin down in a tree past a Project if the + *

The rule pushes a Semi-join down in a tree past a Project if the * Project is followed by a Join. The intention is to remove Projects * between Joins. * @@ -66,7 +66,7 @@ private HiveSemiJoinProjectTransposeRule(RelBuilderFactory relBuilderFactory) { } //~ Methods ---------------------------------------------------------------- - + @Override public void onMatch(RelOptRuleCall call) { Join semiJoin = call.rel(0); Project project = call.rel(1); @@ -75,7 +75,7 @@ public void onMatch(RelOptRuleCall call) { // expression; all projection expressions must be RexInputRefs, // otherwise, we wouldn't have created this semi-join. - // convert the semijoin condition to reflect the LHS with the project + // convert the semi-join condition to reflect the LHS with the project // pulled up RexNode newCondition = adjustCondition(project, semiJoin); @@ -85,7 +85,7 @@ public void onMatch(RelOptRuleCall call) { // Create the new projection. Note that the projection expressions // are the same as the original because they only reference the LHS - // of the semijoin and the semijoin only projects out the LHS + // of the semi-join and the semi-join only projects out the LHS final RelBuilder relBuilder = call.builder(); relBuilder.push(newSemiJoin); relBuilder.project(project.getProjects(), project.getRowType().getFieldNames()); @@ -105,15 +105,15 @@ public void onMatch(RelOptRuleCall call) { */ private RexNode adjustCondition(Project project, Join semiJoin) { // create two RexPrograms -- the bottom one representing a - // concatenation of the project and the RHS of the semijoin and the - // top one representing the semijoin condition + // concatenation of the project and the RHS of the semi-join and the + // top one representing the semi-join condition RexBuilder rexBuilder = project.getCluster().getRexBuilder(); RelDataTypeFactory typeFactory = rexBuilder.getTypeFactory(); RelNode rightChild = semiJoin.getRight(); // for the bottom RexProgram, the input is a concatenation of the - // child of the project and the RHS of the semijoin + // child of the project and the RHS of the semi-join RelDataType bottomInputRowType = SqlValidatorUtil.deriveJoinRowType( project.getInput().getRowType(), @@ -126,7 +126,7 @@ private RexNode adjustCondition(Project project, Join semiJoin) { new RexProgramBuilder(bottomInputRowType, rexBuilder); // add the project expressions, then add input references for the RHS - // of the semijoin + // of the semi-join for (Pair pair : project.getNamedProjects()) { bottomProgramBuilder.addProject(pair.left, pair.right); } @@ -143,8 +143,8 @@ private RexNode adjustCondition(Project project, Join semiJoin) { } RexProgram bottomProgram = bottomProgramBuilder.getProgram(); - // input rowtype into the top program is the concatenation of the - // project and the RHS of the semijoin + // input rowType into the top program is the concatenation of the + // project and the RHS of the semi-join RelDataType topInputRowType = SqlValidatorUtil.deriveJoinRowType( project.getRowType(), @@ -162,8 +162,8 @@ private RexNode adjustCondition(Project project, Join semiJoin) { RexProgram topProgram = topProgramBuilder.getProgram(); // merge the programs and expand out the local references to form - // the new semijoin condition; it now references a concatenation of - // the project's child and the RHS of the semijoin + // the new semi-join condition; it now references a concatenation of + // the project's child and the RHS of the semi-join RexProgram mergedProgram = RexProgramBuilder.mergePrograms( topProgram, diff 
--git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortUnionReduceRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortUnionReduceRule.java index 70020cf5785e..9ce0c07a1fb7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortUnionReduceRule.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortUnionReduceRule.java @@ -66,7 +66,7 @@ public boolean matches(RelOptRuleCall call) { // We only apply this rule if Union.all is true. // And Sort.fetch is not null and it is more than 0. return union.all && sort.fetch != null - // Calite bug CALCITE-987 + // Calcite bug CALCITE-987 && RexLiteral.intValue(sort.fetch) > 0; } @@ -74,7 +74,7 @@ public void onMatch(RelOptRuleCall call) { final HiveSortLimit sort = call.rel(0); final HiveUnion union = call.rel(1); List inputs = new ArrayList<>(); - // Thus we use 'finishPushSortPastUnion' as a flag to identify if we have finished pushing the + // Thus, we use 'finishPushSortPastUnion' as a flag to identify if we have finished pushing the // sort past a union. boolean finishPushSortPastUnion = true; final int offset = sort.offset == null ? 0 : RexLiteral.intValue(sort.offset); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/RelFieldTrimmer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/RelFieldTrimmer.java index adc090c19fec..5cefd025db28 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/RelFieldTrimmer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/RelFieldTrimmer.java @@ -1094,7 +1094,7 @@ public TrimResult trimFields( *

The mapping is a * {@link org.apache.calcite.util.mapping.Mappings.SourceMapping}, which means * that no column can be used more than once, and some columns are not used. - * {@code columnsUsed.getSource(i)} returns the source of the i'th output + * {@code columnsUsed.getSource(i)} returns the source of the i-th output * field. * *

For example, consider the mapping for a relational expression that diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java index 818fcf45a84b..0b778decf24b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java @@ -184,7 +184,7 @@ public RelOptPredicateList getPredicates(Join join, RelMetadataQuery mq) { return jI.inferPredicates(false); } - + /** * Utility to infer predicates from one side of the join that apply on the * other side. @@ -414,7 +414,7 @@ public RexNode right() { } private void infer(List predicates, Set allExprsDigests, - List inferedPredicates, List nonFieldsPredicates, + List inferredPredicates, List nonFieldsPredicates, boolean includeEqualityInference, ImmutableBitSet inferringFields) { for (RexNode r : predicates) { if (!includeEqualityInference @@ -430,7 +430,7 @@ private void infer(List predicates, Set allExprsDigests, if (inferringFields.contains(RelOptUtil.InputFinder.bits(tr)) && !allExprsDigests.contains(tr.toString()) && !isAlwaysTrue(tr)) { - inferedPredicates.add(tr); + inferredPredicates.add(tr); allExprsDigests.add(tr.toString()); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java index 19bd13de9a1b..d30ba9cbdc46 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java @@ -215,7 +215,7 @@ protected double maxNdvForCorrelatedColumns(List peLst, } /* - * a) Order predciates based on ndv in reverse order. b) ndvCrossProduct = + * a) Order predicates based on ndv in reverse order. b) ndvCrossProduct = * ndv(pe0) * ndv(pe1) ^(1/2) * ndv(pe2) ^(1/4) * ndv(pe3) ^(1/8) ... */ protected double exponentialBackoff(List peLst, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java index 6702f8ae9d63..39ebf61d1a08 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java @@ -173,7 +173,7 @@ public static ASTNode table(final RelNode scan) { // NOTE: Calcite considers tbls to be equal if their names are the same. Hence // we need to provide Calcite the fully qualified table name (dbname.tblname) // and not the user provided aliases. - // However in HIVE DB name can not appear in select list; in case of join + // However, in HIVE DB name can not appear in select list; in case of join // where table names differ only in DB name, Hive would require user // introducing explicit aliases for tbl. 
b.add(HiveParser.Identifier, hts.getTableAlias()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveGBOpConvUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveGBOpConvUtil.java index 34d4ee380c65..7a363aafd1fc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveGBOpConvUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveGBOpConvUtil.java @@ -158,7 +158,7 @@ private static HIVEGBPHYSICALMODE getAggOPMode(HiveConf hc, GBInfo gbInfo) { return gbPhysicalPipelineMode; } - // For each of the GB op in the logical GB this should be called seperately; + // For each of the GB op in the logical GB this should be called separately; // otherwise GBevaluator and expr nodes may get shared among multiple GB ops private static GBInfo getGBInfo(HiveAggregate aggRel, OpAttr inputOpAf, HiveConf hc) throws SemanticException { GBInfo gbInfo = new GBInfo(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveTableScanVisitor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveTableScanVisitor.java index 14958aa674d7..1fb5368594db 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveTableScanVisitor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveTableScanVisitor.java @@ -109,8 +109,8 @@ OpAttr visit(HiveTableScan scanRel) { TableScanOperator ts = (TableScanOperator) OperatorFactory.get( hiveOpConverter.getSemanticAnalyzer().getOpContext(), tsd, new RowSchema(colInfos)); - //now that we let Calcite process subqueries we might have more than one - // tablescan with same alias. + // now that we let Calcite process sub-queries we might have more than one + // tableScan with same alias. if (hiveOpConverter.getTopOps().get(tableAlias) != null) { tableAlias = tableAlias + hiveOpConverter.getUniqueCounter(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java index ba34470dd18b..5af598d363ca 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java @@ -107,8 +107,8 @@ public void setSortedColsByDirectory(Map> sortedColsByDire * Classes that implement this interface provide a way to store information about equivalent * columns as their names and indexes in the schema change going into and out of operators. The * definition of equivalent columns is up to the class which uses these classes, e.g. - * BucketingSortingOpProcFactory. For example, two columns are equivalent if they - * contain exactly the same data. Though, it's possible that two columns contain exactly the + * BucketingSortingOpProcFactory. For example, two columns are equivalent if they + * contain exactly the same data. Though, it's possible that two columns contain exactly the * same data and are not known to be equivalent. * * E.g. 
SELECT key a, key b FROM (SELECT key, count(*) c FROM src GROUP BY key) s; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java index 27c38cea8131..b0cf872b3e5f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java @@ -53,7 +53,7 @@ * * For each map reduce task, attempts to infer bucketing and sorting metadata for the outputs. * - * Currently only map reduce tasks which produce final output have there output metadata inferred, + * Currently, only map reduce tasks which produce final output have there output metadata inferred, * but it can be extended to intermediate tasks as well. * * This should be run as the last physical optimizer, as other physical optimizers may invalidate diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java index 66f0891fecf8..a6e4ffaced00 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java @@ -1471,7 +1471,7 @@ private long evaluateChildExpr(Statistics stats, ExprNodeDesc child, /** * GROUPBY operator changes the number of rows. The number of rows emitted by GBY operator will be - * atleast 1 or utmost T(R) (number of rows in relation T) based on the aggregation. A better + * at least 1 or utmost T(R) (number of rows in relation T) based on the aggregation. A better * estimate can be found if we have column statistics on the columns that we are grouping on. *

* Suppose if we are grouping by attributes A,B,C and if statistics for columns A,B,C are diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index d35baa18683c..f5d9a8d3cdd9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -738,7 +738,7 @@ public static String unescapeSQLString(String b) { * Escapes the string for AST; doesn't enclose it in quotes, however. */ public static String escapeSQLString(String b) { - // There's usually nothing to escape so we will be optimistic. + // There's usually nothing to escape, so we will be optimistic. String result = b; for (int i = 0; i < result.length(); ++i) { char currentChar = result.charAt(i); @@ -1302,7 +1302,7 @@ public TableSpec(Hive db, HiveConf conf, ASTNode ast, boolean allowDynamicPartit specType = SpecType.STATIC_PARTITION; } } else if(createDynPartSpec(ast) && allowDynamicPartitionsSpec) { - // if user hasn't specify partition spec generate it from table's partition spec + // if user hasn't specified partition spec generate it from table's partition spec // do this only if it is INSERT/INSERT INTO/INSERT OVERWRITE/ANALYZE List parts = tableHandle.getPartitionKeys(); partSpec = new LinkedHashMap(parts.size()); @@ -1714,7 +1714,7 @@ public static void validatePartColumnType(Table tbl, Map partSpe TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(expectedType); // Since partVal is a constant, it is safe to cast ExprNodeDesc to ExprNodeConstantDesc. // Its value should be in normalized format (e.g. no leading zero in integer, date is in - // format of YYYY-MM-DD etc) + // format of YYYY-MM-DD etc.) Object value = ((ExprNodeConstantDesc)astExprNodePair.getValue()).getValue(); Object convertedValue = value; if (!inputOI.getTypeName().equals(outputOI.getTypeName())) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java index a500d2be9579..ac69a5e75957 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java @@ -1920,7 +1920,7 @@ protected RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataPr // 8. Rerun PPD through Project as column pruning would have introduced // DT above scans; By pushing filter just above TS, Hive can push it into - // storage (incase there are filters on non partition cols). This only + // storage (in case there are filters on non partition cols). This only // matches FIL-PROJ-TS // Also merge, remove and reduce Project if possible generatePartialProgram(program, true, HepMatchOrder.TOP_DOWN, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java index 8b4fa0f46194..61b6f67c9687 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java @@ -378,7 +378,7 @@ private static void processSetColsNode(ASTNode setCols, ASTSearcher searcher, Co return; } - // Then, find the leftmost logical sibling select, because that's what Hive uses for aliases. + // Then, find the leftmost logical sibling select, because that's what Hive uses for aliases. 
while (true) { CommonTree queryOfSelect = select.parent; while (queryOfSelect != null && queryOfSelect.getType() != HiveParser.TOK_QUERY) { @@ -480,7 +480,7 @@ private static boolean createChildColumnRef(Tree child, String alias, return false; } if (!aliases.add(colAlias)) { - // TODO: if a side of the union has 2 columns with the same name, noone on the higher + // TODO: if a side of the union has 2 columns with the same name, none on the higher // level can refer to them. We could change the alias in the original node. LOG.debug("Replacing SETCOLREF with ALLCOLREF because of duplicate alias " + colAlias); return false; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java index affd608c38ca..2f16abdc2b09 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java @@ -469,7 +469,7 @@ void setSQRR(RowResolver sqRR) { */ private int numOfCorrelationExprsAddedToSQSelect; - private boolean groupbyAddedToSQ; + private boolean groupByAddedToSQ; private int numOuterCorrExprsForHaving; @@ -497,7 +497,7 @@ public QBSubQuery(String outerQueryId, originalSQAST.getTokenStartIndex(), originalSQAST.getTokenStopIndex()); originalSQASTOrigin = new ASTNodeOrigin("SubQuery", alias, s, alias, originalSQAST); numOfCorrelationExprsAddedToSQSelect = 0; - groupbyAddedToSQ = false; + groupByAddedToSQ = false; if ( operator.getType() == SubQueryType.NOT_IN ) { notInCheck = new NotInCheck(); @@ -670,7 +670,7 @@ void validateAndRewriteAST(RowResolver outerQueryRR, */ if ( operator.getType() == SubQueryType.EXISTS && containsAggregationExprs && - groupbyAddedToSQ ) { + groupByAddedToSQ) { throw new SemanticException(ASTErrorUtils.getMsg( ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(), subQueryAST, @@ -679,7 +679,7 @@ void validateAndRewriteAST(RowResolver outerQueryRR, } if ( operator.getType() == SubQueryType.NOT_EXISTS && containsAggregationExprs && - groupbyAddedToSQ ) { + groupByAddedToSQ) { throw new SemanticException(ASTErrorUtils.getMsg( ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(), subQueryAST, @@ -761,7 +761,7 @@ void buildJoinCondition(RowResolver outerQueryRR, RowResolver sqRR, "Correlating expression contains ambiguous column references.")); } } - + parentQueryJoinCond = SubQueryUtils.buildOuterQryToSQJoinCond( parentExpr, alias, @@ -903,9 +903,9 @@ private void rewrite(RowResolver parentQueryRR, rewriteCorrConjunctForHaving(conjunctAST, false, outerQueryAlias, parentQueryRR, conjunct.getRightOuterColInfo()); } - ASTNode joinPredciate = SubQueryUtils.alterCorrelatedPredicate( + ASTNode joinPredicate = SubQueryUtils.alterCorrelatedPredicate( conjunctAST, sqExprForCorr, true); - joinConditionAST = SubQueryUtils.andAST(joinConditionAST, joinPredciate); + joinConditionAST = SubQueryUtils.andAST(joinConditionAST, joinPredicate); subQueryJoinAliasExprs.add(sqExprForCorr); ASTNode selExpr = SubQueryUtils.createSelectItem(conjunct.getLeftExpr(), sqExprAlias); selectClause.addChild(selExpr); @@ -927,9 +927,9 @@ private void rewrite(RowResolver parentQueryRR, rewriteCorrConjunctForHaving(conjunctAST, true, outerQueryAlias, parentQueryRR, conjunct.getLeftOuterColInfo()); } - ASTNode joinPredciate = SubQueryUtils.alterCorrelatedPredicate( + ASTNode joinPredicate = SubQueryUtils.alterCorrelatedPredicate( conjunctAST, sqExprForCorr, false); - joinConditionAST = SubQueryUtils.andAST(joinConditionAST, joinPredciate); + joinConditionAST = 
SubQueryUtils.andAST(joinConditionAST, joinPredicate); subQueryJoinAliasExprs.add(sqExprForCorr); ASTNode selExpr = SubQueryUtils.createSelectItem(conjunct.getRightExpr(), sqExprAlias); selectClause.addChild(selExpr); @@ -982,7 +982,7 @@ private ASTNode getSubQueryGroupByAST() { } groupBy = SubQueryUtils.buildGroupBy(); - groupbyAddedToSQ = true; + groupByAddedToSQ = true; List newChildren = new ArrayList(); newChildren.add(groupBy); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java index 0e810f13d6b9..1a5866461092 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java @@ -88,7 +88,7 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer { @Override public void analyzeInternal(ASTNode ast) throws SemanticException { - LOG.debug("ReplicationSemanticAanalyzer: analyzeInternal"); + LOG.debug("ReplicationSemanticAnalyzer: analyzeInternal"); LOG.debug(ast.getName() + ":" + ast.getToken().getText() + "=" + ast.getText()); // Some of the txn related configs were not set when ReplicationSemanticAnalyzer.conf was initialized. // It should be set first. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index ad30ef5e1a83..067c35dac404 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -13508,7 +13508,7 @@ private Map validateAndAddDefaultProperties( /** * This api is used to determine where to create acid tables are not. - * if the default table type is set to external, then create transcational table should result in acid tables, + * if the default table type is set to external, then create transactional table should result in acid tables, * else create table should result in external table. * */ private boolean isExternalTableChanged (Map tblProp, boolean isTransactional, boolean isExt, boolean isTableTypeChanged) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java index 5ddcd31d5664..3848cd9a5502 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java @@ -84,7 +84,7 @@ public TableAccessInfo analyzeTableAccess() throws SemanticException { SemanticDispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, tableAccessCtx); SemanticGraphWalker ogw = new DefaultGraphWalker(disp); - // Create a list of topop nodes and walk! + // Create a list of topOp nodes and walk! List topNodes = new ArrayList(); topNodes.addAll(pGraphContext.getTopOps().values()); ogw.startWalking(topNodes, null); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java index d2b73d924e50..f514f536f32f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java @@ -21,13 +21,13 @@ import java.util.List; /** - * + * * This class stores all the information specified in the TABLESAMPLE clause. * e.g. 
for the clause "FROM t TABLESAMPLE(1 OUT OF 2 ON c1) it will store the * numerator 1, the denominator 2 and the list of expressions(in this case c1) - * in the appropriate fields. The afore-mentioned sampling clause causes the 1st + * in the appropriate fields. The aforementioned sampling clause causes the 1st * bucket to be picked out of the 2 buckets created by hashing on c1. - * + * */ public class TableSample { @@ -59,7 +59,7 @@ public class TableSample { /** * Constructs the TableSample given the numerator, denominator and the list of * ON clause expressions. - * + * * @param num * The numerator * @param den @@ -81,7 +81,7 @@ public TableSample(int num, int den) { /** * Gets the numerator. - * + * * @return int */ public int getNumerator() { @@ -90,7 +90,7 @@ public int getNumerator() { /** * Sets the numerator. - * + * * @param num * The numerator */ @@ -100,7 +100,7 @@ public void setNumerator(int num) { /** * Gets the denominator. - * + * * @return int */ public int getDenominator() { @@ -109,7 +109,7 @@ public int getDenominator() { /** * Sets the denominator. - * + * * @param den * The denominator */ @@ -119,7 +119,7 @@ public void setDenominator(int den) { /** * Gets the ON part's expression list. - * + * * @return ArrayList<ASTNode> */ public List getExprs() { @@ -128,7 +128,7 @@ public List getExprs() { /** * Sets the expression list. - * + * * @param exprs * The expression list */ @@ -138,7 +138,7 @@ public void setExprs(List exprs) { /** * Gets the flag that indicates whether input pruning is possible. - * + * * @return boolean */ public boolean getInputPruning() { @@ -147,7 +147,7 @@ public boolean getInputPruning() { /** * Sets the flag that indicates whether input pruning is possible or not. - * + * * @param inputPruning * true if input pruning is possible */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 3c37e9958fdf..3b489535c336 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -101,7 +101,7 @@ import java.util.Set; /** - * TaskCompiler is a the base class for classes that compile + * TaskCompiler is the base class for classes that compile * operator pipelines into tasks. */ public abstract class TaskCompiler { @@ -270,7 +270,7 @@ public void compile(final ParseContext pCtx, } if (outerQueryLimit == 0) { // Believe it or not, some tools do generate queries with limit 0 and than expect - // query to run quickly. Lets meet their requirement. + // query to run quickly. Let's meet their requirement. LOG.info("Limit 0. No query execution needed."); return; } @@ -751,7 +751,7 @@ protected abstract void optimizeTaskPlan(List> rootTasks, protected abstract void setInputFormat(Task rootTask); /* - * Called to generate the taks tree from the parse context/operator tree + * Called to generate the tasks tree from the parse context/operator tree */ protected abstract void generateTaskTree(List> rootTasks, ParseContext pCtx, List> mvTask, Set inputs, Set outputs) throws SemanticException; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java index 48e8a960c91d..4f074879b8bf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java @@ -110,7 +110,7 @@ public OrderSpec getQueryOrderSpec() { * Apply the rules in the Spec. 
to fill in any missing pieces of every Window Specification, * also validate that the effective Specification is valid. The rules applied are: * - For Wdw Specs that refer to Window Defns, inherit missing components. - * - A Window Spec with no Parition Spec, is Partitioned on a Constant(number 0) + * - A Window Spec with no Partition Spec, is Partitioned on a Constant(number 0) * - For missing Wdw Frames or for Frames with only a Start Boundary, completely specify them * by the rules in {@link effectiveWindowFrame} * - Validate the effective Window Frames with the rules in {@link validateWindowFrame} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/BootstrapDumpLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/BootstrapDumpLogger.java index 08721346d599..7ba2c2794608 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/BootstrapDumpLogger.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/BootstrapDumpLogger.java @@ -28,7 +28,7 @@ /** * BootstrapDumpLogger. * - * Repllogger for bootstrap dump. + * ReplLogger for bootstrap dump. **/ public class BootstrapDumpLogger extends ReplLogger { private String dbName; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/IncrementalDumpLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/IncrementalDumpLogger.java index 4f24c0c3d882..4e979e7c455c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/IncrementalDumpLogger.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/log/IncrementalDumpLogger.java @@ -26,7 +26,7 @@ /** * IncrementalDumpLogger. * - * Repllogger for incremental dump. + * ReplLogger for incremental dump. **/ public class IncrementalDumpLogger extends ReplLogger { private String dbName; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/log/IncrementalLoadLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/log/IncrementalLoadLogger.java index 7c8ba62e7246..e29569b11065 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/log/IncrementalLoadLogger.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/log/IncrementalLoadLogger.java @@ -27,7 +27,7 @@ /** * IncrementalLoadLogger. * - * Repllogger for Incremental Load. + * ReplLogger for Incremental Load. **/ public class IncrementalLoadLogger extends ReplLogger { private final ReplStatsTracker replStatsTracker; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java index 35e02e54b7fc..87e78444f965 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java @@ -56,7 +56,7 @@ public abstract class BaseWork extends AbstractOperatorDesc { protected static final Logger LOG = LoggerFactory.getLogger(BaseWork.class); // dummyOps is a reference to all the HashTableDummy operators in the - // plan. These have to be separately initialized when we setup a task. + // plan. These have to be separately initialized when we set up a task. // Their function is mainly as root ops to give the mapjoin the correct // schema info. 
List dummyOps; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java index 20f7d2e0e40f..56399e434ed0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java @@ -41,11 +41,11 @@ public class BasicStatsWork implements Serializable { private boolean statsReliable; // are stats completely reliable // If stats aggregator is not present, clear the current aggregator stats. - // For eg. if a merge is being performed, stats already collected by aggregator (numrows etc.) + // For example, if a merge is being performed, stats already collected by aggregator (numrows etc.) // are still valid. However, if a load file is being performed, the old stats collected by // aggregator are not valid. It might be a good idea to clear them instead of leaving wrong // and old stats. - // Since HIVE-12661, we maintain the old stats (although may be wrong) for CBO + // Since HIVE-12661, we maintain the old stats (although it may be wrong) for CBO // purpose. We use a flag COLUMN_STATS_ACCURATE to // show the accuracy of the stats. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColStatistics.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColStatistics.java index 75cdba5abd8e..717d1f8b6a7c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColStatistics.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColStatistics.java @@ -22,7 +22,7 @@ public class ColStatistics { private String colName; private String colType; - private long countDistint; + private long countDistinct; private long numNulls; private double avgColLen; private long numTrues; @@ -62,11 +62,11 @@ public void setColumnType(String colType) { } public long getCountDistint() { - return countDistint; + return countDistinct; } - public void setCountDistint(long countDistint) { - this.countDistint = countDistint; + public void setCountDistint(long countDistinct) { + this.countDistinct = countDistinct; } public long getNumNulls() { @@ -137,7 +137,7 @@ public String toString() { sb.append(" colType: "); sb.append(colType); sb.append(" countDistincts: "); - sb.append(countDistint); + sb.append(countDistinct); sb.append(" numNulls: "); sb.append(numNulls); sb.append(" avgColLen: "); @@ -162,7 +162,7 @@ public String toString() { public ColStatistics clone() { ColStatistics clone = new ColStatistics(colName, colType); clone.setAvgColLen(avgColLen); - clone.setCountDistint(countDistint); + clone.setCountDistint(countDistinct); clone.setNumNulls(numNulls); clone.setNumTrues(numTrues); clone.setNumFalses(numFalses); @@ -225,11 +225,11 @@ public void setFilterColumn() { private void setIsFilteredColumn(boolean isFilteredColumn2) { isFilteredColumn=isFilteredColumn2; - + } - + public boolean isFilteredColumn() { return isFilteredColumn; } - + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java index 4e7c86807e07..66e386c4e5f9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java @@ -513,7 +513,7 @@ public boolean isGatherStats() { */ @Override @Explain(displayName = "Stats Publishing Key Prefix", explainLevels = { Level.EXTENDED }) - // FIXME: including this in the signature will almost certenly differ even if the operator is doing the same + // FIXME: including this in the signature will almost 
certainly differ even if the operator is doing the same // there might be conflicting usages of logicalCompare? @Signature public String getStatsAggPrefix() { @@ -685,7 +685,7 @@ public int getBucketingVersionForExplain() { return getBucketingVersion(); } /** - * Whether this is CREATE TABLE SELECT or CREATE MATERIALIZED VIEW statemet + * Whether this is CREATE TABLE SELECT or CREATE MATERIALIZED VIEW statement * Set by semantic analyzer this is required because CTAS/CM requires some special logic * in mvFileToFinalPath */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java index 150362417cbe..33d0148bca4d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java @@ -59,7 +59,7 @@ CommandProcessorResponse run(SessionState ss, String command) throws CommandProc } String[] parts = command.split("\\s+"); boolean isDefault = false; - List varnames = new ArrayList<>(parts.length); + List varNames = new ArrayList<>(parts.length); for (String part : parts) { if (part.isEmpty()) { continue; @@ -67,22 +67,22 @@ CommandProcessorResponse run(SessionState ss, String command) throws CommandProc if (DEFAULT_ARG.equals(part)) { isDefault = true; } else { - varnames.add(part); + varNames.add(part); } } - if (varnames.isEmpty()) { + if (varNames.isEmpty()) { throw new CommandProcessorException(1, -1, "No variable names specified", "42000", null); } String variableNames = ""; - for (String varname : varnames) { + for (String varName : varNames) { if (isDefault) { if (!variableNames.isEmpty()) { variableNames += ", "; } - variableNames += varname; - resetToDefault(ss, varname); + variableNames += varName; + resetToDefault(ss, varName); } else { - resetOverrideOnly(ss, varname); + resetOverrideOnly(ss, varName); } } String message = isDefault ? 
"Resetting " + variableNames + " to default values" : null; @@ -100,32 +100,32 @@ private static void resetOverridesOnly(SessionState ss) { ss.getOverriddenConfigurations().clear(); } - private static void resetOverrideOnly(SessionState ss, String varname) { - if (!ss.getOverriddenConfigurations().containsKey(varname)) { + private static void resetOverrideOnly(SessionState ss, String varName) { + if (!ss.getOverriddenConfigurations().containsKey(varName)) { return; } - setSessionVariableFromConf(ss, varname, new HiveConf()); - ss.getOverriddenConfigurations().remove(varname); + setSessionVariableFromConf(ss, varName, new HiveConf()); + ss.getOverriddenConfigurations().remove(varName); } - private static void setSessionVariableFromConf(SessionState ss, String varname, HiveConf conf) { - String value = conf.get(varname); + private static void setSessionVariableFromConf(SessionState ss, String varName, HiveConf conf) { + String value = conf.get(varName); if (value != null) { - SetProcessor.setConf(ss, varname, varname, value, false); + SetProcessor.setConf(ss, varName, varName, value, false); } } - private static CommandProcessorResponse resetToDefault(SessionState ss, String varname) + private static CommandProcessorResponse resetToDefault(SessionState ss, String varName) throws CommandProcessorException { - varname = varname.trim(); + varName = varName.trim(); try { String nonErrorMessage = null; - if (varname.startsWith(SystemVariables.HIVECONF_PREFIX)){ - String propName = varname.substring(SystemVariables.HIVECONF_PREFIX.length()); + if (varName.startsWith(SystemVariables.HIVECONF_PREFIX)){ + String propName = varName.substring(SystemVariables.HIVECONF_PREFIX.length()); nonErrorMessage = SetProcessor.setConf( - varname, propName, getConfVar(propName).getDefaultValue(), false); - } else if (varname.startsWith(SystemVariables.METACONF_PREFIX)) { - String propName = varname.substring(SystemVariables.METACONF_PREFIX.length()); + varName, propName, getConfVar(propName).getDefaultValue(), false); + } else if (varName.startsWith(SystemVariables.METACONF_PREFIX)) { + String propName = varName.substring(SystemVariables.METACONF_PREFIX.length()); HiveConf.ConfVars confVars = getConfVar(propName); Hive.get(ss.getConf()).setMetaConf(propName, new VariableSubstitution(new HiveVariableSource() { @Override @@ -134,9 +134,9 @@ public Map getHiveVariable() { } }).substitute(ss.getConf(), confVars.getDefaultValue())); } else { - String defaultVal = getConfVar(varname).getDefaultValue(); - nonErrorMessage = SetProcessor.setConf(varname, varname, defaultVal, true); - if (varname.equals(HiveConf.ConfVars.HIVE_SESSION_HISTORY_ENABLED.toString())) { + String defaultVal = getConfVar(varName).getDefaultValue(); + nonErrorMessage = SetProcessor.setConf(varName, varName, defaultVal, true); + if (varName.equals(HiveConf.ConfVars.HIVE_SESSION_HISTORY_ENABLED.toString())) { SessionState.get().updateHistory(Boolean.parseBoolean(defaultVal), ss); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java index f102d61fd09f..9c89c3bd3ae2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java @@ -158,72 +158,72 @@ private void dumpOption(String s) { } } - public CommandProcessorResponse executeSetVariable(String varname, String varvalue) throws CommandProcessorException { + public CommandProcessorResponse executeSetVariable(String 
varName, String varValue) throws CommandProcessorException { try { - return setVariable(varname, varvalue); + return setVariable(varName, varValue); } catch (Exception e) { Throwable exception = e instanceof IllegalArgumentException ? null : e; throw new CommandProcessorException(1, -1, e.getMessage(), "42000", exception); } } - public static CommandProcessorResponse setVariable(String varname, String varvalue) throws Exception { + public static CommandProcessorResponse setVariable(String varName, String varValue) throws Exception { SessionState ss = SessionState.get(); - if (varvalue.contains("\n")){ + if (varValue.contains("\n")){ ss.err.println("Warning: Value had a \\n character in it."); } - varname = varname.trim(); + varName = varName.trim(); String nonErrorMessage = null; - if (varname.startsWith(ENV_PREFIX)){ + if (varName.startsWith(ENV_PREFIX)){ ss.err.println("env:* variables can not be set."); throw new CommandProcessorException(1); // Should we propagate the error message properly? - } else if (varname.startsWith(SYSTEM_PREFIX)){ - String propName = varname.substring(SYSTEM_PREFIX.length()); + } else if (varName.startsWith(SYSTEM_PREFIX)){ + String propName = varName.substring(SYSTEM_PREFIX.length()); System.getProperties() .setProperty(propName, new VariableSubstitution(new HiveVariableSource() { @Override public Map getHiveVariable() { return SessionState.get().getHiveVariables(); } - }).substitute(ss.getConf(), varvalue)); - } else if (varname.startsWith(HIVECONF_PREFIX)){ - String propName = varname.substring(HIVECONF_PREFIX.length()); - nonErrorMessage = setConf(varname, propName, varvalue, false); - } else if (varname.startsWith(HIVEVAR_PREFIX)) { - String propName = varname.substring(HIVEVAR_PREFIX.length()); + }).substitute(ss.getConf(), varValue)); + } else if (varName.startsWith(HIVECONF_PREFIX)){ + String propName = varName.substring(HIVECONF_PREFIX.length()); + nonErrorMessage = setConf(varName, propName, varValue, false); + } else if (varName.startsWith(HIVEVAR_PREFIX)) { + String propName = varName.substring(HIVEVAR_PREFIX.length()); ss.getHiveVariables().put(propName, new VariableSubstitution(new HiveVariableSource() { @Override public Map getHiveVariable() { return SessionState.get().getHiveVariables(); } - }).substitute(ss.getConf(), varvalue)); - } else if (varname.startsWith(METACONF_PREFIX)) { - String propName = varname.substring(METACONF_PREFIX.length()); + }).substitute(ss.getConf(), varValue)); + } else if (varName.startsWith(METACONF_PREFIX)) { + String propName = varName.substring(METACONF_PREFIX.length()); Hive hive = Hive.get(ss.getConf()); hive.setMetaConf(propName, new VariableSubstitution(new HiveVariableSource() { @Override public Map getHiveVariable() { return SessionState.get().getHiveVariables(); } - }).substitute(ss.getConf(), varvalue)); + }).substitute(ss.getConf(), varValue)); } else { - nonErrorMessage = setConf(varname, varname, varvalue, true); - if (varname.equals(HiveConf.ConfVars.HIVE_SESSION_HISTORY_ENABLED.toString())) { - SessionState.get().updateHistory(Boolean.parseBoolean(varvalue), ss); + nonErrorMessage = setConf(varName, varName, varValue, true); + if (varName.equals(HiveConf.ConfVars.HIVE_SESSION_HISTORY_ENABLED.toString())) { + SessionState.get().updateHistory(Boolean.parseBoolean(varValue), ss); } } return new CommandProcessorResponse(null, nonErrorMessage); } - static String setConf(String varname, String key, String varvalue, boolean register) + static String setConf(String varName, String key, String varValue, 
boolean register) throws IllegalArgumentException { - return setConf(SessionState.get(), varname, key, varvalue, register); + return setConf(SessionState.get(), varName, key, varValue, register); } /** * @return A console message that is not strong enough to fail the command (e.g. deprecation). */ - static String setConf(SessionState ss, String varname, String key, String varvalue, boolean register) + static String setConf(SessionState ss, String varName, String key, String varValue, boolean register) throws IllegalArgumentException { String result = null; HiveConf conf = ss.getConf(); @@ -232,13 +232,13 @@ static String setConf(SessionState ss, String varname, String key, String varval public Map getHiveVariable() { return ss.getHiveVariables(); } - }).substitute(conf, varvalue); + }).substitute(conf, varValue); if (conf.getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) { HiveConf.ConfVars confVars = HiveConf.getConfVars(key); if (confVars != null) { if (!confVars.isType(value)) { StringBuilder message = new StringBuilder(); - message.append("'SET ").append(varname).append('=').append(varvalue); + message.append("'SET ").append(varName).append('=').append(varValue); message.append("' FAILED because ").append(key).append(" expects "); message.append(confVars.typeString()).append(" type value."); throw new IllegalArgumentException(message.toString()); @@ -246,7 +246,7 @@ public Map getHiveVariable() { String fail = confVars.validate(value); if (fail != null) { StringBuilder message = new StringBuilder(); - message.append("'SET ").append(varname).append('=').append(varvalue); + message.append("'SET ").append(varName).append('=').append(varValue); message.append("' FAILED in validation : ").append(fail).append('.'); throw new IllegalArgumentException(message.toString()); } @@ -285,14 +285,14 @@ private SortedMap mapToSortedMap(Map data){ return sortedEnvMap; } - private CommandProcessorResponse getVariable(String varname) throws Exception { + private CommandProcessorResponse getVariable(String varName) throws Exception { SessionState ss = SessionState.get(); - if (varname.equals("silent")){ + if (varName.equals("silent")){ ss.out.println("silent" + "=" + ss.getIsSilent()); return new CommandProcessorResponse(getSchema(), null); } - if (varname.startsWith(SYSTEM_PREFIX)) { - String propName = varname.substring(SYSTEM_PREFIX.length()); + if (varName.startsWith(SYSTEM_PREFIX)) { + String propName = varName.substring(SYSTEM_PREFIX.length()); String result = System.getProperty(propName); if (result != null) { if(isHidden(propName)) { @@ -305,8 +305,8 @@ private CommandProcessorResponse getVariable(String varname) throws Exception { ss.out.println(propName + " is undefined as a system property"); throw new CommandProcessorException(1); } - } else if (varname.indexOf(ENV_PREFIX) == 0) { - String var = varname.substring(ENV_PREFIX.length()); + } else if (varName.indexOf(ENV_PREFIX) == 0) { + String var = varName.substring(ENV_PREFIX.length()); if (System.getenv(var) != null) { if(isHidden(var)) { ss.out.println(ENV_PREFIX + var + " is a hidden config"); @@ -315,11 +315,11 @@ private CommandProcessorResponse getVariable(String varname) throws Exception { } return new CommandProcessorResponse(getSchema(), null); } else { - ss.out.println(varname + " is undefined as an environmental variable"); + ss.out.println(varName + " is undefined as an environmental variable"); throw new CommandProcessorException(1); } - } else if (varname.indexOf(HIVECONF_PREFIX) == 0) { - String var = 
varname.substring(HIVECONF_PREFIX.length()); + } else if (varName.indexOf(HIVECONF_PREFIX) == 0) { + String var = varName.substring(HIVECONF_PREFIX.length()); if (ss.getConf().isHiddenConfig(var)) { ss.out.println(HIVECONF_PREFIX + var + " is a hidden config"); return new CommandProcessorResponse(getSchema(), null); @@ -327,31 +327,31 @@ private CommandProcessorResponse getVariable(String varname) throws Exception { ss.out.println(HIVECONF_PREFIX + var + "=" + ss.getConf().get(var)); return new CommandProcessorResponse(getSchema(), null); } else { - ss.out.println(varname + " is undefined as a hive configuration variable"); + ss.out.println(varName + " is undefined as a hive configuration variable"); throw new CommandProcessorException(1); } - } else if (varname.indexOf(HIVEVAR_PREFIX) == 0) { - String var = varname.substring(HIVEVAR_PREFIX.length()); + } else if (varName.indexOf(HIVEVAR_PREFIX) == 0) { + String var = varName.substring(HIVEVAR_PREFIX.length()); if (ss.getHiveVariables().get(var) != null) { ss.out.println(HIVEVAR_PREFIX + var + "=" + ss.getHiveVariables().get(var)); return new CommandProcessorResponse(getSchema(), null); } else { - ss.out.println(varname + " is undefined as a hive variable"); + ss.out.println(varName + " is undefined as a hive variable"); throw new CommandProcessorException(1); } - } else if (varname.indexOf(METACONF_PREFIX) == 0) { - String var = varname.substring(METACONF_PREFIX.length()); + } else if (varName.indexOf(METACONF_PREFIX) == 0) { + String var = varName.substring(METACONF_PREFIX.length()); Hive hive = Hive.get(ss.getConf()); String value = hive.getMetaConf(var); if (value != null) { ss.out.println(METACONF_PREFIX + var + "=" + value); return new CommandProcessorResponse(getSchema(), null); } else { - ss.out.println(varname + " is undefined as a hive meta variable"); + ss.out.println(varName + " is undefined as a hive meta variable"); throw new CommandProcessorException(1); } } else { - dumpOption(varname); + dumpOption(varName); return new CommandProcessorResponse(getSchema(), null); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java b/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java index 3cbaa60bdf1e..d3e6a4722c27 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java @@ -80,7 +80,7 @@ public static ScheduledQueryExecutionService startScheduledQueryExecutorService( synchronized (ScheduledQueryExecutionService.class) { if (INSTANCE != null) { throw new IllegalStateException( - "There is already a ScheduledQueryExecutionService in service; check it and close it explicitly if neccessary"); + "There is already a ScheduledQueryExecutionService in service; check it and close it explicitly if necessary"); } INSTANCE = new ScheduledQueryExecutionService(ctx); return INSTANCE; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/secrets/SecretSource.java b/ql/src/java/org/apache/hadoop/hive/ql/secrets/SecretSource.java index ed2d887f1e9f..357e9ddce800 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/secrets/SecretSource.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/secrets/SecretSource.java @@ -24,7 +24,7 @@ /** * Interface representing source of a secret using an uri. * The URI scheme is used to match an URI to an implementation scheme. The implementations are discovered and loaded - * using java service loader. 
Currenty there isn't a way to initialize or reset a SecretSource after construction. + * using java service loader. Currently, there isn't a way to initialize or reset a SecretSource after construction. * * The secret source is expected to be thread-safe. */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index 32c0891d3f94..620022eda124 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -1092,7 +1092,7 @@ public static Registry getRegistry() { public static Registry getRegistryForWrite() { Registry registry = getRegistry(); if (registry == null) { - throw new RuntimeException("Function registery for session is not initialized"); + throw new RuntimeException("Function registry for session is not initialized"); } return registry; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java index 145bdd613d1a..723f64f594c7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java @@ -76,11 +76,11 @@ * StatsNoJobTask is used in cases where stats collection is the only task for the given query (no * parent MR or Tez job). It is used in the following cases 1) ANALYZE with noscan for * file formats that implement StatsProvidingRecordReader interface: ORC format (implements - * StatsProvidingRecordReader) stores column statistics for all columns in the file footer. Its much + * StatsProvidingRecordReader) stores column statistics for all columns in the file footer. It's much * faster to compute the table/partition statistics by reading the footer than scanning all the * rows. This task can be used for computing basic stats like numFiles, numRows, fileSize, * rawDataSize from ORC footer. - * However, this cannot be used for full ACID tables, since some of the files may contain updates + * However, this cannot be used for full ACID tables, since some files may contain updates * and deletes to existing rows, so summing up the per-file row counts is invalid. **/ public class BasicStatsNoJobTask implements IStatsProcessor { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java index c04d5c80322c..965d107fdde1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java @@ -167,7 +167,7 @@ public Object process(StatsAggregator statsAggregator) throws HiveException, Met } // The collectable stats for the aggregator needs to be cleared. - // For eg. if a file is being loaded, the old number of rows are not valid + // For example, if a file is being loaded, the old number of rows are not valid // XXX: makes no sense for me... possibly not needed anymore if (work.isClearAggregatorStats()) { // we choose to keep the invalid stats and only change the setting. @@ -492,7 +492,7 @@ private List getPartitionsList(Hive db) throws HiveException { if (!table.isPartitioned()) { return null; } - // get all partitions that matches with the partition spec + // get all partitions that match with the partition spec return tblSpec.partitions != null ? 
unmodifiableList(tblSpec.partitions) : emptyList(); } else if (work.getLoadTableDesc() != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java index 54916bd3192d..152a2f2437e2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java @@ -141,9 +141,9 @@ static class PPart extends Partish { private Partition partition; // FIXME: possibly the distinction between table/partition is not need; however it was like this before....will change it later - public PPart(Table table, Partition partiton) { + public PPart(Table table, Partition partition) { this.table = table; - partition = partiton; + this.partition = partition; } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/estimator/StatEstimator.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/estimator/StatEstimator.java index d1fc3f271058..94aaa32ecfcb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/estimator/StatEstimator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/estimator/StatEstimator.java @@ -31,7 +31,7 @@ public interface StatEstimator { /** * Computes the output statistics of the actual UDF. * - * The estimator should return with a prefereably overestimated {@link ColStatistics} object if possible. + * The estimator should return with a preferably overestimated {@link ColStatistics} object if possible. * The actual estimation logic may decide to not give an estimation; it should return with {@link Optional#empty()}. * * Note: at the time of the call there will be {@link ColStatistics} for all the arguments; if that is not available - the estimation is skipped. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index b1e690f16d8d..247164cc8a17 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -294,7 +294,7 @@ protected Boolean findNextCompactionAndExecute(boolean collectGenericStats, bool LOG.warn("A timed out copmaction pool entry ({}) is picked up by one of the default compaction pool workers.", ci); } if (StringUtils.isNotBlank(getPoolName()) && StringUtils.isNotBlank(ci.poolName) && !getPoolName().equals(ci.poolName)) { - LOG.warn("The returned compaction request ({}) belong to a different pool. Altough the worker is assigned to the {} pool," + + LOG.warn("The returned compaction request ({}) belong to a different pool. Although the worker is assigned to the {} pool," + " it will process the request.", ci, getPoolName()); } checkInterrupt(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java index 856556b8a291..df8275debe5b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java @@ -88,7 +88,7 @@ private void decode(long val, int radix) { * @param radix * must be between MIN_RADIX and MAX_RADIX * @param fromPos - * is the first element that should be conisdered + * is the first element that should be considered * @return the result should be treated as an unsigned 64-bit integer. 
*/ private long encode(int radix, int fromPos) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java index bf012ddd0379..ecd466f646d9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java @@ -61,7 +61,7 @@ public abstract class TableFunctionResolver { /* * - called during translation. * - invokes createEvaluator which must be implemented by a subclass - * - sets up the evaluator with references to the TableDef, PartitionClass, PartitionMemsize and + * - sets up the evaluator with references to the TableDef, PartitionClass, PartitionMemSize and * the transformsRawInput boolean. */ public void initialize(HiveConf cfg, PTFDesc ptfDesc, PartitionedTableFunctionDef tDef) @@ -198,7 +198,7 @@ public boolean carryForwardNames() { /** * Provide referenced columns names to be used in partition function * - * @return null for unknown (will get all columns from table including virtual columns) + * @return null for unknown (will get all columns from table including virtual columns) * @throws SemanticException */ public List getReferencedColumns() throws SemanticException {