From 00e010a9dd2622c97dae2e540bc73411895fb924 Mon Sep 17 00:00:00 2001 From: Mike Walch Date: Mon, 4 May 2015 15:31:36 -0400 Subject: [PATCH 1/3] ACCUMULO-3771 Configure automatic Java code style formatting * Added maven-java-formatter-plugin to POM files --- examples/simple/pom.xml | 9 +++++++ maven-plugin/src/it/plugin-test/pom.xml | 7 +++++ pom.xml | 35 ++++++++++++++++++++++++- server/base/pom.xml | 9 +++++++ server/gc/pom.xml | 11 ++++++++ server/master/pom.xml | 11 ++++++++ server/monitor/pom.xml | 9 +++++++ server/native/pom.xml | 7 +++++ server/tracer/pom.xml | 11 ++++++++ server/tserver/pom.xml | 9 +++++++ 10 files changed, 117 insertions(+), 1 deletion(-) diff --git a/examples/simple/pom.xml b/examples/simple/pom.xml index a7580163a09..ebe663f75fe 100644 --- a/examples/simple/pom.xml +++ b/examples/simple/pom.xml @@ -113,5 +113,14 @@ + + + com.googlecode.maven-java-formatter-plugin + maven-java-formatter-plugin + + ${project.basedir}/../../contrib/Eclipse-Accumulo-Codestyle.xml + + + diff --git a/maven-plugin/src/it/plugin-test/pom.xml b/maven-plugin/src/it/plugin-test/pom.xml index 7654c96e4ca..8a848e4229e 100644 --- a/maven-plugin/src/it/plugin-test/pom.xml +++ b/maven-plugin/src/it/plugin-test/pom.xml @@ -113,6 +113,13 @@ + + com.googlecode.maven-java-formatter-plugin + maven-java-formatter-plugin + + ${project.basedir}/../../../../contrib/Eclipse-Accumulo-Codestyle.xml + + diff --git a/pom.xml b/pom.xml index 000c8309a0f..41bd12b561c 100644 --- a/pom.xml +++ b/pom.xml @@ -905,6 +905,32 @@ apilyzer-maven-plugin 1.0.1 + + com.googlecode.maven-java-formatter-plugin + maven-java-formatter-plugin + 0.4 + + + **/thrift/*.java + **/proto/*.java + + + + + org.eclipse.tycho + org.eclipse.jdt.core + 3.10.0.v20140604-1726 + compile + + + + + + format + + + + @@ -950,7 +976,7 @@ - + @@ -1112,6 +1138,13 @@ true true + + com.googlecode.maven-java-formatter-plugin + maven-java-formatter-plugin + + ${project.basedir}/../contrib/Eclipse-Accumulo-Codestyle.xml + + diff --git a/server/base/pom.xml b/server/base/pom.xml index c391dff5130..0a22bc075d8 100644 --- a/server/base/pom.xml +++ b/server/base/pom.xml @@ -127,6 +127,15 @@ src/test/resources + + + com.googlecode.maven-java-formatter-plugin + maven-java-formatter-plugin + + ${project.basedir}/../../contrib/Eclipse-Accumulo-Codestyle.xml + + + diff --git a/server/gc/pom.xml b/server/gc/pom.xml index 9b1fa59375b..0f7dd111935 100644 --- a/server/gc/pom.xml +++ b/server/gc/pom.xml @@ -87,4 +87,15 @@ test + + + + com.googlecode.maven-java-formatter-plugin + maven-java-formatter-plugin + + ${project.basedir}/../../contrib/Eclipse-Accumulo-Codestyle.xml + + + + diff --git a/server/master/pom.xml b/server/master/pom.xml index 9172fd22c90..fcfe91965d2 100644 --- a/server/master/pom.xml +++ b/server/master/pom.xml @@ -95,4 +95,15 @@ test + + + + com.googlecode.maven-java-formatter-plugin + maven-java-formatter-plugin + + ${project.basedir}/../../contrib/Eclipse-Accumulo-Codestyle.xml + + + + diff --git a/server/monitor/pom.xml b/server/monitor/pom.xml index 2d033a56641..b0563a8eef8 100644 --- a/server/monitor/pom.xml +++ b/server/monitor/pom.xml @@ -143,5 +143,14 @@ + + + com.googlecode.maven-java-formatter-plugin + maven-java-formatter-plugin + + ${project.basedir}/../../contrib/Eclipse-Accumulo-Codestyle.xml + + + diff --git a/server/native/pom.xml b/server/native/pom.xml index b6202a6a970..616b4fa29ca 100644 --- a/server/native/pom.xml +++ b/server/native/pom.xml @@ -109,6 +109,13 @@ + + com.googlecode.maven-java-formatter-plugin + 
maven-java-formatter-plugin + + ${project.basedir}/../../contrib/Eclipse-Accumulo-Codestyle.xml + + diff --git a/server/tracer/pom.xml b/server/tracer/pom.xml index 1e0774c3c9e..02b399d7986 100644 --- a/server/tracer/pom.xml +++ b/server/tracer/pom.xml @@ -88,6 +88,17 @@ test + + + + com.googlecode.maven-java-formatter-plugin + maven-java-formatter-plugin + + ${project.basedir}/../../contrib/Eclipse-Accumulo-Codestyle.xml + + + + thrift diff --git a/server/tserver/pom.xml b/server/tserver/pom.xml index 2f9daf82d5b..6c34d77d5f8 100644 --- a/server/tserver/pom.xml +++ b/server/tserver/pom.xml @@ -141,5 +141,14 @@ + + + com.googlecode.maven-java-formatter-plugin + maven-java-formatter-plugin + + ${project.basedir}/../../contrib/Eclipse-Accumulo-Codestyle.xml + + + From 437b1e0002db70a98ba88a99189d8bd4ac9a9cbe Mon Sep 17 00:00:00 2001 From: Mike Walch Date: Wed, 6 May 2015 11:40:17 -0400 Subject: [PATCH 2/3] ACCUMULO-3771 Configure automatic Java code style formatting * Built Accumulo which ran maven-java-formatter-plugin for first time --- .../core/client/ClientConfiguration.java | 4 +- .../client/ClientSideIteratorScanner.java | 4 +- .../client/impl/ActiveCompactionImpl.java | 1 - .../client/impl/ConditionalWriterImpl.java | 2 +- .../core/client/impl/OfflineIterator.java | 2 +- .../core/client/impl/ThriftScanner.java | 2 +- .../core/client/impl/ThriftTransportPool.java | 5 ++- .../core/client/lexicoder/PairLexicoder.java | 3 +- .../lexicoder/impl/AbstractLexicoder.java | 3 +- .../client/mapred/AbstractInputFormat.java | 11 ++--- .../client/mapred/AccumuloOutputFormat.java | 2 +- .../core/client/mapred/InputFormatBase.java | 20 ++++----- .../client/mapred/impl/BatchInputSplit.java | 3 +- .../client/mapreduce/AbstractInputFormat.java | 25 +++++------ .../client/mapreduce/AccumuloInputFormat.java | 2 +- .../client/mapreduce/InputFormatBase.java | 21 +++++---- .../client/mapreduce/RangeInputSplit.java | 2 +- .../mapreduce/impl/BatchInputSplit.java | 19 ++++---- .../client/mapreduce/impl/SplitUtils.java | 8 ++-- .../core/client/mock/MockScannerBase.java | 7 +-- .../security/tokens/DelegationToken.java | 3 +- .../accumulo/core/compaction/PatternType.java | 2 +- .../accumulo/core/compaction/SizeType.java | 2 +- .../accumulo/core/compaction/StringType.java | 2 +- .../apache/accumulo/core/compaction/Type.java | 2 +- .../accumulo/core/compaction/UIntType.java | 2 +- .../core/conf/ConfigurationDocGen.java | 4 +- .../apache/accumulo/core/conf/Property.java | 10 ++--- .../apache/accumulo/core/data/KeyExtent.java | 15 ++++--- .../apache/accumulo/core/data/Mutation.java | 14 +++--- .../apache/accumulo/core/data/TabletId.java | 2 + .../accumulo/core/data/impl/TabletIdImpl.java | 10 ++--- .../core/file/DispatchingFileFactory.java | 2 +- .../core/file/blockfile/cache/HeapSize.java | 2 +- .../file/blockfile/cache/LruBlockCache.java | 2 +- .../accumulo/core/file/rfile/RFile.java | 3 +- .../core/iterators/AggregatingIterator.java | 1 - .../accumulo/core/iterators/IteratorUtil.java | 3 +- .../accumulo/core/iterators/LongCombiner.java | 4 +- .../iterators/SortedKeyValueIterator.java | 10 ++--- .../iterators/system/MapFileIterator.java | 1 - .../iterators/system/VisibilityFilter.java | 1 - .../iterators/user/BigDecimalCombiner.java | 3 +- .../core/iterators/user/RegExFilter.java | 1 - .../iterators/user/TransformingIterator.java | 1 - .../core/metadata/schema/MetadataSchema.java | 2 +- .../core/rpc/SaslConnectionParams.java | 7 ++- .../core/rpc/UGIAssumingTransportFactory.java | 2 +- 
...achingHDFSSecretKeyEncryptionStrategy.java | 12 ++--- .../security/crypto/CryptoModuleFactory.java | 8 ++-- ...NonCachingSecretKeyEncryptionStrategy.java | 14 +++--- .../org/apache/accumulo/core/util/Help.java | 2 +- .../org/apache/accumulo/core/util/Jar.java | 2 +- .../accumulo/core/util/UtilWaitThread.java | 1 - .../core/util/format/FormatterFactory.java | 1 - .../accumulo/core/volume/VolumeImpl.java | 1 - .../accumulo/core/zookeeper/ZooUtil.java | 1 - .../core/client/ClientConfigurationTest.java | 4 +- .../client/lexicoder/BytesLexicoderTest.java | 2 +- .../client/lexicoder/DateLexicoderTest.java | 2 +- .../client/lexicoder/StringLexicoderTest.java | 2 +- .../client/lexicoder/TextLexicoderTest.java | 2 +- .../lexicoder/impl/AbstractLexicoderTest.java | 7 +-- .../client/lexicoder/impl/ByteUtilsTest.java | 4 +- .../core/client/mock/MockNamespacesTest.java | 2 +- .../accumulo/core/data/MutationTest.java | 16 +++---- .../FirstEntryInRowIteratorTest.java | 3 +- .../aggregation/NumSummationTest.java | 3 +- .../user/RowDeletingIteratorTest.java | 2 +- .../client/CountingVerifyingReceiver.java | 2 +- .../examples/simple/client/RowOperations.java | 1 - .../simple/mapreduce/TokenFileWordCount.java | 1 - .../examples/simple/reservations/ARS.java | 1 - .../examples/simple/dirlist/CountTest.java | 1 - .../org/apache/accumulo/fate/AdminUtil.java | 2 +- .../org/apache/accumulo/fate/AgeOffStore.java | 1 - .../java/org/apache/accumulo/fate/Fate.java | 1 - .../accumulo/fate/util/AddressUtil.java | 1 - .../accumulo/fate/util/UtilWaitThread.java | 1 - .../zookeeper/DistributedReadWriteLock.java | 1 - .../apache/accumulo/fate/zookeeper/Retry.java | 1 - .../fate/zookeeper/TransactionWatcher.java | 1 - .../accumulo/fate/zookeeper/ZooCache.java | 2 +- .../accumulo/fate/util/AddressUtilTest.java | 1 - .../apache/accumulo/proxy/ProxyServer.java | 3 +- .../apache/accumulo/server/TabletLevel.java | 4 +- .../server/client/ClientServiceHandler.java | 4 +- .../master/state/MetaDataStateStore.java | 2 +- .../server/master/state/TabletStateStore.java | 7 +-- .../master/state/ZooTabletStateStore.java | 5 ++- .../server/problems/ProblemReports.java | 2 +- .../AuthenticationTokenKeyManager.java | 3 +- .../ZooAuthenticationKeyDistributor.java | 1 - .../server/util/MasterMetadataUtil.java | 6 +-- .../server/util/MetadataTableUtil.java | 8 ++-- .../accumulo/server/util/TabletIterator.java | 2 +- .../iterators/MetadataBulkLoadFilterTest.java | 2 +- .../security/handler/ZKAuthenticatorTest.java | 2 +- .../org/apache/accumulo/gc/GCExecutable.java | 2 +- .../gc/GarbageCollectWriteAheadLogs.java | 28 +++++------- .../CloseWriteAheadLogReferences.java | 4 +- .../accumulo/master/TabletGroupWatcher.java | 13 ++---- .../accumulo/master/tableOps/ChooseDir.java | 2 +- .../accumulo/master/tableOps/CleanUp.java | 4 +- .../master/tableOps/CleanUpBulkImport.java | 2 +- .../accumulo/master/tableOps/CloneInfo.java | 2 +- .../master/tableOps/CloneMetadata.java | 2 +- .../master/tableOps/CloneZookeeper.java | 2 +- .../master/tableOps/CompactionDriver.java | 2 +- .../master/tableOps/CompleteBulkImport.java | 2 +- .../accumulo/master/tableOps/CreateDir.java | 2 +- .../master/tableOps/CreateImportDir.java | 2 +- .../accumulo/master/tableOps/ExportInfo.java | 2 +- .../tableOps/FinishCancelCompaction.java | 2 +- .../master/tableOps/FinishCloneTable.java | 2 +- .../tableOps/FinishCreateNamespace.java | 2 +- .../master/tableOps/FinishCreateTable.java | 2 +- .../master/tableOps/FinishImportTable.java | 2 +- .../tableOps/ImportPopulateZookeeper.java 
| 2 +- .../tableOps/ImportSetupPermissions.java | 2 +- .../master/tableOps/ImportedTableInfo.java | 2 +- .../master/tableOps/NamespaceInfo.java | 2 +- .../master/tableOps/PopulateMetadata.java | 2 +- .../master/tableOps/PopulateZookeeper.java | 2 +- .../PopulateZookeeperWithNamespace.java | 2 +- .../accumulo/master/tableOps/TableInfo.java | 2 +- .../master/tableOps/WriteExportFiles.java | 2 +- .../monitor/servlets/trace/NullScanner.java | 3 +- .../monitor/ShowTraceLinkTypeTest.java | 3 +- .../accumulo/tracer/AsyncSpanReceiver.java | 4 +- .../accumulo/tracer/TraceTableStats.java | 30 +++++-------- .../server/GarbageCollectionLogger.java | 3 +- .../accumulo/tserver/MemKeyComparator.java | 2 +- .../tserver/MemKeyConversionIterator.java | 2 +- .../PartialMutationSkippingIterator.java | 2 +- .../tserver/TabletIteratorEnvironment.java | 2 +- .../apache/accumulo/tserver/TabletServer.java | 13 +++--- .../EverythingCompactionStrategy.java | 2 +- .../tserver/log/TabletServerLogger.java | 4 +- .../tserver/tablet/MinorCompactor.java | 2 +- .../accumulo/tserver/tablet/SplitInfo.java | 7 +-- .../accumulo/tserver/tablet/Tablet.java | 44 ++++++------------- .../apache/accumulo/shell/ShellOptions.java | 1 - .../classloader/AccumuloClassLoader.java | 4 +- .../test/EstimateInMemMapOverhead.java | 1 - .../test/InMemoryMapMemoryUsageTest.java | 2 +- .../test/IntObjectMemoryUsageTest.java | 2 +- .../apache/accumulo/test/MemoryUsageTest.java | 2 +- .../test/MutationMemoryUsageTest.java | 2 +- .../accumulo/test/TextMemoryUsageTest.java | 2 +- .../continuous/ContinuousBatchWalker.java | 6 +-- .../accumulo/test/continuous/HistData.java | 2 +- .../continuous/PrintScanTimeHistogram.java | 2 +- .../test/functional/AuthsIterator.java | 2 +- .../accumulo/test/randomwalk/Environment.java | 2 +- .../randomwalk/concurrent/Replication.java | 14 +++--- .../accumulo/harness/AccumuloClusterIT.java | 3 +- .../accumulo/proxy/SimpleProxyBase.java | 2 +- .../test/BalanceWithOfflineTableIT.java | 8 ++-- .../accumulo/test/GarbageCollectWALIT.java | 2 +- .../accumulo/test/TableOperationsIT.java | 4 +- .../org/apache/accumulo/test/VolumeIT.java | 2 +- .../functional/AccumuloInputFormatIT.java | 14 +++--- .../test/functional/ConcurrencyIT.java | 2 +- .../accumulo/test/functional/KerberosIT.java | 6 +-- .../test/functional/KerberosProxyIT.java | 8 ++-- .../test/functional/WALSunnyDayIT.java | 16 +++---- .../performance/RollWALPerformanceIT.java | 14 ++---- .../metadata/FastBulkImportIT.java | 5 +-- .../test/replication/ReplicationIT.java | 4 +- 170 files changed, 348 insertions(+), 427 deletions(-) diff --git a/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java b/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java index b531f264793..f008ab7b4a6 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java +++ b/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java @@ -179,8 +179,8 @@ public ClientConfiguration(List configs) { if (c instanceof AbstractConfiguration) { AbstractConfiguration abstractConfiguration = (AbstractConfiguration) c; if (!abstractConfiguration.isDelimiterParsingDisabled() && abstractConfiguration.getListDelimiter() != '\0') { - log.warn("Client configuration constructed with a Configuration that did not have list delimiter disabled or overridden, multi-valued config " + - "properties may be unavailable"); + log.warn("Client configuration constructed with a Configuration that did not have list delimiter disabled or 
overridden, multi-valued config " + + "properties may be unavailable"); abstractConfiguration.setListDelimiter('\0'); } } diff --git a/core/src/main/java/org/apache/accumulo/core/client/ClientSideIteratorScanner.java b/core/src/main/java/org/apache/accumulo/core/client/ClientSideIteratorScanner.java index c75f54a1eb8..f0775736d1e 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/ClientSideIteratorScanner.java +++ b/core/src/main/java/org/apache/accumulo/core/client/ClientSideIteratorScanner.java @@ -62,7 +62,7 @@ public class ClientSideIteratorScanner extends ScannerOptions implements Scanner private long readaheadThreshold = Constants.SCANNER_DEFAULT_READAHEAD_THRESHOLD; /** - * @deprecated since 1.7.0 was never intended for public use. However this could have been used by anything extending this class. + * @deprecated since 1.7.0 was never intended for public use. However this could have been used by anything extending this class. */ @Deprecated public class ScannerTranslator extends ScannerTranslatorImpl { @@ -209,7 +209,7 @@ public void registerSideChannel(final SortedKeyValueIterator iter) {} @Override public Authorizations getAuthorizations() { - return smi.scanner.getAuthorizations(); + return smi.scanner.getAuthorizations(); } }, false, null); } catch (IOException e) { diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ActiveCompactionImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ActiveCompactionImpl.java index bb7366b57e3..1e429c8b04a 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/impl/ActiveCompactionImpl.java +++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ActiveCompactionImpl.java @@ -48,7 +48,6 @@ public String getTable() throws TableNotFoundException { return Tables.getTableName(instance, new KeyExtent(tac.getExtent()).getTableId().toString()); } - @Override @Deprecated public org.apache.accumulo.core.data.KeyExtent getExtent() { diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java index a2430d45ae7..7cf4928957f 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java +++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java @@ -660,7 +660,7 @@ private void invalidateSession(HostAndPort location, TabletServerMutations scan(ClientContext context, ScanState scanState, int timeOut) throws ScanTimedOutException, AccumuloException, diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftTransportPool.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftTransportPool.java index 730fd730e9c..36d4e840e25 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftTransportPool.java +++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftTransportPool.java @@ -192,7 +192,7 @@ final void setReserved(boolean reserved) { final void checkForStuckIO(long threshold) { /* * checking for stuck io needs to be light weight. - * + * * Tried to call System.currentTimeMillis() and Thread.currentThread() before every io operation.... this dramatically slowed things down. So switched to * incrementing a counter before and after each io operation. 
*/ @@ -477,7 +477,8 @@ public Pair getAnyTransport(List servers, } private TTransport createNewTransport(ThriftTransportKey cacheKey) throws TTransportException { - TTransport transport = ThriftUtil.createClientTransport(cacheKey.getServer(), (int) cacheKey.getTimeout(), cacheKey.getSslParams(), cacheKey.getSaslParams()); + TTransport transport = ThriftUtil.createClientTransport(cacheKey.getServer(), (int) cacheKey.getTimeout(), cacheKey.getSslParams(), + cacheKey.getSaslParams()); log.trace("Creating new connection to connection to {}", cacheKey.getServer()); diff --git a/core/src/main/java/org/apache/accumulo/core/client/lexicoder/PairLexicoder.java b/core/src/main/java/org/apache/accumulo/core/client/lexicoder/PairLexicoder.java index dff946fc5d8..f28912c5bf2 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/lexicoder/PairLexicoder.java +++ b/core/src/main/java/org/apache/accumulo/core/client/lexicoder/PairLexicoder.java @@ -49,7 +49,8 @@ * @since 1.6.0 */ -public class PairLexicoder,B extends Comparable> extends AbstractLexicoder> implements Lexicoder> { +public class PairLexicoder,B extends Comparable> extends AbstractLexicoder> implements + Lexicoder> { private Lexicoder firstLexicoder; private Lexicoder secondLexicoder; diff --git a/core/src/main/java/org/apache/accumulo/core/client/lexicoder/impl/AbstractLexicoder.java b/core/src/main/java/org/apache/accumulo/core/client/lexicoder/impl/AbstractLexicoder.java index e568f256946..5652747be48 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/lexicoder/impl/AbstractLexicoder.java +++ b/core/src/main/java/org/apache/accumulo/core/client/lexicoder/impl/AbstractLexicoder.java @@ -19,5 +19,4 @@ import org.apache.accumulo.core.client.lexicoder.AbstractEncoder; import org.apache.accumulo.core.client.lexicoder.Lexicoder; -public abstract class AbstractLexicoder extends AbstractEncoder implements Lexicoder { -} +public abstract class AbstractLexicoder extends AbstractEncoder implements Lexicoder {} diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java index cb3ef11a5f1..86a7adf72f5 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java @@ -398,7 +398,6 @@ protected abstract static class AbstractRecordReader implements RecordReade private org.apache.accumulo.core.client.mapreduce.RangeInputSplit baseSplit; protected ScannerBase scannerBase; - /** * Extracts Iterators settings from the context to be used by RecordReader. * @@ -496,7 +495,7 @@ public void initialize(InputSplit inSplit, JobConf job) throws IOException { BatchScanner scanner; BatchInputSplit multiRangeSplit = (BatchInputSplit) baseSplit; - try{ + try { // Note: BatchScanner will use at most one thread per tablet, currently BatchInputSplit will not span tablets int scanThreads = 1; scanner = instance.getConnector(principal, token).createBatchScanner(baseSplit.getTableName(), authorizations, scanThreads); @@ -608,7 +607,6 @@ Map>> binOfflineTable(JobConf job, String table return InputConfigurator.binOffline(tableId, ranges, instance, conn); } - /** * Gets the splits of the tables that have been set on the job by reading the metadata table for the specified ranges. 
* @@ -646,9 +644,8 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { String principal = getPrincipal(job); AuthenticationToken token = getAuthenticationToken(job); - boolean batchScan = InputConfigurator.isBatchScan(CLASS, job); - boolean supportBatchScan = - !(tableConfig.isOfflineScan() || tableConfig.shouldUseIsolatedScanners() || tableConfig.shouldUseLocalIterators()); + boolean batchScan = InputConfigurator.isBatchScan(CLASS, job); + boolean supportBatchScan = !(tableConfig.isOfflineScan() || tableConfig.shouldUseIsolatedScanners() || tableConfig.shouldUseLocalIterators()); if (batchScan && !supportBatchScan) throw new IllegalArgumentException("BatchScanner optimization not available for offline scan, isolated, or local iterators"); @@ -716,7 +713,7 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { if (batchScan) { // group ranges by tablet to be read by a BatchScanner ArrayList clippedRanges = new ArrayList(); - for(Range r: extentRanges.getValue()) + for (Range r : extentRanges.getValue()) clippedRanges.add(ke.clip(r)); BatchInputSplit split = new BatchInputSplit(tableName, tableId, clippedRanges, new String[] {location}); diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java index 88ed1846143..af3197c0767 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java @@ -83,7 +83,7 @@ public class AccumuloOutputFormat implements OutputFormat { /** * Sets the connector information needed to communicate with Accumulo in this job. - + * *

* WARNING: Some tokens, when serialized, divulge sensitive information in the configuration as a means to pass the token to MapReduce tasks. This * information is BASE64 encoded to provide a charset safe conversion to a string, but this conversion is not intended to be secure. {@link PasswordToken} is diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java index 517cd19665d..ffb02a9d4b5 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java @@ -177,8 +177,7 @@ public static void setAutoAdjustRanges(JobConf job, boolean enableFeature) { } /** - * Determines whether a configuration has auto-adjust ranges enabled. - * Must be enabled when {@link #setBatchScan(JobConf, boolean)} is true. + * Determines whether a configuration has auto-adjust ranges enabled. Must be enabled when {@link #setBatchScan(JobConf, boolean)} is true. * * @param job * the Hadoop context for the configured job @@ -298,21 +297,20 @@ protected static boolean isOfflineScan(JobConf job) { } /** - * Controls the use of the {@link org.apache.accumulo.core.client.BatchScanner} in this job. - * Using this feature will group Ranges by their source tablet, producing an InputSplit per tablet - * rather than per Range. This batching helps to reduce overhead when querying a large number of small ranges. - * (ex: when doing quad-tree decomposition for spatial queries) + * Controls the use of the {@link org.apache.accumulo.core.client.BatchScanner} in this job. Using this feature will group Ranges by their source tablet, + * producing an InputSplit per tablet rather than per Range. This batching helps to reduce overhead when querying a large number of small ranges. (ex: when + * doing quad-tree decomposition for spatial queries) *

- * In order to achieve good locality of InputSplits this option always clips the input Ranges to tablet boundaries. - * This may result in one input Range contributing to several InputSplits. + * In order to achieve good locality of InputSplits this option always clips the input Ranges to tablet boundaries. This may result in one input Range + * contributing to several InputSplits. *

* Note: that the value of {@link #setAutoAdjustRanges(JobConf, boolean)} is ignored and is assumed to be true when BatchScan option is enabled. *

 * This configuration is incompatible with:
 * <ul>
- *   <li>{@link #setOfflineTableScan(JobConf, boolean)}</li>
- *   <li>{@link #setLocalIterators(JobConf, boolean)}</li>
- *   <li>{@link #setScanIsolation(JobConf, boolean)}</li>
+ * <li>{@link #setOfflineTableScan(JobConf, boolean)}</li>
+ * <li>{@link #setLocalIterators(JobConf, boolean)}</li>
+ * <li>{@link #setScanIsolation(JobConf, boolean)}</li>
 * </ul>
 * <p>
* By default, this feature is disabled. diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/impl/BatchInputSplit.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/impl/BatchInputSplit.java index 619f9cd585c..a046ee307fb 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapred/impl/BatchInputSplit.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/impl/BatchInputSplit.java @@ -23,8 +23,7 @@ import org.apache.hadoop.mapred.InputSplit; /** - * The Class BatchInputSplit. Encapsulates Accumulo ranges for use in Map Reduce jobs. - * Can contain several Ranges per InputSplit. + * The Class BatchInputSplit. Encapsulates Accumulo ranges for use in Map Reduce jobs. Can contain several Ranges per InputSplit. */ public class BatchInputSplit extends org.apache.accumulo.core.client.mapreduce.impl.BatchInputSplit implements InputSplit { diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java index b2f10357bb9..2575fe57382 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java @@ -364,7 +364,7 @@ protected static InputTableConfig getInputTableConfig(JobContext context, String * if the table name set on the configuration doesn't exist * @since 1.6.0 * @deprecated since 1.7.0 This method returns a type that is not part of the public API and is not guaranteed to be stable. The method was deprecated to - * discourage its use. + * discourage its use. */ @Deprecated protected static TabletLocator getTabletLocator(JobContext context, String table) throws TableNotFoundException { @@ -430,18 +430,18 @@ protected abstract static class AbstractRecordReader extends RecordReader contextIterators(TaskAttemptContext context, String tableName); /** - * Configures the iterators on a scanner for the given table name. - * Will attempt to use configuration from the InputSplit, on failure will try to extract them from TaskAttemptContext. + * Configures the iterators on a scanner for the given table name. Will attempt to use configuration from the InputSplit, on failure will try to extract + * them from TaskAttemptContext. 
* * @param context * the Hadoop context for the configured job @@ -526,7 +526,7 @@ public void initialize(InputSplit inSplit, TaskAttemptContext attempt) throws IO BatchInputSplit batchSplit = (BatchInputSplit) split; BatchScanner scanner; - try{ + try { // Note: BatchScanner will use at most one thread per tablet, currently BatchInputSplit will not span tablets int scanThreads = 1; scanner = instance.getConnector(principal, token).createBatchScanner(split.getTableName(), authorizations, scanThreads); @@ -691,9 +691,8 @@ public List getSplits(JobContext context) throws IOException { String principal = getPrincipal(context); AuthenticationToken token = getAuthenticationToken(context); - boolean batchScan = InputConfigurator.isBatchScan(CLASS, context.getConfiguration()); - boolean supportBatchScan = - !(tableConfig.isOfflineScan() || tableConfig.shouldUseIsolatedScanners() || tableConfig.shouldUseLocalIterators()); + boolean batchScan = InputConfigurator.isBatchScan(CLASS, context.getConfiguration()); + boolean supportBatchScan = !(tableConfig.isOfflineScan() || tableConfig.shouldUseIsolatedScanners() || tableConfig.shouldUseLocalIterators()); if (batchScan && !supportBatchScan) throw new IllegalArgumentException("BatchScanner optimization not available for offline scan, isolated, or local iterators"); @@ -743,7 +742,7 @@ public List getSplits(JobContext context) throws IOException { throw new IOException(e); } - // all of this code will add either range per each locations or split ranges and add range-location split + // all of this code will add either range per each locations or split ranges and add range-location split // Map from Range to Array of Locations, we only use this if we're don't split HashMap> splitsToAdd = null; @@ -764,7 +763,7 @@ public List getSplits(JobContext context) throws IOException { if (batchScan) { // group ranges by tablet to be read by a BatchScanner ArrayList clippedRanges = new ArrayList(); - for(Range r: extentRanges.getValue()) + for (Range r : extentRanges.getValue()) clippedRanges.add(ke.clip(r)); BatchInputSplit split = new BatchInputSplit(tableName, tableId, clippedRanges, new String[] {location}); SplitUtils.updateSplit(split, instance, tableConfig, principal, token, auths, logLevel); diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java index 45315dbac4d..33eccc12f6d 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java @@ -60,7 +60,7 @@ public RecordReader createRecordReader(InputSplit split, TaskAttemptC log.setLevel(level); } } else { - throw new IllegalArgumentException("No RecordReader for " + split.getClass().toString()); + throw new IllegalArgumentException("No RecordReader for " + split.getClass().toString()); } return new RecordReaderBase() { diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java index dcc4fd5cfac..ef4509f7244 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java @@ -176,8 +176,7 @@ public static void setAutoAdjustRanges(Job job, boolean enableFeature) { } /** - * Determines whether a configuration has 
auto-adjust ranges enabled. - * Must be enabled when {@link #setBatchScan(Job, boolean)} is true. + * Determines whether a configuration has auto-adjust ranges enabled. Must be enabled when {@link #setBatchScan(Job, boolean)} is true. * * @param context * the Hadoop context for the configured job @@ -297,21 +296,20 @@ protected static boolean isOfflineScan(JobContext context) { } /** - * Controls the use of the {@link org.apache.accumulo.core.client.BatchScanner} in this job. - * Using this feature will group Ranges by their source tablet, producing an InputSplit per tablet - * rather than per Range. This batching helps to reduce overhead when querying a large number of small ranges. - * (ex: when doing quad-tree decomposition for spatial queries) + * Controls the use of the {@link org.apache.accumulo.core.client.BatchScanner} in this job. Using this feature will group Ranges by their source tablet, + * producing an InputSplit per tablet rather than per Range. This batching helps to reduce overhead when querying a large number of small ranges. (ex: when + * doing quad-tree decomposition for spatial queries) *

- * In order to achieve good locality of InputSplits this option always clips the input Ranges to tablet boundaries. - * This may result in one input Range contributing to several InputSplits. + * In order to achieve good locality of InputSplits this option always clips the input Ranges to tablet boundaries. This may result in one input Range + * contributing to several InputSplits. *

* Note: that the value of {@link #setAutoAdjustRanges(Job, boolean)} is ignored and is assumed to be true when BatchScan option is enabled. *

 * This configuration is incompatible with:
 * <ul>
- *   <li>{@link #setOfflineTableScan(org.apache.hadoop.mapreduce.Job, boolean)}</li>
- *   <li>{@link #setLocalIterators(org.apache.hadoop.mapreduce.Job, boolean)}</li>
- *   <li>{@link #setScanIsolation(org.apache.hadoop.mapreduce.Job, boolean)}</li>
+ * <li>{@link #setOfflineTableScan(org.apache.hadoop.mapreduce.Job, boolean)}</li>
+ * <li>{@link #setLocalIterators(org.apache.hadoop.mapreduce.Job, boolean)}</li>
+ * <li>{@link #setScanIsolation(org.apache.hadoop.mapreduce.Job, boolean)}</li>
 * </ul>
 * <p>
* By default, this feature is disabled. @@ -378,6 +376,7 @@ protected void setupIterators(TaskAttemptContext context, Scanner scanner) { /** * Initialize a scanner over the given input split using this task attempt configuration. + * * @deprecated since 1.7.0; Use {@link #contextIterators} instead. */ @Deprecated diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java index b55e2cf07f3..25886d6c271 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java @@ -98,7 +98,7 @@ public static float getProgress(ByteSequence start, ByteSequence end, ByteSequen public float getProgress(Key currentKey) { if (currentKey == null) return 0f; - if(range.contains(currentKey)){ + if (range.contains(currentKey)) { if (range.getStartKey() != null && range.getEndKey() != null) { if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW) != 0) { // just look at the row progress diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/impl/BatchInputSplit.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/impl/BatchInputSplit.java index f674485ebf8..04875ac6f9a 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/impl/BatchInputSplit.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/impl/BatchInputSplit.java @@ -30,8 +30,7 @@ import org.apache.accumulo.core.data.Range; /** - * The Class BatchInputSplit. Encapsulates a set of Accumulo ranges on a single tablet for use in Map Reduce jobs. - * Can contain several Ranges per split. + * The Class BatchInputSplit. Encapsulates a set of Accumulo ranges on a single tablet for use in Map Reduce jobs. Can contain several Ranges per split. 
*/ public class BatchInputSplit extends RangeInputSplit { private Collection ranges; @@ -75,10 +74,12 @@ public float getProgress(Key currentKey) { rangeProgress[i] = SplitUtils.getProgress(range.getStartKey().getRowData(), range.getEndKey().getRowData(), currentKey.getRowData()); } else if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW_COLFAM) != 0) { // just look at the column family progress - rangeProgress[i] = SplitUtils.getProgress(range.getStartKey().getColumnFamilyData(), range.getEndKey().getColumnFamilyData(), currentKey.getColumnFamilyData()); + rangeProgress[i] = SplitUtils.getProgress(range.getStartKey().getColumnFamilyData(), range.getEndKey().getColumnFamilyData(), + currentKey.getColumnFamilyData()); } else if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW_COLFAM_COLQUAL) != 0) { // just look at the column qualifier progress - rangeProgress[i] = SplitUtils.getProgress(range.getStartKey().getColumnQualifierData(), range.getEndKey().getColumnQualifierData(), currentKey.getColumnQualifierData()); + rangeProgress[i] = SplitUtils.getProgress(range.getStartKey().getColumnQualifierData(), range.getEndKey().getColumnQualifierData(), + currentKey.getColumnQualifierData()); } } total += rangeProgress[i]; @@ -107,7 +108,7 @@ public void readFields(DataInput in) throws IOException { int numRanges = in.readInt(); ranges = new ArrayList(numRanges); - for (int i = 0; i < numRanges; ++i){ + for (int i = 0; i < numRanges; ++i) { Range r = new Range(); r.readFields(in); ranges.add(r); @@ -119,7 +120,7 @@ public void write(DataOutput out) throws IOException { super.write(out); out.writeInt(ranges.size()); - for (Range r: ranges) + for (Range r : ranges) r.write(out); } @@ -141,12 +142,12 @@ public Collection getRanges() { } @Override - public Range getRange(){ + public Range getRange() { throw new UnsupportedOperationException(); } @Override - public void setRange(Range range){ + public void setRange(Range range) { throw new UnsupportedOperationException(); } @@ -179,4 +180,4 @@ public Boolean usesLocalIterators() { public void setUsesLocalIterators(Boolean localIterators) { throw new UnsupportedOperationException(); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/impl/SplitUtils.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/impl/SplitUtils.java index 742660db2e7..d19b4992211 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/impl/SplitUtils.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/impl/SplitUtils.java @@ -34,11 +34,11 @@ public class SplitUtils { /** - * Central place to set common split configuration not handled by split constructors. - * The intention is to make it harder to miss optional setters in future refactor. + * Central place to set common split configuration not handled by split constructors. The intention is to make it harder to miss optional setters in future + * refactor. 
*/ - public static void updateSplit(RangeInputSplit split, Instance instance, InputTableConfig tableConfig, - String principal, AuthenticationToken token, Authorizations auths, Level logLevel) { + public static void updateSplit(RangeInputSplit split, Instance instance, InputTableConfig tableConfig, String principal, AuthenticationToken token, + Authorizations auths, Level logLevel) { split.setInstanceName(instance.getInstanceName()); split.setZooKeepers(instance.getZooKeepers()); split.setMockInstance(instance instanceof MockInstance); diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockScannerBase.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockScannerBase.java index 60f2a2ad4d6..3c746e1a912 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockScannerBase.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockScannerBase.java @@ -66,7 +66,7 @@ static class MockIteratorEnvironment implements IteratorEnvironment { private final Authorizations auths; MockIteratorEnvironment(Authorizations auths) { - this.auths = auths; + this.auths = auths; } @Override @@ -98,7 +98,7 @@ public void registerSideChannel(SortedKeyValueIterator iter) { @Override public Authorizations getAuthorizations() { - return auths; + return auths; } SortedKeyValueIterator getTopLevelIterator(SortedKeyValueIterator iter) { @@ -127,7 +127,8 @@ public Iterator> iterator() { throw new UnsupportedOperationException(); } - @Override public Authorizations getAuthorizations() { + @Override + public Authorizations getAuthorizations() { return auths; } } diff --git a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/DelegationToken.java b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/DelegationToken.java index 8256d66ac11..43a16d373a7 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/DelegationToken.java +++ b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/DelegationToken.java @@ -25,7 +25,8 @@ * itself. The delegation token serves as a mechanism to obtain a transient shared secret with Accumulo using a {@link KerberosToken} and then run some task * authenticating with that shared secret. * - *

Obtain a delegation token by calling {@link SecurityOperations#getDelegationToken(org.apache.accumulo.core.client.admin.DelegationTokenConfig)} + *

+ * Obtain a delegation token by calling {@link SecurityOperations#getDelegationToken(org.apache.accumulo.core.client.admin.DelegationTokenConfig)} * * @since 1.7.0 */ diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/PatternType.java b/core/src/main/java/org/apache/accumulo/core/compaction/PatternType.java index c52dcb4d5d4..0603e0e3f98 100644 --- a/core/src/main/java/org/apache/accumulo/core/compaction/PatternType.java +++ b/core/src/main/java/org/apache/accumulo/core/compaction/PatternType.java @@ -25,4 +25,4 @@ public String convert(String str) { Pattern.compile(str); return str; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/SizeType.java b/core/src/main/java/org/apache/accumulo/core/compaction/SizeType.java index c2af401c180..ea8660525de 100644 --- a/core/src/main/java/org/apache/accumulo/core/compaction/SizeType.java +++ b/core/src/main/java/org/apache/accumulo/core/compaction/SizeType.java @@ -27,4 +27,4 @@ public String convert(String str) { Preconditions.checkArgument(size > 0); return Long.toString(size); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/StringType.java b/core/src/main/java/org/apache/accumulo/core/compaction/StringType.java index 7098a5c5aac..595d3886ceb 100644 --- a/core/src/main/java/org/apache/accumulo/core/compaction/StringType.java +++ b/core/src/main/java/org/apache/accumulo/core/compaction/StringType.java @@ -21,4 +21,4 @@ class StringType implements Type { public String convert(String str) { return str; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/Type.java b/core/src/main/java/org/apache/accumulo/core/compaction/Type.java index d8f81a6fd92..ef2f540af28 100644 --- a/core/src/main/java/org/apache/accumulo/core/compaction/Type.java +++ b/core/src/main/java/org/apache/accumulo/core/compaction/Type.java @@ -18,4 +18,4 @@ interface Type { String convert(String str); -} \ No newline at end of file +} diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/UIntType.java b/core/src/main/java/org/apache/accumulo/core/compaction/UIntType.java index c8880fcd675..09923c369c3 100644 --- a/core/src/main/java/org/apache/accumulo/core/compaction/UIntType.java +++ b/core/src/main/java/org/apache/accumulo/core/compaction/UIntType.java @@ -24,4 +24,4 @@ public String convert(String str) { Preconditions.checkArgument(Integer.parseInt(str) > 0); return str; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java b/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java index ca7d81f9235..5d95410c4af 100644 --- a/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java +++ b/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java @@ -358,9 +358,9 @@ void generateAsciidoc() { /* * Generates documentation for conf/accumulo-site.xml file usage. Arguments are: "--generate-doc", file to write to. 
- * + * * @param args command-line arguments - * + * * @throws IllegalArgumentException if args is invalid */ public static void main(String[] args) throws FileNotFoundException, UnsupportedEncodingException { diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java index a5bef0ae26b..09d462d44a3 100644 --- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java +++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java @@ -39,7 +39,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - public enum Property { // Crypto-related properties @Experimental @@ -274,10 +273,8 @@ public enum Property { "The maximum number of concurrent major compactions for a tablet server"), TSERV_MINC_MAXCONCURRENT("tserver.compaction.minor.concurrent.max", "4", PropertyType.COUNT, "The maximum number of concurrent minor compactions for a tablet server"), - TSERV_MAJC_TRACE_PERCENT("tserver.compaction.major.trace.percent", "0.1", PropertyType.FRACTION, - "The percent of major compactions to trace"), - TSERV_MINC_TRACE_PERCENT("tserver.compaction.minor.trace.percent", "0.1", PropertyType.FRACTION, - "The percent of minor compactions to trace"), + TSERV_MAJC_TRACE_PERCENT("tserver.compaction.major.trace.percent", "0.1", PropertyType.FRACTION, "The percent of major compactions to trace"), + TSERV_MINC_TRACE_PERCENT("tserver.compaction.minor.trace.percent", "0.1", PropertyType.FRACTION, "The percent of minor compactions to trace"), TSERV_COMPACTION_WARN_TIME("tserver.compaction.warn.time", "10m", PropertyType.TIMEDURATION, "When a compaction has not made progress for this time period, a warning will be logged"), TSERV_BLOOM_LOAD_MAXCONCURRENT("tserver.bloom.load.concurrent.max", "4", PropertyType.COUNT, @@ -560,8 +557,7 @@ public enum Property { "Amount of time to wait before first checking for replication work, not useful outside of tests"), REPLICATION_WORK_PROCESSOR_PERIOD("replication.work.processor.period", "0s", PropertyType.TIMEDURATION, "Amount of time to wait before re-checking for replication work, not useful outside of tests"), - REPLICATION_TRACE_PERCENT("replication.trace.percent", "0.1", PropertyType.FRACTION, - "The sampling percentage to use for replication traces"), + REPLICATION_TRACE_PERCENT("replication.trace.percent", "0.1", PropertyType.FRACTION, "The sampling percentage to use for replication traces"), ; diff --git a/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java b/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java index e9f3b65c5b5..7bbb0c26795 100644 --- a/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java +++ b/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java @@ -127,8 +127,8 @@ public int hashCode() { @Override public boolean equals(Object o) { - if(o instanceof KeyExtent){ - return wrapped.equals(((KeyExtent)o).wrapped); + if (o instanceof KeyExtent) { + return wrapped.equals(((KeyExtent) o).wrapped); } return false; @@ -179,7 +179,7 @@ public boolean isRootTablet() { return wrapped.isRootTablet(); } - private static SortedSet unwrap(Set tablets){ + private static SortedSet unwrap(Set tablets) { SortedSet trans = new TreeSet<>(); for (KeyExtent wrapper : tablets) { trans.add(wrapper.wrapped); @@ -188,11 +188,11 @@ private static SortedSet unwrap(Se return trans; } - private static KeyExtent wrap(org.apache.accumulo.core.data.impl.KeyExtent ke){ + private static KeyExtent wrap(org.apache.accumulo.core.data.impl.KeyExtent 
ke) { return new KeyExtent(ke.getTableId(), ke.getEndRow(), ke.getPrevEndRow()); } - private static SortedSet wrap(Collection unwrapped){ + private static SortedSet wrap(Collection unwrapped) { SortedSet wrapped = new TreeSet<>(); for (org.apache.accumulo.core.data.impl.KeyExtent wrappee : unwrapped) { wrapped.add(wrap(wrappee)); @@ -225,7 +225,7 @@ public static Value encodePrevEndRow(Text per) { } public static Mutation getPrevRowUpdateMutation(KeyExtent ke) { - return org.apache.accumulo.core.data.impl.KeyExtent.getPrevRowUpdateMutation(ke.wrapped); + return org.apache.accumulo.core.data.impl.KeyExtent.getPrevRowUpdateMutation(ke.wrapped); } public static byte[] tableOfMetadataRow(Text row) { @@ -246,12 +246,13 @@ public static Set findOverlapping(KeyExtent nke, SortedSet public static Set findOverlapping(KeyExtent nke, SortedMap extents) { SortedMap trans = new TreeMap<>(); - for(Entry entry : extents.entrySet()){ + for (Entry entry : extents.entrySet()) { trans.put(entry.getKey().wrapped, entry.getValue()); } return wrap(org.apache.accumulo.core.data.impl.KeyExtent.findOverlapping(nke.wrapped, trans)); } + public static Text getMetadataEntry(KeyExtent extent) { return org.apache.accumulo.core.data.impl.KeyExtent.getMetadataEntry(extent.wrapped); } diff --git a/core/src/main/java/org/apache/accumulo/core/data/Mutation.java b/core/src/main/java/org/apache/accumulo/core/data/Mutation.java index 1cc424238d7..aba5cdcbcd3 100644 --- a/core/src/main/java/org/apache/accumulo/core/data/Mutation.java +++ b/core/src/main/java/org/apache/accumulo/core/data/Mutation.java @@ -98,10 +98,10 @@ private void serialize() { } } - /* This is so hashCode & equals can be called without changing this object. - * - * It will return a copy of the current data buffer if serialized has not been - * called previously. Otherwise, this.data will be returned since the buffer is + /* + * This is so hashCode & equals can be called without changing this object. + * + * It will return a copy of the current data buffer if serialized has not been called previously. Otherwise, this.data will be returned since the buffer is * null and will not change. */ private ByteBuffer serializedSnapshot() { @@ -1141,11 +1141,9 @@ private boolean equalMutation(Mutation m) { } /** - * Creates a {@link org.apache.accumulo.core.data.thrift.TMutation} object - * containing this Mutation's data. + * Creates a {@link org.apache.accumulo.core.data.thrift.TMutation} object containing this Mutation's data. * - * Note that this method will move the Mutation into a "serialized" state - * that will prevent users from adding more data via Mutation#put(). + * Note that this method will move the Mutation into a "serialized" state that will prevent users from adding more data via Mutation#put(). 
* * @return a thrift form of this Mutation */ diff --git a/core/src/main/java/org/apache/accumulo/core/data/TabletId.java b/core/src/main/java/org/apache/accumulo/core/data/TabletId.java index 2dfba479a20..113183d3aca 100644 --- a/core/src/main/java/org/apache/accumulo/core/data/TabletId.java +++ b/core/src/main/java/org/apache/accumulo/core/data/TabletId.java @@ -26,6 +26,8 @@ */ public interface TabletId extends Comparable { public Text getTableId(); + public Text getEndRow(); + public Text getPrevEndRow(); } diff --git a/core/src/main/java/org/apache/accumulo/core/data/impl/TabletIdImpl.java b/core/src/main/java/org/apache/accumulo/core/data/impl/TabletIdImpl.java index 395dd222c07..61e882a867e 100644 --- a/core/src/main/java/org/apache/accumulo/core/data/impl/TabletIdImpl.java +++ b/core/src/main/java/org/apache/accumulo/core/data/impl/TabletIdImpl.java @@ -30,20 +30,20 @@ public class TabletIdImpl implements TabletId { public static final Function KE_2_TID_OLD = new Function() { @Override public TabletId apply(org.apache.accumulo.core.data.KeyExtent input) { - //the following if null check is to appease findbugs... grumble grumble spent a good part of my morning looking into this + // the following if null check is to appease findbugs... grumble grumble spent a good part of my morning looking into this // http://sourceforge.net/p/findbugs/bugs/1139/ // https://code.google.com/p/guava-libraries/issues/detail?id=920 - if(input == null) + if (input == null) return null; return new TabletIdImpl(input); } }; @SuppressWarnings("deprecation") - public static final Function TID_2_KE_OLD = new Function() { + public static final Function TID_2_KE_OLD = new Function() { @Override public org.apache.accumulo.core.data.KeyExtent apply(TabletId input) { - if(input == null) + if (input == null) return null; return new org.apache.accumulo.core.data.KeyExtent(input.getTableId(), input.getEndRow(), input.getPrevEndRow()); } @@ -94,7 +94,7 @@ public boolean equals(Object o) { } @Override - public String toString(){ + public String toString() { return ke.toString(); } } diff --git a/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java b/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java index 128a931af8d..1e7ecc957e2 100644 --- a/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java +++ b/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java @@ -133,4 +133,4 @@ public FileSKVIterator openIndex(String file, FileSystem fs, Configuration conf, return findFileFactory(file).openIndex(file, fs, conf, acuconf, dCache, iCache); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/HeapSize.java b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/HeapSize.java index 04db3cd583b..b1d975227ee 100644 --- a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/HeapSize.java +++ b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/HeapSize.java @@ -30,7 +30,7 @@ * *

  * public class SampleObject implements HeapSize {
- *
+ * 
  *   int[] numbers;
  *   int x;
  * }
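The HeapSize Javadoc above shows only the fields of SampleObject. As an illustrative aside (not part of the patch), a minimal sketch of how such a class might satisfy the interface is shown below; it assumes HeapSize declares a single long heapSize() method and uses rough, hand-estimated byte counts.

import org.apache.accumulo.core.file.blockfile.cache.HeapSize;

public class SampleObject implements HeapSize {

  int[] numbers;
  int x;

  @Override
  public long heapSize() {
    // Rough estimate (assumed layout): object header + int field + array reference.
    long size = 16 + 4 + 8;
    if (numbers != null) {
      // Array header plus 4 bytes per int element.
      size += 16 + 4L * numbers.length;
    }
    return size;
  }
}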
diff --git a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/LruBlockCache.java b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/LruBlockCache.java
index af2f6df091c..7d960561af5 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/LruBlockCache.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/LruBlockCache.java
@@ -513,7 +513,7 @@ public long getEvictedCount() {
 
   /*
    * Eviction thread. Sits in waiting state until an eviction is triggered when the cache size grows above the acceptable level.

-   *
+   * 
    * Thread is triggered into action by {@link LruBlockCache#runEviction()}
    */
   private static class EvictionThread extends Thread {
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
index c6de9ccfcf0..060d9560a44 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
@@ -68,7 +68,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 public class RFile {
 
   public static final String EXTENSION = "rf";
@@ -565,7 +564,7 @@ private void _next() throws IOException {
 
         currBlock = getDataBlock(indexEntry);
 
         checkRange = range.afterEndKey(indexEntry.getKey());
-        if (!checkRange)
+        if (!checkRange)
           hasTop = true;
 
       } else {
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/AggregatingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/AggregatingIterator.java
index 6b5a8fcd6bf..3292cc2df59 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/AggregatingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/AggregatingIterator.java
@@ -33,7 +33,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 /**
  * This iterator wraps another iterator. It automatically aggregates.
  *
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
index 5efa2c21a87..031d13f7bd9 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
@@ -66,7 +66,8 @@ public static enum IteratorScope {
     /**
      * Fetch the correct configuration key prefix for the given scope. Throws an IllegalArgumentException if no property exists for the given scope.
      *
-     * @deprecated since 1.7.0 This method returns a type that is not part of the public API and is not guaranteed to be stable. The method was deprecated to discourage its use.
+     * @deprecated since 1.7.0 This method returns a type that is not part of the public API and is not guaranteed to be stable. The method was deprecated to
+     *             discourage its use.
      */
     @Deprecated
     public static Property getProperty(IteratorScope scope) {
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/LongCombiner.java b/core/src/main/java/org/apache/accumulo/core/iterators/LongCombiner.java
index 2a225fd5a30..cbc76ab22db 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/LongCombiner.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/LongCombiner.java
@@ -167,7 +167,7 @@ protected Long decodeUnchecked(byte[] b, int offset, int len) {
     return decodeStatic(b, offset, len);
   }
 
-  // refactor? it's public, so cannot remove
+  // refactor? it's public, so cannot remove
   public static long decode(byte[] b, int offset) {
     if (b.length < offset + 8)
       throw new ValueFormatException("trying to convert to long, but byte array isn't long enough, wanted " + (offset + 8) + " found " + b.length);
@@ -179,7 +179,7 @@ public static long decodeStatic(byte[] b, int offset, int len) {
     if (b.length < offset + 8 || len < 8)
       throw new ValueFormatException("trying to convert to long, but byte array isn't long enough, wanted " + (offset + 8) + " found " + len);
     return (((long) b[offset + 0] << 56) + ((long) (b[offset + 1] & 255) << 48) + ((long) (b[offset + 2] & 255) << 40) + ((long) (b[offset + 3] & 255) << 32)
-        + ((long) (b[offset + 4] & 255) << 24) + ((b[offset + 5] & 255) << 16) + ((b[offset + 6] & 255) << 8) + ((b[offset + 7] & 255) << 0));
+        + ((long) (b[offset + 4] & 255) << 24) + ((b[offset + 5] & 255) << 16) + ((b[offset + 6] & 255) << 8) + ((b[offset + 7] & 255) << 0));
   }
 }
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java
index f6d317059f7..ce5ef24c4a4 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java
@@ -107,9 +107,9 @@ public interface SortedKeyValueIterator,V extend
    * compaction scopes the iterator may see deletion entries. These entries should be preserved by all iterators except ones that are strictly scan-time
    * iterators that will never be configured for the minc or majc scopes. Deletion entries are only removed during full major compactions.
    *
-   * For performance reasons, iterators reserve the right to reuse objects returned by getTopKey when {@link #next()} is called, changing the data
-   * that the object references. Iterators that need to save an object returned by getTopKey ought to copy the object's data into a new object
-   * in order to avoid aliasing bugs.
+   * For performance reasons, iterators reserve the right to reuse objects returned by getTopKey when {@link #next()} is called, changing the data that
+   * the object references. Iterators that need to save an object returned by getTopKey ought to copy the object's data into a new object in order to
+   * avoid aliasing bugs.
    *
    * @return K
    * @exception IllegalStateException
@@ -123,8 +123,8 @@ public interface SortedKeyValueIterator,V extend
    * Returns top value. Can be called 0 or more times without affecting behavior of next() or hasTop().
    *
    * For performance reasons, iterators reserve the right to reuse objects returned by getTopValue when {@link #next()} is called, changing the
-   * underlying data that the object references. Iterators that need to save an object returned by getTopValue ought to copy the object's data
-   * into a new object in order to avoid aliasing bugs.
+   * underlying data that the object references. Iterators that need to save an object returned by getTopValue ought to copy the object's data into a
+   * new object in order to avoid aliasing bugs.
    *
    * @return V
    * @exception IllegalStateException
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/system/MapFileIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/system/MapFileIterator.java
index da5f37c10c5..9d59570f107 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/MapFileIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/MapFileIterator.java
@@ -40,7 +40,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 public class MapFileIterator implements FileSKVIterator {
   private static final Logger log = LoggerFactory.getLogger(MapFileIterator.class);
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
index 069d99974e6..a204ad17ea2 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
@@ -32,7 +32,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 public class VisibilityFilter extends Filter {
   protected VisibilityEvaluator ve;
   protected Text defaultVisibility;
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/BigDecimalCombiner.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/BigDecimalCombiner.java
index 3a6dbcbb813..e90e87e0c2e 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/BigDecimalCombiner.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/BigDecimalCombiner.java
@@ -101,7 +101,8 @@ public BigDecimal typedReduce(Key key, Iterator iter) {
    * Provides the ability to encode scientific notation.
    *
    */
-  public static class BigDecimalEncoder extends AbstractLexicoder implements org.apache.accumulo.core.iterators.TypedValueCombiner.Encoder {
+  public static class BigDecimalEncoder extends AbstractLexicoder implements
+      org.apache.accumulo.core.iterators.TypedValueCombiner.Encoder {
     @Override
     public byte[] encode(BigDecimal v) {
       return v.toString().getBytes(UTF_8);
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
index c61a1608df5..9fccaa7ff2d 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
@@ -34,7 +34,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 /**
  * A Filter that matches entries based on Java regular expressions.
  */
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
index 52fa70e54af..66de3d6dc5e 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
@@ -53,7 +53,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 /**
  * The TransformingIterator allows portions of a key (except for the row) to be transformed. This iterator handles the details that come with modifying keys
  * (i.e., that the sort order could change). In order to do so, however, the iterator must put all keys sharing the same prefix in memory. Prefix is defined as
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
index c787d6d0524..fe75f9e820c 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
@@ -289,7 +289,7 @@ public static void getFile(Key k, Text buff) {
      */
     public static class CurrentLogsSection {
       private static final Section section = new Section(RESERVED_PREFIX + "wal+", true, RESERVED_PREFIX + "wal,", false);
-      private static byte LEFT_BRACKET = (byte)'[';
+      private static byte LEFT_BRACKET = (byte) '[';
       public static final Text COLF = new Text("log");
       public static final Value UNUSED = new Value("unused".getBytes(UTF_8));
diff --git a/core/src/main/java/org/apache/accumulo/core/rpc/SaslConnectionParams.java b/core/src/main/java/org/apache/accumulo/core/rpc/SaslConnectionParams.java
index 0e889678b2d..956fee29b93 100644
--- a/core/src/main/java/org/apache/accumulo/core/rpc/SaslConnectionParams.java
+++ b/core/src/main/java/org/apache/accumulo/core/rpc/SaslConnectionParams.java
@@ -53,9 +53,7 @@ public class SaslConnectionParams {
    * Enumeration around {@link Sasl#QOP}
    */
  public enum QualityOfProtection {
-    AUTH("auth"),
-    AUTH_INT("auth-int"),
-    AUTH_CONF("auth-conf");
+    AUTH("auth"), AUTH_INT("auth-int"), AUTH_CONF("auth-conf");
 
     private final String quality;
 
@@ -226,6 +224,7 @@ protected void updateFromConfiguration(ClientConfiguration conf) {
 
   public Map getSaslProperties() {
     return Collections.unmodifiableMap(saslProperties);
   }
+
   /**
    * The quality of protection used with SASL. See {@link Sasl#QOP} for more information.
   */
@@ -263,7 +262,7 @@ public CallbackHandler getCallbackHandler() {
 
   @Override
   public int hashCode() {
-    HashCodeBuilder hcb = new HashCodeBuilder(23,29);
+    HashCodeBuilder hcb = new HashCodeBuilder(23, 29);
     hcb.append(kerberosServerPrimary).append(saslProperties).append(qop.hashCode()).append(principal).append(mechanism).append(callbackHandler);
     return hcb.toHashCode();
   }
diff --git a/core/src/main/java/org/apache/accumulo/core/rpc/UGIAssumingTransportFactory.java b/core/src/main/java/org/apache/accumulo/core/rpc/UGIAssumingTransportFactory.java
index 77a3ce3acc3..b1772fa6e67 100644
--- a/core/src/main/java/org/apache/accumulo/core/rpc/UGIAssumingTransportFactory.java
+++ b/core/src/main/java/org/apache/accumulo/core/rpc/UGIAssumingTransportFactory.java
@@ -52,4 +52,4 @@ public TTransport run() {
       }
     });
   }
-}
\ No newline at end of file
+}
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/CachingHDFSSecretKeyEncryptionStrategy.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/CachingHDFSSecretKeyEncryptionStrategy.java
index c685a2d4d80..4ee27ef1e91 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/CachingHDFSSecretKeyEncryptionStrategy.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/CachingHDFSSecretKeyEncryptionStrategy.java
@@ -50,7 +50,7 @@ public CryptoModuleParameters encryptSecretKey(CryptoModuleParameters context) {
       secretKeyCache.ensureSecretKeyCacheInitialized(context);
       doKeyEncryptionOperation(Cipher.WRAP_MODE, context);
     } catch (IOException e) {
-      log.error("{}", e.getMessage(),e);
+      log.error("{}", e.getMessage(), e);
       throw new RuntimeException(e);
     }
     return context;
@@ -62,7 +62,7 @@ public CryptoModuleParameters decryptSecretKey(CryptoModuleParameters context) {
       secretKeyCache.ensureSecretKeyCacheInitialized(context);
       doKeyEncryptionOperation(Cipher.UNWRAP_MODE, context);
     } catch (IOException e) {
-      log.error("{}", e.getMessage(),e);
+      log.error("{}", e.getMessage(), e);
       throw new RuntimeException(e);
     }
     return context;
@@ -74,7 +74,7 @@ private void doKeyEncryptionOperation(int encryptionMode, CryptoModuleParameters
     try {
       cipher.init(encryptionMode, new SecretKeySpec(secretKeyCache.getKeyEncryptionKey(), params.getAlgorithmName()));
     } catch (InvalidKeyException e) {
-      log.error("{}", e.getMessage(),e);
+      log.error("{}", e.getMessage(), e);
       throw new RuntimeException(e);
     }
 
@@ -83,10 +83,10 @@ private void doKeyEncryptionOperation(int encryptionMode, CryptoModuleParameters
         Key plaintextKey = cipher.unwrap(params.getEncryptedKey(), params.getAlgorithmName(), Cipher.SECRET_KEY);
         params.setPlaintextKey(plaintextKey.getEncoded());
       } catch (InvalidKeyException e) {
-        log.error("{}", e.getMessage(),e);
+        log.error("{}", e.getMessage(), e);
         throw new RuntimeException(e);
       } catch (NoSuchAlgorithmException e) {
-        log.error("{}", e.getMessage(),e);
+        log.error("{}", e.getMessage(), e);
         throw new RuntimeException(e);
       }
     } else {
@@ -96,7 +96,7 @@ private void doKeyEncryptionOperation(int encryptionMode, CryptoModuleParameters
         params.setEncryptedKey(encryptedSecretKey);
         params.setOpaqueKeyEncryptionKeyID(secretKeyCache.getPathToKeyName());
       } catch (InvalidKeyException e) {
-        log.error("{}", e.getMessage(),e);
+        log.error("{}", e.getMessage(), e);
        throw new RuntimeException(e);
       } catch (IllegalBlockSizeException e) {
         log.error("{}", e.getMessage(), e);
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java
b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java index e2058bd23cf..e8e23265467 100644 --- a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java +++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java @@ -107,12 +107,12 @@ private static CryptoModule instantiateCryptoModule(String cryptoModuleClassname } catch (InstantiationException e) { log.warn(String.format("Got instantiation exception %s when instantiating crypto module \"%s\". No encryption will be used.", e.getCause().getClass() .getName(), cryptoModuleClassname)); - log.warn("InstantiationException",e.getCause()); + log.warn("InstantiationException", e.getCause()); return new NullCryptoModule(); } catch (IllegalAccessException e) { log.warn(String.format("Got illegal access exception when trying to instantiate crypto module \"%s\". No encryption will be used.", cryptoModuleClassname)); - log.warn("IllegalAccessException",e); + log.warn("IllegalAccessException", e); return new NullCryptoModule(); } } @@ -184,12 +184,12 @@ private static SecretKeyEncryptionStrategy instantiateSecreteKeyEncryptionStrate } catch (InstantiationException e) { log.warn(String.format("Got instantiation exception %s when instantiating secret key encryption strategy \"%s\". No encryption will be used.", e .getCause().getClass().getName(), className)); - log.warn("InstantiationException",e.getCause()); + log.warn("InstantiationException", e.getCause()); return new NullSecretKeyEncryptionStrategy(); } catch (IllegalAccessException e) { log.warn(String.format("Got illegal access exception when trying to instantiate secret key encryption strategy \"%s\". No encryption will be used.", className)); - log.warn("IllegalAccessException",e); + log.warn("IllegalAccessException", e); return new NullSecretKeyEncryptionStrategy(); } } diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/NonCachingSecretKeyEncryptionStrategy.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/NonCachingSecretKeyEncryptionStrategy.java index ce9da775196..1dd8d606dae 100644 --- a/core/src/main/java/org/apache/accumulo/core/security/crypto/NonCachingSecretKeyEncryptionStrategy.java +++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/NonCachingSecretKeyEncryptionStrategy.java @@ -83,7 +83,7 @@ private void doKeyEncryptionOperation(int encryptionMode, CryptoModuleParameters try { cipher.init(encryptionMode, new SecretKeySpec(keyEncryptionKey, params.getAlgorithmName())); } catch (InvalidKeyException e) { - log.error("{}", e.getMessage(),e); + log.error("{}", e.getMessage(), e); throw new RuntimeException(e); } @@ -92,10 +92,10 @@ private void doKeyEncryptionOperation(int encryptionMode, CryptoModuleParameters Key plaintextKey = cipher.unwrap(params.getEncryptedKey(), params.getAlgorithmName(), Cipher.SECRET_KEY); params.setPlaintextKey(plaintextKey.getEncoded()); } catch (InvalidKeyException e) { - log.error("{}", e.getMessage(),e); + log.error("{}", e.getMessage(), e); throw new RuntimeException(e); } catch (NoSuchAlgorithmException e) { - log.error("{}", e.getMessage(),e); + log.error("{}", e.getMessage(), e); throw new RuntimeException(e); } } else { @@ -105,10 +105,10 @@ private void doKeyEncryptionOperation(int encryptionMode, CryptoModuleParameters params.setEncryptedKey(encryptedSecretKey); params.setOpaqueKeyEncryptionKeyID(pathToKeyName); } catch (InvalidKeyException e) { - log.error("{}", e.getMessage(),e); + log.error("{}", 
e.getMessage(), e); throw new RuntimeException(e); } catch (IllegalBlockSizeException e) { - log.error("{}", e.getMessage(),e); + log.error("{}", e.getMessage(), e); throw new RuntimeException(e); } @@ -159,7 +159,7 @@ public CryptoModuleParameters encryptSecretKey(CryptoModuleParameters params) { doKeyEncryptionOperation(Cipher.WRAP_MODE, params, fullPath, pathToKey, fs); } catch (IOException e) { - log.error("{}", e.getMessage(),e); + log.error("{}", e.getMessage(), e); throw new RuntimeException(e); } @@ -183,7 +183,7 @@ public CryptoModuleParameters decryptSecretKey(CryptoModuleParameters params) { doKeyEncryptionOperation(Cipher.UNWRAP_MODE, params, pathToKeyName, pathToKey, fs); } catch (IOException e) { - log.error("{}", e.getMessage(),e); + log.error("{}", e.getMessage(), e); throw new RuntimeException(e); } diff --git a/core/src/main/java/org/apache/accumulo/core/util/Help.java b/core/src/main/java/org/apache/accumulo/core/util/Help.java index fda72fe52d9..f090b18cddd 100644 --- a/core/src/main/java/org/apache/accumulo/core/util/Help.java +++ b/core/src/main/java/org/apache/accumulo/core/util/Help.java @@ -32,4 +32,4 @@ public String keyword() { public void execute(final String[] args) throws Exception { Main.printUsage(); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/apache/accumulo/core/util/Jar.java b/core/src/main/java/org/apache/accumulo/core/util/Jar.java index 2fc6d17050c..e5c2d1c7def 100644 --- a/core/src/main/java/org/apache/accumulo/core/util/Jar.java +++ b/core/src/main/java/org/apache/accumulo/core/util/Jar.java @@ -56,4 +56,4 @@ public void execute(final String[] args) throws Exception { String[] newArgs = Main.stripArgs(args, mainClass.getName().equals(candidateMainClass) ? 2 : 1); Main.execMainClass(mainClass, newArgs); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/apache/accumulo/core/util/UtilWaitThread.java b/core/src/main/java/org/apache/accumulo/core/util/UtilWaitThread.java index d7005a42e29..01f5fa87845 100644 --- a/core/src/main/java/org/apache/accumulo/core/util/UtilWaitThread.java +++ b/core/src/main/java/org/apache/accumulo/core/util/UtilWaitThread.java @@ -19,7 +19,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - public class UtilWaitThread { private static final Logger log = LoggerFactory.getLogger(UtilWaitThread.class); diff --git a/core/src/main/java/org/apache/accumulo/core/util/format/FormatterFactory.java b/core/src/main/java/org/apache/accumulo/core/util/format/FormatterFactory.java index 65b34f25d7e..7eb542f4e45 100644 --- a/core/src/main/java/org/apache/accumulo/core/util/format/FormatterFactory.java +++ b/core/src/main/java/org/apache/accumulo/core/util/format/FormatterFactory.java @@ -23,7 +23,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - public class FormatterFactory { private static final Logger log = LoggerFactory.getLogger(FormatterFactory.class); diff --git a/core/src/main/java/org/apache/accumulo/core/volume/VolumeImpl.java b/core/src/main/java/org/apache/accumulo/core/volume/VolumeImpl.java index c91c36e708c..9285cb676fd 100644 --- a/core/src/main/java/org/apache/accumulo/core/volume/VolumeImpl.java +++ b/core/src/main/java/org/apache/accumulo/core/volume/VolumeImpl.java @@ -28,7 +28,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Basic Volume implementation that contains a FileSystem and a base path that should be used within that filesystem. 
*/ diff --git a/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java b/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java index be05730bd2d..07a38dc420a 100644 --- a/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java +++ b/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java @@ -32,7 +32,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - public class ZooUtil extends org.apache.accumulo.fate.zookeeper.ZooUtil { private static final Logger log = LoggerFactory.getLogger(ZooUtil.class); diff --git a/core/src/test/java/org/apache/accumulo/core/client/ClientConfigurationTest.java b/core/src/test/java/org/apache/accumulo/core/client/ClientConfigurationTest.java index 6f91b0bc5c2..b1f196862fb 100644 --- a/core/src/test/java/org/apache/accumulo/core/client/ClientConfigurationTest.java +++ b/core/src/test/java/org/apache/accumulo/core/client/ClientConfigurationTest.java @@ -114,7 +114,7 @@ public void testMultipleValues() throws ConfigurationException { assertEquals(1, conf.getList(ClientProperty.TRACE_SPAN_RECEIVERS.getKey()).size()); // only used internally - Map map = new HashMap<>(); + Map map = new HashMap<>(); map.put(ClientProperty.INSTANCE_ZK_HOST.getKey(), val); map.put(ClientProperty.TRACE_SPAN_RECEIVERS.getKey(), val); conf = new ClientConfiguration(new MapConfiguration(map)); @@ -131,7 +131,7 @@ public void testGetAllPropertiesWithPrefix() { conf.addProperty(ClientProperty.TRACE_SPAN_RECEIVER_PREFIX.getKey() + "second", "2nd"); conf.addProperty("other", "value"); - Map props = conf.getAllPropertiesWithPrefix(ClientProperty.TRACE_SPAN_RECEIVER_PREFIX); + Map props = conf.getAllPropertiesWithPrefix(ClientProperty.TRACE_SPAN_RECEIVER_PREFIX); assertEquals(2, props.size()); assertEquals("1st", props.get(ClientProperty.TRACE_SPAN_RECEIVER_PREFIX.getKey() + "first")); assertEquals("2nd", props.get(ClientProperty.TRACE_SPAN_RECEIVER_PREFIX.getKey() + "second")); diff --git a/core/src/test/java/org/apache/accumulo/core/client/lexicoder/BytesLexicoderTest.java b/core/src/test/java/org/apache/accumulo/core/client/lexicoder/BytesLexicoderTest.java index 3f5b991791b..ee37950160a 100644 --- a/core/src/test/java/org/apache/accumulo/core/client/lexicoder/BytesLexicoderTest.java +++ b/core/src/test/java/org/apache/accumulo/core/client/lexicoder/BytesLexicoderTest.java @@ -25,4 +25,4 @@ public void testDecodes() { assertDecodesB(lexicoder, new byte[0]); assertDecodesB(lexicoder, "accumulo".getBytes()); } -} \ No newline at end of file +} diff --git a/core/src/test/java/org/apache/accumulo/core/client/lexicoder/DateLexicoderTest.java b/core/src/test/java/org/apache/accumulo/core/client/lexicoder/DateLexicoderTest.java index 98d3ed11a5c..87db36dbabb 100644 --- a/core/src/test/java/org/apache/accumulo/core/client/lexicoder/DateLexicoderTest.java +++ b/core/src/test/java/org/apache/accumulo/core/client/lexicoder/DateLexicoderTest.java @@ -27,4 +27,4 @@ public void testDecode() throws Exception { assertDecodes(new DateLexicoder(), new Date(0)); assertDecodes(new DateLexicoder(), new Date(Long.MAX_VALUE)); } -} \ No newline at end of file +} diff --git a/core/src/test/java/org/apache/accumulo/core/client/lexicoder/StringLexicoderTest.java b/core/src/test/java/org/apache/accumulo/core/client/lexicoder/StringLexicoderTest.java index 686ee462c99..388d21f8803 100644 --- a/core/src/test/java/org/apache/accumulo/core/client/lexicoder/StringLexicoderTest.java +++ 
b/core/src/test/java/org/apache/accumulo/core/client/lexicoder/StringLexicoderTest.java @@ -25,4 +25,4 @@ public void testDecode() throws Exception { assertDecodes(new StringLexicoder(), "0"); assertDecodes(new StringLexicoder(), "accumulo"); } -} \ No newline at end of file +} diff --git a/core/src/test/java/org/apache/accumulo/core/client/lexicoder/TextLexicoderTest.java b/core/src/test/java/org/apache/accumulo/core/client/lexicoder/TextLexicoderTest.java index 74558de7e0c..4199b210319 100644 --- a/core/src/test/java/org/apache/accumulo/core/client/lexicoder/TextLexicoderTest.java +++ b/core/src/test/java/org/apache/accumulo/core/client/lexicoder/TextLexicoderTest.java @@ -25,4 +25,4 @@ public void testDecode() throws Exception { assertDecodes(new TextLexicoder(), new Text("")); assertDecodes(new TextLexicoder(), new Text("accumulo")); } -} \ No newline at end of file +} diff --git a/core/src/test/java/org/apache/accumulo/core/client/lexicoder/impl/AbstractLexicoderTest.java b/core/src/test/java/org/apache/accumulo/core/client/lexicoder/impl/AbstractLexicoderTest.java index 502d1329e49..5c0358828f7 100644 --- a/core/src/test/java/org/apache/accumulo/core/client/lexicoder/impl/AbstractLexicoderTest.java +++ b/core/src/test/java/org/apache/accumulo/core/client/lexicoder/impl/AbstractLexicoderTest.java @@ -20,8 +20,9 @@ import org.apache.commons.lang.ArrayUtils; /** - * Assists in Testing classes that extend {@link org.apache.accumulo.core.client.lexicoder.AbstractEncoder}. It - * references methods not formally defined in the {@link org.apache.accumulo.core.client.lexicoder.Lexicoder} interface. + * Assists in Testing classes that extend {@link org.apache.accumulo.core.client.lexicoder.AbstractEncoder}. It references methods not formally defined in the + * {@link org.apache.accumulo.core.client.lexicoder.Lexicoder} interface. 
+ * * @since 1.7.0 */ public abstract class AbstractLexicoderTest extends LexicoderTest { @@ -91,4 +92,4 @@ protected static void assertOutOfBoundsFails(AbstractLexicoder lexicoder, fail("Should throw on negative length, even if (offset+len) is within bounds."); } catch (IllegalArgumentException e) {} } -} \ No newline at end of file +} diff --git a/core/src/test/java/org/apache/accumulo/core/client/lexicoder/impl/ByteUtilsTest.java b/core/src/test/java/org/apache/accumulo/core/client/lexicoder/impl/ByteUtilsTest.java index 1304797bad7..9c3e34300fd 100644 --- a/core/src/test/java/org/apache/accumulo/core/client/lexicoder/impl/ByteUtilsTest.java +++ b/core/src/test/java/org/apache/accumulo/core/client/lexicoder/impl/ByteUtilsTest.java @@ -23,7 +23,7 @@ public class ByteUtilsTest { private final byte[] empty = new byte[0]; private final byte[] noSplits = "nosplits".getBytes(); - private final byte[] splitAt5 = ("1234" + (char)0x00 + "56789").getBytes(); + private final byte[] splitAt5 = ("1234" + (char) 0x00 + "56789").getBytes(); @Test public void testSplit() throws Exception { @@ -68,4 +68,4 @@ public void testSplitWithOffset() { Assert.assertEquals(1, result.length); Assert.assertArrayEquals("5678".getBytes(), result[0]); } -} \ No newline at end of file +} diff --git a/core/src/test/java/org/apache/accumulo/core/client/mock/MockNamespacesTest.java b/core/src/test/java/org/apache/accumulo/core/client/mock/MockNamespacesTest.java index 77b989fa37e..0b70d293666 100644 --- a/core/src/test/java/org/apache/accumulo/core/client/mock/MockNamespacesTest.java +++ b/core/src/test/java/org/apache/accumulo/core/client/mock/MockNamespacesTest.java @@ -212,7 +212,7 @@ public void testRenameAndCloneTableToNewNamespace() throws Exception { // TODO implement clone in mock /* * c.tableOperations().clone(tableName1, tableName2, false, null, null); - * + * * assertTrue(c.tableOperations().exists(tableName1)); assertTrue(c.tableOperations().exists(tableName2)); */ return; diff --git a/core/src/test/java/org/apache/accumulo/core/data/MutationTest.java b/core/src/test/java/org/apache/accumulo/core/data/MutationTest.java index 0d075e4a5bf..b39757baa11 100644 --- a/core/src/test/java/org/apache/accumulo/core/data/MutationTest.java +++ b/core/src/test/java/org/apache/accumulo/core/data/MutationTest.java @@ -651,17 +651,17 @@ public void testThrift_Invalid() { new Mutation(tm1); } - /* The following two tests assert that no exception is thrown after calling - * hashCode or equals on a Mutation. These guard against the condition noted - * in ACCUMULO-3718. + /* + * The following two tests assert that no exception is thrown after calling hashCode or equals on a Mutation. These guard against the condition noted in + * ACCUMULO-3718. 
*/ @Test public void testPutAfterHashCode() { Mutation m = new Mutation("r"); m.hashCode(); try { - m.put("cf", "cq", "v"); - } catch(IllegalStateException e) { + m.put("cf", "cq", "v"); + } catch (IllegalStateException e) { fail("Calling Mutation#hashCode then Mutation#put should not result in an IllegalStateException."); } } @@ -672,9 +672,9 @@ public void testPutAfterEquals() { Mutation m2 = new Mutation("r2"); m.equals(m2); try { - m.put("cf", "cq", "v"); - m2.put("cf", "cq", "v"); - } catch(IllegalStateException e) { + m.put("cf", "cq", "v"); + m2.put("cf", "cq", "v"); + } catch (IllegalStateException e) { fail("Calling Mutation#equals then Mutation#put should not result in an IllegalStateException."); } } diff --git a/core/src/test/java/org/apache/accumulo/core/iterators/FirstEntryInRowIteratorTest.java b/core/src/test/java/org/apache/accumulo/core/iterators/FirstEntryInRowIteratorTest.java index 88be983d5f5..74f74626f8f 100644 --- a/core/src/test/java/org/apache/accumulo/core/iterators/FirstEntryInRowIteratorTest.java +++ b/core/src/test/java/org/apache/accumulo/core/iterators/FirstEntryInRowIteratorTest.java @@ -67,8 +67,7 @@ public Authorizations getAuthorizations() { } @Override - public SortedKeyValueIterator reserveMapFileReader( - String arg0) throws IOException { + public SortedKeyValueIterator reserveMapFileReader(String arg0) throws IOException { return null; } }; diff --git a/core/src/test/java/org/apache/accumulo/core/iterators/aggregation/NumSummationTest.java b/core/src/test/java/org/apache/accumulo/core/iterators/aggregation/NumSummationTest.java index 796311e8d3e..5a56ead2069 100644 --- a/core/src/test/java/org/apache/accumulo/core/iterators/aggregation/NumSummationTest.java +++ b/core/src/test/java/org/apache/accumulo/core/iterators/aggregation/NumSummationTest.java @@ -25,6 +25,7 @@ import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + /** * @deprecated since 1.4 */ @@ -74,7 +75,7 @@ public void test2() { la = NumArraySummation.bytesToLongArray(nas.aggregate().get()); assertTrue(la.length == 0); } catch (Exception e) { - log.error("{}", e.getMessage(),e); + log.error("{}", e.getMessage(), e); assertTrue(false); } } diff --git a/core/src/test/java/org/apache/accumulo/core/iterators/user/RowDeletingIteratorTest.java b/core/src/test/java/org/apache/accumulo/core/iterators/user/RowDeletingIteratorTest.java index 6bd92ee1dc7..a3c1ccac50d 100644 --- a/core/src/test/java/org/apache/accumulo/core/iterators/user/RowDeletingIteratorTest.java +++ b/core/src/test/java/org/apache/accumulo/core/iterators/user/RowDeletingIteratorTest.java @@ -74,7 +74,7 @@ public void registerSideChannel(SortedKeyValueIterator iter) {} @Override public Authorizations getAuthorizations() { - return null; + return null; } } diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java index 873f886969b..4ac9db1f024 100644 --- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java +++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java @@ -61,4 +61,4 @@ public void receive(Key key, Value value) { count++; } -} \ No newline at end of file +} diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java 
b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java index 3644590e441..d0898f03feb 100644 --- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java +++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java @@ -40,7 +40,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * A demonstration of reading entire rows and deleting entire rows. */ diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java index 9c1abd90e52..74c40a55b09 100644 --- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java +++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java @@ -34,7 +34,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * A simple map reduce job that inserts word counts into accumulo. See the README for instructions on how to run this. This version does not use the ClientOpts * class to parse arguments as an example of using AccumuloInputFormat and AccumuloOutputFormat directly. See README.mapred for more details. diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/reservations/ARS.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/reservations/ARS.java index 0915e17b0c3..b9e1a83736c 100644 --- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/reservations/ARS.java +++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/reservations/ARS.java @@ -41,7 +41,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Accumulo Reservation System : An example reservation system using Accumulo. Supports atomic reservations of a resource at a date. Wait list are also * supported. In order to keep the example simple, no checking is done of the date. 
Also the code is inefficient, if interested in improving it take a look at diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java index e4887aa6abd..f089d429178 100644 --- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java +++ b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java @@ -40,7 +40,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - public class CountTest extends TestCase { private static final Logger log = LoggerFactory.getLogger(CountTest.class); diff --git a/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java b/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java index d93bf8c33a9..8532e92c01d 100644 --- a/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java +++ b/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java @@ -111,7 +111,7 @@ public void print(ReadOnlyTStore zs, IZooReaderWriter zk, String lockPath, Fo tables.add(lda[0].charAt(0) + ":" + id); } catch (Exception e) { - log.error("{}", e.getMessage(),e); + log.error("{}", e.getMessage(), e); } pos++; } diff --git a/fate/src/main/java/org/apache/accumulo/fate/AgeOffStore.java b/fate/src/main/java/org/apache/accumulo/fate/AgeOffStore.java index 87ef5ab9fbe..d023c270d83 100644 --- a/fate/src/main/java/org/apache/accumulo/fate/AgeOffStore.java +++ b/fate/src/main/java/org/apache/accumulo/fate/AgeOffStore.java @@ -27,7 +27,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * This store removes Repos, in the store it wraps, that are in a finished or new state for more than a configurable time period. * diff --git a/fate/src/main/java/org/apache/accumulo/fate/Fate.java b/fate/src/main/java/org/apache/accumulo/fate/Fate.java index 34d2206de3b..cf2ab73a1c6 100644 --- a/fate/src/main/java/org/apache/accumulo/fate/Fate.java +++ b/fate/src/main/java/org/apache/accumulo/fate/Fate.java @@ -28,7 +28,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Fault tolerant executor * diff --git a/fate/src/main/java/org/apache/accumulo/fate/util/AddressUtil.java b/fate/src/main/java/org/apache/accumulo/fate/util/AddressUtil.java index c9a822454b7..6fa5ad005cf 100644 --- a/fate/src/main/java/org/apache/accumulo/fate/util/AddressUtil.java +++ b/fate/src/main/java/org/apache/accumulo/fate/util/AddressUtil.java @@ -23,7 +23,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - public class AddressUtil { private static final Logger log = LoggerFactory.getLogger(AddressUtil.class); diff --git a/fate/src/main/java/org/apache/accumulo/fate/util/UtilWaitThread.java b/fate/src/main/java/org/apache/accumulo/fate/util/UtilWaitThread.java index e109e065ffc..de31fb0eebc 100644 --- a/fate/src/main/java/org/apache/accumulo/fate/util/UtilWaitThread.java +++ b/fate/src/main/java/org/apache/accumulo/fate/util/UtilWaitThread.java @@ -19,7 +19,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - public class UtilWaitThread { private static final Logger log = LoggerFactory.getLogger(UtilWaitThread.class); diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLock.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLock.java index 2c6446ddec9..624ce5dbabb 100644 --- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLock.java +++ 
b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLock.java @@ -31,7 +31,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - // A ReadWriteLock that can be implemented in ZooKeeper. Features the ability to store data // with the lock, and recover the lock using that data to find the lock. public class DistributedReadWriteLock implements java.util.concurrent.locks.ReadWriteLock { diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/Retry.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/Retry.java index f5a57a6f461..e84b1af325b 100644 --- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/Retry.java +++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/Retry.java @@ -19,7 +19,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Encapsulates the retrying implementation for some operation. Provides bounded retry attempts with a bounded, linear backoff. */ diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/TransactionWatcher.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/TransactionWatcher.java index 2fcb7a9df1d..dda7db9d31d 100644 --- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/TransactionWatcher.java +++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/TransactionWatcher.java @@ -24,7 +24,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - public class TransactionWatcher { public interface Arbitrator { diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java index 5a4fb117339..ec92a7f3054 100644 --- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java +++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java @@ -259,7 +259,7 @@ public void run(ZooKeeper zooKeeper) throws KeeperException, InterruptedExceptio /* * The following call to exists() is important, since we are caching that a node does not exist. Once the node comes into existence, it will be added to * the cache. But this notification of a node coming into existence will only be given if exists() was previously called. - * + * * If the call to exists() is bypassed and only getData() is called with a special case that looks for Code.NONODE in the KeeperException, then * non-existence can not be cached. */ diff --git a/fate/src/test/java/org/apache/accumulo/fate/util/AddressUtilTest.java b/fate/src/test/java/org/apache/accumulo/fate/util/AddressUtilTest.java index 6c7c1d61e78..7933ff11e8f 100644 --- a/fate/src/test/java/org/apache/accumulo/fate/util/AddressUtilTest.java +++ b/fate/src/test/java/org/apache/accumulo/fate/util/AddressUtilTest.java @@ -23,7 +23,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Test the AddressUtil class. 
* diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java index 5a05431700d..e2dabe57060 100644 --- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java +++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java @@ -199,8 +199,7 @@ public ProxyServer(Properties props) { } else { clientConf = ClientConfiguration.loadDefault(); } - instance = new ZooKeeperInstance(clientConf.withInstance(props.getProperty("instance")) - .withZkHosts(props.getProperty("zookeepers"))); + instance = new ZooKeeperInstance(clientConf.withInstance(props.getProperty("instance")).withZkHosts(props.getProperty("zookeepers"))); } try { diff --git a/server/base/src/main/java/org/apache/accumulo/server/TabletLevel.java b/server/base/src/main/java/org/apache/accumulo/server/TabletLevel.java index c87c7e175ee..e97b99b78e3 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/TabletLevel.java +++ b/server/base/src/main/java/org/apache/accumulo/server/TabletLevel.java @@ -19,9 +19,7 @@ import org.apache.accumulo.core.data.impl.KeyExtent; public enum TabletLevel { - ROOT, - META, - NORMAL; + ROOT, META, NORMAL; public static TabletLevel getLevel(KeyExtent extent) { if (!extent.isMeta()) diff --git a/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java b/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java index 00e0346c075..588e3e00649 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java +++ b/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java @@ -134,7 +134,7 @@ public boolean authenticate(TInfo tinfo, TCredentials credentials) throws Thrift try { return security.authenticateUser(credentials, credentials); } catch (ThriftSecurityException e) { - log.error("ThriftSecurityException",e); + log.error("ThriftSecurityException", e); throw e; } } @@ -144,7 +144,7 @@ public boolean authenticateUser(TInfo tinfo, TCredentials credentials, TCredenti try { return security.authenticateUser(credentials, toAuth); } catch (ThriftSecurityException e) { - log.error("ThriftSecurityException",e); + log.error("ThriftSecurityException", e); throw e; } } diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java index c154bd00c3d..600349bb6e2 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java +++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java @@ -124,7 +124,7 @@ public void setFutureLocations(Collection assignments) throws Distri } @Override - public void unassign(Collection tablets, Map> logsForDeadServers) throws DistributedStoreException { + public void unassign(Collection tablets, Map> logsForDeadServers) throws DistributedStoreException { BatchWriter writer = createBatchWriter(); try { diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java index acc10d850f6..147e0714d0c 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java +++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java @@ -62,9 +62,10 @@ public abstract class TabletStateStore 
implements Iterable * @param logsForDeadServers * a cache of logs in use by servers when they died */ - abstract public void unassign(Collection tablets, Map> logsForDeadServers) throws DistributedStoreException; + abstract public void unassign(Collection tablets, Map> logsForDeadServers) throws DistributedStoreException; - public static void unassign(AccumuloServerContext context, TabletLocationState tls, Map> logsForDeadServers) throws DistributedStoreException { + public static void unassign(AccumuloServerContext context, TabletLocationState tls, Map> logsForDeadServers) + throws DistributedStoreException { TabletStateStore store; if (tls.extent.isRootTablet()) { store = new ZooTabletStateStore(); @@ -91,6 +92,6 @@ public static void setLocation(AccumuloServerContext context, Assignment assignm /** * When a server fails, its logs must be marked as unused after the log markers are moved to the tablets. */ - abstract public void markLogsAsUnused(AccumuloServerContext context, Map> logs) throws DistributedStoreException; + abstract public void markLogsAsUnused(AccumuloServerContext context, Map> logs) throws DistributedStoreException; } diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java index bce20fd5a65..03627e34e50 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java +++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java @@ -165,7 +165,7 @@ public void setLocations(Collection assignments) throws DistributedS } @Override - public void unassign(Collection tablets, Map> logsForDeadServers) throws DistributedStoreException { + public void unassign(Collection tablets, Map> logsForDeadServers) throws DistributedStoreException { if (tablets.size() != 1) throw new IllegalArgumentException("There is only one root tablet"); TabletLocationState tls = tablets.iterator().next(); @@ -183,7 +183,8 @@ public void unassign(Collection tablets, Map { /* * use a thread pool so that reporting a problem never blocks - * + * * make the thread pool use a bounded queue to avoid the case where problem reports are not being processed because the whole system is in a really bad state * (like HDFS is down) and everything is reporting lots of problems, but problem reports can not be processed */ diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenKeyManager.java b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenKeyManager.java index 3582cfd74c6..ad36d3ed64b 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenKeyManager.java +++ b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenKeyManager.java @@ -52,8 +52,7 @@ public class AuthenticationTokenKeyManager extends Daemon { * @param tokenMaxLifetime * The lifetime, in milliseconds, of generated AuthenticationKeys (and subsequently delegation tokens). 
*/ - public AuthenticationTokenKeyManager(AuthenticationTokenSecretManager mgr, ZooAuthenticationKeyDistributor dist, long keyUpdateInterval, - long tokenMaxLifetime) { + public AuthenticationTokenKeyManager(AuthenticationTokenSecretManager mgr, ZooAuthenticationKeyDistributor dist, long keyUpdateInterval, long tokenMaxLifetime) { super("Delegation Token Key Manager"); this.secretManager = mgr; this.keyDistributor = dist; diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyDistributor.java b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyDistributor.java index 515b03673f5..b327a2605a2 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyDistributor.java +++ b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyDistributor.java @@ -38,7 +38,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Class that manages distribution of {@link AuthenticationKey}s, Accumulo's secret in the delegation token model, to other Accumulo nodes via ZooKeeper. */ diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java index 0a30314a92b..da2948d2d14 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java +++ b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java @@ -70,7 +70,7 @@ public class MasterMetadataUtil { private static final Logger log = LoggerFactory.getLogger(MasterMetadataUtil.class); public static void addNewTablet(ClientContext context, KeyExtent extent, String path, TServerInstance location, Map datafileSizes, - Multimap bulkLoadedFiles, String time, long lastFlushID, long lastCompactID, ZooLock zooLock) { + Multimap bulkLoadedFiles, String time, long lastFlushID, long lastCompactID, ZooLock zooLock) { Mutation m = extent.getPrevRowUpdateMutation(); TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(path.getBytes(UTF_8))); @@ -89,7 +89,7 @@ public static void addNewTablet(ClientContext context, KeyExtent extent, String m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode())); } - for (Entry entry : bulkLoadedFiles.entries()) { + for (Entry entry : bulkLoadedFiles.entries()) { byte[] tidBytes = Long.toString(entry.getKey()).getBytes(); m.put(TabletsSection.BulkFileColumnFamily.NAME, entry.getValue().meta(), new Value(tidBytes)); } @@ -283,7 +283,6 @@ private static void updateRootTabletDataFile(KeyExtent extent, FileRef path, Fil } } - /** * Create an update that updates a tablet * @@ -317,7 +316,6 @@ private static Mutation getUpdateForTabletDataFile(KeyExtent extent, FileRef pat TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value(Long.toString(flushId).getBytes(UTF_8))); - return m; } } diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java index 01520029e19..42df0b5e67e 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java +++ b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java @@ -926,9 +926,9 @@ public static List getBulkFilesLoaded(Connector conn, KeyExtent extent, } } - public static Multimap getBulkFilesLoaded(ClientContext context, 
KeyExtent extent) throws IOException { + public static Multimap getBulkFilesLoaded(ClientContext context, KeyExtent extent) throws IOException { Text metadataRow = extent.getMetadataEntry(); - Multimap ret = HashMultimap.create(); + Multimap ret = HashMultimap.create(); VolumeManager fs = VolumeManagerImpl.get(); Scanner scanner = new ScannerImpl(context, extent.isMeta() ? RootTable.ID : MetadataTable.ID, Authorizations.EMPTY); @@ -1141,8 +1141,8 @@ public static void markLogUnused(ClientContext context, ZooLock lock, TServerIns } } - public static void fetchLogsForDeadServer(ClientContext context, ZooLock lock, KeyExtent extent, TServerInstance server, Map> logsForDeadServers) - throws TableNotFoundException, AccumuloException, AccumuloSecurityException { + public static void fetchLogsForDeadServer(ClientContext context, ZooLock lock, KeyExtent extent, TServerInstance server, + Map> logsForDeadServers) throws TableNotFoundException, AccumuloException, AccumuloSecurityException { // already cached if (logsForDeadServers.containsKey(server)) { return; diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java b/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java index a1fc9c5bbac..96ea9eb5af2 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java +++ b/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java @@ -79,7 +79,7 @@ public TabletDeletedException(String msg) { /* * public TabletIterator(String table, boolean returnPrevEndRow){ - * + * * } */ diff --git a/server/base/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java b/server/base/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java index e7c71614834..7e9543f6204 100644 --- a/server/base/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java +++ b/server/base/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java @@ -116,7 +116,7 @@ public void registerSideChannel(SortedKeyValueIterator iter) {} @Override public Authorizations getAuthorizations() { - return null; + return null; } @Override diff --git a/server/base/src/test/java/org/apache/accumulo/server/security/handler/ZKAuthenticatorTest.java b/server/base/src/test/java/org/apache/accumulo/server/security/handler/ZKAuthenticatorTest.java index 1db7009ae91..827e7726eb4 100644 --- a/server/base/src/test/java/org/apache/accumulo/server/security/handler/ZKAuthenticatorTest.java +++ b/server/base/src/test/java/org/apache/accumulo/server/security/handler/ZKAuthenticatorTest.java @@ -83,7 +83,7 @@ public void testEncryption() { storedBytes = ZKSecurityTool.createPass(rawPass); assertTrue(ZKSecurityTool.checkPass(rawPass, storedBytes)); } catch (AccumuloException e) { - log.error("{}", e.getMessage(),e); + log.error("{}", e.getMessage(), e); assertTrue(false); } } diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GCExecutable.java b/server/gc/src/main/java/org/apache/accumulo/gc/GCExecutable.java index 0c7444441a3..b3d490ff250 100644 --- a/server/gc/src/main/java/org/apache/accumulo/gc/GCExecutable.java +++ b/server/gc/src/main/java/org/apache/accumulo/gc/GCExecutable.java @@ -33,4 +33,4 @@ public String keyword() { public void execute(final String[] args) throws IOException { SimpleGarbageCollector.main(args); } -} \ No newline at end of file +} diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java 
b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java index a9a3f650e68..f896b1fd2c8 100644 --- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java +++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java @@ -119,15 +119,13 @@ public void collect(GCStatus status) { try { Set currentServers = liveServers.getCurrentServers(); - status.currentLog.started = System.currentTimeMillis(); - Map > candidates = new HashMap<>(); + Map> candidates = new HashMap<>(); long count = getCurrent(candidates, currentServers); long fileScanStop = System.currentTimeMillis(); - log.info(String.format("Fetched %d files for %d servers in %.2f seconds", count, candidates.size(), - (fileScanStop - status.currentLog.started) / 1000.)); + log.info(String.format("Fetched %d files for %d servers in %.2f seconds", count, candidates.size(), (fileScanStop - status.currentLog.started) / 1000.)); status.currentLog.candidates = count; span.stop(); @@ -171,7 +169,6 @@ public void collect(GCStatus status) { log.info(String.format("%d markers removed in %.2f seconds", count, (removeMarkersStop - removeStop) / 1000.)); span.stop(); - status.currentLog.finished = removeStop; status.lastLog = status.currentLog; status.currentLog = new GcCycleStats(); @@ -200,7 +197,7 @@ private long removeTabletServerMarkers(Map> candidates root.addMutation(m); meta.addMutation(m); } - } finally { + } finally { if (meta != null) { meta.close(); } @@ -214,7 +211,7 @@ private long removeTabletServerMarkers(Map> candidates return result; } - private long removeFiles(Map > candidates, final GCStatus status) { + private long removeFiles(Map> candidates, final GCStatus status) { for (Entry> entry : candidates.entrySet()) { for (Path path : entry.getValue()) { log.debug("Removing unused WAL for server " + entry.getKey() + " log " + path); @@ -236,12 +233,12 @@ private UUID path2uuid(Path path) { return UUID.fromString(path.getName()); } - private long removeEntriesInUse(Map > candidates, GCStatus status, Set liveServers) throws IOException, KeeperException, - InterruptedException { + private long removeEntriesInUse(Map> candidates, GCStatus status, Set liveServers) throws IOException, + KeeperException, InterruptedException { // remove any entries if there's a log reference, or a tablet is still assigned to the dead server - Map walToDeadServer = new HashMap<>(); + Map walToDeadServer = new HashMap<>(); for (Entry> entry : candidates.entrySet()) { for (Path file : entry.getValue()) { walToDeadServer.put(path2uuid(file), entry.getKey()); @@ -276,8 +273,7 @@ private long removeEntriesInUse(Map > candidates, GCS return count; } - protected int removeReplicationEntries(Map > candidates, GCStatus status) throws IOException, KeeperException, - InterruptedException { + protected int removeReplicationEntries(Map> candidates, GCStatus status) throws IOException, KeeperException, InterruptedException { Connector conn; try { conn = context.getConnector(); @@ -314,7 +310,6 @@ protected int removeReplicationEntries(Map > candidat return count; } - /** * Determine if the given WAL is needed for replication * @@ -375,9 +370,6 @@ protected Iterable> getReplicationStatusForFile(Connector conn, return metaScanner; } - - - /** * Scans log markers. The map passed in is populated with the logs for dead servers. 
* @@ -385,7 +377,7 @@ protected Iterable> getReplicationStatusForFile(Connector conn, * map of dead server to log file entries * @return total number of log files */ - private long getCurrent(Map > unusedLogs, Set currentServers) throws Exception { + private long getCurrent(Map> unusedLogs, Set currentServers) throws Exception { Set rootWALs = new HashSet<>(); // Get entries in zookeeper: String zpath = ZooUtil.getRoot(context.getInstance()) + RootTable.ZROOT_TABLET_WALOGS; @@ -425,7 +417,7 @@ private long getCurrent(Map > unusedLogs, Set iter = volume.getFileSystem().listFiles(volume.prefixChild(ServerConstants.WAL_DIR), true); + RemoteIterator iter = volume.getFileSystem().listFiles(volume.prefixChild(ServerConstants.WAL_DIR), true); while (iter.hasNext()) { LocatedFileStatus next = iter.next(); // recursive listing returns directories, too diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java b/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java index 455aaeeaaf5..68ea62dc281 100644 --- a/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java +++ b/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java @@ -117,11 +117,11 @@ public void run() { /* * ACCUMULO-3320 WALs cannot be closed while a TabletServer may still use it later. - * + * * In addition to the WALs that are actively referenced in the metadata table, tservers can also hold on to a WAL that is not presently referenced by any * tablet. For example, a tablet could MinC which would end in all logs for that tablet being removed. However, if more data was ingested into the table, * the same WAL could be re-used again by that tserver. - * + * * If this code happened to run after the compaction but before the log is again referenced by a tabletserver, we might delete the WAL reference, only to * have it recreated again which causes havoc with the replication status for a table. 
*/ diff --git a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java index 20917a46a31..2b874f6264a 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java +++ b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java @@ -163,7 +163,7 @@ public void run() { List assigned = new ArrayList(); List assignedToDeadServers = new ArrayList(); Map unassigned = new HashMap(); - Map> logsForDeadServers = new TreeMap<>(); + Map> logsForDeadServers = new TreeMap<>(); MasterState masterState = master.getMasterState(); int[] counts = new int[TabletState.values().length]; @@ -738,14 +738,9 @@ private KeyExtent getHighTablet(KeyExtent range) throws AccumuloException { } } - private void flushChanges( - SortedMap currentTServers, - List assignments, - List assigned, - List assignedToDeadServers, - Map> logsForDeadServers, - Map unassigned) - throws DistributedStoreException, TException { + private void flushChanges(SortedMap currentTServers, List assignments, List assigned, + List assignedToDeadServers, Map> logsForDeadServers, Map unassigned) + throws DistributedStoreException, TException { if (!assignedToDeadServers.isEmpty()) { int maxServersToShow = min(assignedToDeadServers.size(), 100); Master.log.debug(assignedToDeadServers.size() + " assigned to dead servers: " + assignedToDeadServers.subList(0, maxServersToShow) + "..."); diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java index 3e1aa335580..50bf19dfcf8 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java @@ -50,4 +50,4 @@ public Repo call(long tid, Master master) throws Exception { public void undo(long tid, Master master) throws Exception { } -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java index f6961986b74..4ee13110c97 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java @@ -71,7 +71,7 @@ private void readObject(java.io.ObjectInputStream in) throws IOException, ClassN /* * handle the case where we start executing on a new machine where the current time is in the past relative to the previous machine - * + * * if the new machine has time in the future, that will work ok w/ hasCycled */ if (System.currentTimeMillis() < creationTime) { @@ -284,4 +284,4 @@ public void undo(long tid, Master environment) throws Exception { // nothing to do } -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUpBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUpBulkImport.java index 85f9a8cfdc1..5ca325fe37f 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUpBulkImport.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUpBulkImport.java @@ -61,4 +61,4 @@ public Repo call(long tid, Master master) throws Exception { ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid); return null; } -} \ No newline at end of file +} diff --git 
a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneInfo.java index 335d65d0819..96ff9546143 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneInfo.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneInfo.java @@ -33,4 +33,4 @@ class CloneInfo implements Serializable { Set propertiesToExclude; public String user; -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java index 045f6b11388..911080942a7 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java @@ -51,4 +51,4 @@ public void undo(long tid, Master environment) throws Exception { MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment, environment.getMasterLock()); } -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java index 072f5dec872..d9842b2dc08 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java @@ -73,4 +73,4 @@ public void undo(long tid, Master environment) throws Exception { Tables.clearCache(environment.getInstance()); } -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java index e3d08200c39..0db93c1f6fa 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java @@ -185,4 +185,4 @@ public void undo(long tid, Master environment) throws Exception { } -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompleteBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompleteBulkImport.java index 8905c80fea8..3fe6d6cd5fc 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompleteBulkImport.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompleteBulkImport.java @@ -42,4 +42,4 @@ public Repo call(long tid, Master master) throws Exception { ZooArbitrator.stop(Constants.BULK_ARBITRATOR_TYPE, tid); return new CopyFailed(tableId, source, bulk, error); } -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java index 6221624243b..695f9be86f8 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java @@ -48,4 +48,4 @@ public void undo(long tid, Master master) throws Exception { fs.deleteRecursively(new Path(tableInfo.dir)); } -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java 
b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java index 4f0e7f80787..69dcc49cf87 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java @@ -58,4 +58,4 @@ public Repo call(long tid, Master master) throws Exception { return new MapImportFileNames(tableInfo); } -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportInfo.java index d8f276a1eee..3e8648883bc 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportInfo.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportInfo.java @@ -26,4 +26,4 @@ class ExportInfo implements Serializable { public String tableID; public String exportDir; public String namespaceID; -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java index a502a3d3ecc..45fc8dfd07e 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java @@ -37,4 +37,4 @@ public Repo call(long tid, Master environment) throws Exception { public void undo(long tid, Master environment) throws Exception { } -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java index 7c3701b8ed7..18eeae94496 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java @@ -61,4 +61,4 @@ public Repo call(long tid, Master environment) throws Exception { @Override public void undo(long tid, Master environment) throws Exception {} -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java index 93cc194d903..fc47c1e85f8 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java @@ -55,4 +55,4 @@ public String getReturn() { @Override public void undo(long tid, Master env) throws Exception {} -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java index 2343efb2938..fd7389a2c99 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java @@ -59,4 +59,4 @@ public String getReturn() { @Override public void undo(long tid, Master env) throws Exception {} -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java 
b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java index 7dd76b1f165..e27b768867e 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java @@ -65,4 +65,4 @@ public String getReturn() { @Override public void undo(long tid, Master env) throws Exception {} -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java index f436fd35771..71e9124959e 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java @@ -101,4 +101,4 @@ public void undo(long tid, Master env) throws Exception { Utils.unreserveTable(tableInfo.tableId, tid, true); Tables.clearCache(instance); } -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java index 00fade9100b..cef89291b16 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java @@ -62,4 +62,4 @@ public Repo call(long tid, Master env) throws Exception { public void undo(long tid, Master env) throws Exception { AuditedSecurityOperation.getInstance(env).deleteTable(env.rpcCreds(), tableInfo.tableId, tableInfo.namespaceId); } -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportedTableInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportedTableInfo.java index 34bb6c8eb2b..7c09eb19a1c 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportedTableInfo.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportedTableInfo.java @@ -28,4 +28,4 @@ class ImportedTableInfo implements Serializable { public String tableId; public String importDir; public String namespaceId; -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceInfo.java index ef2becd0ae4..aadb24fd9ef 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceInfo.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceInfo.java @@ -28,4 +28,4 @@ class NamespaceInfo implements Serializable { String user; public Map props; -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java index da13ecc382f..45a370dce05 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java @@ -51,4 +51,4 @@ public void undo(long tid, Master environment) throws Exception { MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment, environment.getMasterLock()); } -} \ No newline at end of file +} 
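Note on the repeated one-line hunks above and below: each simply adds the missing final newline that the formatter enforces, which is why the "\ No newline at end of file" marker disappears. As a hedged aside (not part of this patch), here is a minimal sketch of a standalone checker one could run before formatting to list source files that still lack a trailing newline; the TrailingNewlineCheck class name and its argument handling are hypothetical, not anything introduced by this change.

// Hypothetical helper, not part of this patch: walks a source tree and prints
// .java files whose last byte is not '\n', i.e. files a diff would show with
// the "\ No newline at end of file" marker. Assumes Java 8+ (NIO file walking).
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Stream;

public class TrailingNewlineCheck {
  public static void main(String[] args) throws IOException {
    Path root = Paths.get(args.length > 0 ? args[0] : ".");
    try (Stream<Path> paths = Files.walk(root)) {
      paths.filter(p -> p.toString().endsWith(".java")).forEach(TrailingNewlineCheck::check);
    }
  }

  private static void check(Path p) {
    try (RandomAccessFile raf = new RandomAccessFile(p.toFile(), "r")) {
      if (raf.length() == 0) {
        return; // nothing to terminate in an empty file
      }
      raf.seek(raf.length() - 1);
      if (raf.read() != '\n') {
        System.out.println("missing trailing newline: " + p);
      }
    } catch (IOException e) {
      System.err.println("could not read " + p + ": " + e.getMessage());
    }
  }
}

Run over server/master/src/main/java before the formatter, a checker like this would have flagged the same tableOps classes these hunks are fixing.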
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java index 8ec8834535d..94a868ca5d0 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java @@ -74,4 +74,4 @@ public void undo(long tid, Master master) throws Exception { Tables.clearCache(instance); } -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java index bf101ae6885..6d0a215b674 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java @@ -71,4 +71,4 @@ public void undo(long tid, Master master) throws Exception { Utils.unreserveNamespace(namespaceInfo.namespaceId, tid, true); } -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java index e2057d1a4a8..ab418aeede1 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java @@ -32,4 +32,4 @@ class TableInfo implements Serializable { public Map props; public String dir = null; -} \ No newline at end of file +} diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java index ca31d488966..a4929579ea5 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java @@ -265,4 +265,4 @@ private static void exportConfig(AccumuloServerContext context, String tableID, osw.flush(); } -} \ No newline at end of file +} diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/NullScanner.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/NullScanner.java index 252334dd137..0ba13c7d156 100644 --- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/NullScanner.java +++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/NullScanner.java @@ -109,7 +109,8 @@ public long getTimeout(TimeUnit timeUnit) { @Override public void close() {} - @Override public Authorizations getAuthorizations() { + @Override + public Authorizations getAuthorizations() { return null; } diff --git a/server/monitor/src/test/java/org/apache/accumulo/monitor/ShowTraceLinkTypeTest.java b/server/monitor/src/test/java/org/apache/accumulo/monitor/ShowTraceLinkTypeTest.java index e7e5a16589a..786e8e3a676 100644 --- a/server/monitor/src/test/java/org/apache/accumulo/monitor/ShowTraceLinkTypeTest.java +++ b/server/monitor/src/test/java/org/apache/accumulo/monitor/ShowTraceLinkTypeTest.java @@ -26,8 +26,7 @@ public class ShowTraceLinkTypeTest { private static RemoteSpan rs(long start, long stop, String description) { - return new RemoteSpan("sender", "svc", 0l, 0l, 0l, start, stop, description, Collections. 
emptyMap(), - Collections. emptyList()); + return new RemoteSpan("sender", "svc", 0l, 0l, 0l, start, stop, description, Collections. emptyMap(), Collections. emptyList()); } @Test diff --git a/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java b/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java index 0ade243bb55..9b8705abf00 100644 --- a/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java +++ b/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java @@ -171,8 +171,8 @@ public void receiveSpan(Span s) { if (sendQueue.size() > maxQueueSize) { long now = System.currentTimeMillis(); if (now - lastNotificationOfDroppedSpans > 60 * 1000) { - log.warn("Tracing spans are being dropped because there are already " + maxQueueSize + " spans queued for delivery.\n" + - "This does not affect performance, security or data integrity, but distributed tracing information is being lost."); + log.warn("Tracing spans are being dropped because there are already " + maxQueueSize + " spans queued for delivery.\n" + + "This does not affect performance, security or data integrity, but distributed tracing information is being lost."); lastNotificationOfDroppedSpans = now; } return; diff --git a/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceTableStats.java b/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceTableStats.java index 63c806f1002..9def071c257 100644 --- a/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceTableStats.java +++ b/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceTableStats.java @@ -59,13 +59,8 @@ public SpanTypeCount() { @Override public String toString() { - return "{" + - "type='" + type + '\'' + - ", nonzeroCount=" + nonzeroCount + - ", zeroCount=" + zeroCount + - ", numTraces=" + traceIds.size() + - ", log10SpanLength=" + log10SpanLength + - '}'; + return "{" + "type='" + type + '\'' + ", nonzeroCount=" + nonzeroCount + ", zeroCount=" + zeroCount + ", numTraces=" + traceIds.size() + + ", log10SpanLength=" + log10SpanLength + '}'; } } @@ -80,7 +75,7 @@ public void count(Opts opts) throws AccumuloSecurityException, AccumuloException Connector conn = opts.getConnector(); Scanner scanner = conn.createScanner(opts.getTableName(), Authorizations.EMPTY); scanner.setRange(new Range(null, true, "idx:", false)); - Map counts = new TreeMap<>(); + Map counts = new TreeMap<>(); final SpanTypeCount hdfs = new SpanTypeCount(); hdfs.type = "HDFS"; final SpanTypeCount accumulo = new SpanTypeCount(); @@ -91,14 +86,13 @@ public void count(Opts opts) throws AccumuloSecurityException, AccumuloException for (Entry entry : scanner) { numSpans++; RemoteSpan span = TraceFormatter.getRemoteSpan(entry); - String id = span.getSvc()+":"+span.getDescription().replaceAll("[0-9][0-9][0-9]+", ""); + String id = span.getSvc() + ":" + span.getDescription().replaceAll("[0-9][0-9][0-9]+", ""); SpanTypeCount stc = counts.get(id); if (stc == null) { stc = new SpanTypeCount(); counts.put(id, stc); if (span.description.startsWith("org.apache.hadoop") || span.svc.equals("NameNode") || span.svc.equals("DataNode") - || span.description.contains("DFSOutputStream") || span.description.contains("DFSInputStream") - || span.description.contains("BlockReader")) { + || span.description.contains("DFSOutputStream") || span.description.contains("DFSInputStream") || span.description.contains("BlockReader")) { stc.type = hdfs.type; } else { stc.type = accumulo.type; @@ -110,18 +104,18 @@ public void 
count(Opts opts) throws AccumuloSecurityException, AccumuloException } else { increment(accumulo, span); } - maxSpanLength = Math.max(maxSpanLength, Math.log10(span.stop-span.start)); - maxSpanLengthMS = Math.max(maxSpanLengthMS, span.stop-span.start); + maxSpanLength = Math.max(maxSpanLength, Math.log10(span.stop - span.start)); + maxSpanLengthMS = Math.max(maxSpanLengthMS, span.stop - span.start); } System.out.println(); - System.out.println("log10 max span length "+maxSpanLength+" "+maxSpanLengthMS); - System.out.println("Total spans "+numSpans); + System.out.println("log10 max span length " + maxSpanLength + " " + maxSpanLengthMS); + System.out.println("Total spans " + numSpans); System.out.println("Percentage Accumulo nonzero of total " + accumulo.nonzeroCount + "/" + numSpans + " = " + (accumulo.nonzeroCount * 1.0 / numSpans)); System.out.println(hdfs + ", total " + (hdfs.nonzeroCount + hdfs.zeroCount)); - System.out.println(accumulo+ ", total " + (accumulo.nonzeroCount + accumulo.zeroCount)); + System.out.println(accumulo + ", total " + (accumulo.nonzeroCount + accumulo.zeroCount)); System.out.println(); System.out.println("source:desc={stats}"); - for (Entry c : counts.entrySet()) { + for (Entry c : counts.entrySet()) { System.out.println(c); } } @@ -147,7 +141,7 @@ else if (ms <= 100000) else if (ms <= 1000000) incrementIndex(stc.log10SpanLength, 6); else - throw new IllegalArgumentException("unexpected span length "+ms); + throw new IllegalArgumentException("unexpected span length " + ms); } } diff --git a/server/tserver/src/main/java/org/apache/accumulo/server/GarbageCollectionLogger.java b/server/tserver/src/main/java/org/apache/accumulo/server/GarbageCollectionLogger.java index 5fe254861bd..389a544f33c 100644 --- a/server/tserver/src/main/java/org/apache/accumulo/server/GarbageCollectionLogger.java +++ b/server/tserver/src/main/java/org/apache/accumulo/server/GarbageCollectionLogger.java @@ -35,8 +35,7 @@ public class GarbageCollectionLogger { private long gcTimeIncreasedCount = 0; private static long lastMemoryCheckTime = 0; - public GarbageCollectionLogger() { - } + public GarbageCollectionLogger() {} public synchronized void logGCInfo(AccumuloConfiguration conf) { final long now = System.currentTimeMillis(); diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyComparator.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyComparator.java index 6c8b0f3f4f8..739b92301c6 100644 --- a/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyComparator.java +++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyComparator.java @@ -41,4 +41,4 @@ else if (k2 instanceof MemKey) return cmp; } -} \ No newline at end of file +} diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyConversionIterator.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyConversionIterator.java index 891a0bab934..00c8be94cf8 100644 --- a/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyConversionIterator.java +++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyConversionIterator.java @@ -93,4 +93,4 @@ public void setInterruptFlag(AtomicBoolean flag) { ((InterruptibleIterator) getSource()).setInterruptFlag(flag); } -} \ No newline at end of file +} diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/PartialMutationSkippingIterator.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/PartialMutationSkippingIterator.java index 
8e2f1130d10..5d0733b077d 100644 --- a/server/tserver/src/main/java/org/apache/accumulo/tserver/PartialMutationSkippingIterator.java +++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/PartialMutationSkippingIterator.java @@ -51,4 +51,4 @@ public void setInterruptFlag(AtomicBoolean flag) { ((InterruptibleIterator) getSource()).setInterruptFlag(flag); } -} \ No newline at end of file +} diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletIteratorEnvironment.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletIteratorEnvironment.java index 532405df782..6c5b63d85bc 100644 --- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletIteratorEnvironment.java +++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletIteratorEnvironment.java @@ -43,7 +43,7 @@ public class TabletIteratorEnvironment implements IteratorEnvironment { private final ArrayList> topLevelIterators = new ArrayList>(); private Map files; - private final Authorizations authorizations; // these will only be supplied during scan scope + private final Authorizations authorizations; // these will only be supplied during scan scope public TabletIteratorEnvironment(IteratorScope scope, AccumuloConfiguration config) { if (scope == IteratorScope.majc) diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java index 96f33aa4d9b..7d9bf5edf16 100644 --- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java +++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java @@ -1441,7 +1441,6 @@ public void run() { } } - @Override public void loadTablet(TInfo tinfo, TCredentials credentials, String lock, final TKeyExtent textent) { @@ -1726,8 +1725,7 @@ public List getActiveLogs(TInfo tinfo, TCredentials credentials) throws @Override public void removeLogs(TInfo tinfo, TCredentials credentials, List filenames) throws TException { log.warn("Garbage collector is attempting to remove logs through the tablet server"); - log.warn("This is probably because your file Garbage Collector is an older version than your tablet servers.\n" + - "Restart your file Garbage Collector."); + log.warn("This is probably because your file Garbage Collector is an older version than your tablet servers.\n" + "Restart your file Garbage Collector."); } } @@ -2111,14 +2109,14 @@ public void run() { /* * @formatter:off If a minor compaction starts after a tablet opens, this indicates a log recovery occurred. This recovered data must be minor * compacted. - * + * * There are three reasons to wait for this minor compaction to finish before placing the tablet in online tablets. - * + * * 1) The log recovery code does not handle data written to the tablet on multiple tablet servers. 2) The log recovery code does not block if memory is * full. Therefore recovering lots of tablets that use a lot of memory could run out of memory. 3) The minor compaction finish event did not make it to * the logs (the file will be in metadata, preventing replay of compacted data)... but do not want a majc to wipe the file out from metadata and then * have another process failure... this could cause duplicate data to replay. 
- * + * * @formatter:on */ if (tablet.getNumEntriesInMemory() > 0 && !tablet.minorCompactNow(MinorCompactionReason.RECOVERY)) { @@ -3004,7 +3002,7 @@ public SecurityOperation getSecurityOperation() { } // avoid unnecessary redundant markings to meta - final ConcurrentHashMap> metadataTableLogs = new ConcurrentHashMap<>(); + final ConcurrentHashMap> metadataTableLogs = new ConcurrentHashMap<>(); final Object levelLocks[] = new Object[TabletLevel.values().length]; { for (int i = 0; i < levelLocks.length; i++) { @@ -3012,7 +3010,6 @@ public SecurityOperation getSecurityOperation() { } } - // remove any meta entries after a rolled log is no longer referenced Set closedLogs = new HashSet<>(); diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java index 27101774a1f..0447abe5db9 100644 --- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java +++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java @@ -27,7 +27,7 @@ public class EverythingCompactionStrategy extends CompactionStrategy { @Override public boolean shouldCompact(MajorCompactionRequest request) throws IOException { - return true; // ACCUMULO-3645 compact for empty files too + return true; // ACCUMULO-3645 compact for empty files too } @Override diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java index a2ab551d1b6..3fb3c86ec79 100644 --- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java +++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java @@ -210,10 +210,10 @@ synchronized private void createLogger() throws IOException { startLogMaker(); Object next = nextLog.take(); if (next instanceof Exception) { - throw (Exception)next; + throw (Exception) next; } if (next instanceof DfsLogger) { - currentLog = (DfsLogger)next; + currentLog = (DfsLogger) next; logId.incrementAndGet(); log.info("Using next log " + currentLog.getFileName()); return; diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java index 0eb7d85508a..490ecd36961 100644 --- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java +++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java @@ -105,7 +105,7 @@ public CompactionStats call() { return ret; } catch (IOException e) { - log.warn("MinC failed ({}) to create {} retrying ...", e.getMessage(),getOutputFile()); + log.warn("MinC failed ({}) to create {} retrying ...", e.getMessage(), getOutputFile()); ProblemReports.getInstance(tabletServer).report(new ProblemReport(getExtent().getTableId().toString(), ProblemType.FILE_WRITE, getOutputFile(), e)); reportedProblem = true; } catch (RuntimeException e) { diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/SplitInfo.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/SplitInfo.java index d866d991004..f2111c71617 100644 --- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/SplitInfo.java +++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/SplitInfo.java @@ 
-41,9 +41,10 @@ final public class SplitInfo { private final long initFlushID; private final long initCompactID; private final TServerInstance lastLocation; - private final Multimap bulkImported; + private final Multimap bulkImported; - SplitInfo(String d, SortedMap dfv, String time, long initFlushID, long initCompactID, TServerInstance lastLocation, Multimap bulkImported) { + SplitInfo(String d, SortedMap dfv, String time, long initFlushID, long initCompactID, TServerInstance lastLocation, + Multimap bulkImported) { this.dir = d; this.datafiles = dfv; this.time = time; @@ -77,7 +78,7 @@ public TServerInstance getLastLocation() { return lastLocation; } - public Multimap getBulkImported() { + public Multimap getBulkImported() { return bulkImported; } diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java index 0b2d5e3add9..5de32363c14 100644 --- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java +++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java @@ -253,8 +253,7 @@ static enum CompactionState { private final ConfigurationObserver configObserver; - private final Cache > bulkImported = CacheBuilder.newBuilder().expireAfterAccess(4, TimeUnit.HOURS).build(); - + private final Cache> bulkImported = CacheBuilder.newBuilder().expireAfterAccess(4, TimeUnit.HOURS).build(); private final int logId; @@ -324,7 +323,7 @@ public Tablet(TabletServer tabletServer, KeyExtent extent, TabletResourceManager } private Tablet(TabletServer tabletServer, Text location, KeyExtent extent, TabletResourceManager trm, SortedMap datafiles, - String time, long initFlushID, long initCompactID, TServerInstance lastLocation, Multimap bulkImported) throws IOException { + String time, long initFlushID, long initCompactID, TServerInstance lastLocation, Multimap bulkImported) throws IOException { this(tabletServer, extent, location, trm, NO_LOG_ENTRIES, datafiles, time, lastLocation, new HashSet(), initFlushID, initCompactID, bulkImported); } @@ -447,8 +446,8 @@ private static TServerInstance lookupLastServer(SortedMap tabletsKeyV return null; } - private static Multimap lookupBulkImported(SortedMap tabletsKeyValues, VolumeManager fs) { - Multimap result = HashMultimap.create(); + private static Multimap lookupBulkImported(SortedMap tabletsKeyValues, VolumeManager fs) { + Multimap result = HashMultimap.create(); for (Entry entry : tabletsKeyValues.entrySet()) { if (entry.getKey().getColumnFamily().compareTo(BulkFileColumnFamily.NAME) == 0) { result.put(Long.decode(entry.getValue().toString()), new FileRef(fs, entry.getKey())); @@ -459,33 +458,18 @@ private static Multimap lookupBulkImported(SortedMap t public Tablet(TabletServer tabletServer, KeyExtent extent, Text location, TabletResourceManager trm, SortedMap tabletsKeyValues) throws IOException { - this(tabletServer, extent, location, trm, - lookupLogEntries(tabletsKeyValues, tabletServer, extent), - lookupDatafiles(tabletServer, tabletServer.getFileSystem(), extent, tabletsKeyValues), - lookupTime(tabletServer.getConfiguration(), extent, tabletsKeyValues), - lookupLastServer(tabletsKeyValues), - lookupScanFiles(tabletsKeyValues, tabletServer.getFileSystem()), - lookupFlushID(tabletsKeyValues), - lookupCompactID(tabletsKeyValues), - lookupBulkImported(tabletsKeyValues, tabletServer.getFileSystem())); + this(tabletServer, extent, location, trm, lookupLogEntries(tabletsKeyValues, tabletServer, extent), 
lookupDatafiles(tabletServer, + tabletServer.getFileSystem(), extent, tabletsKeyValues), lookupTime(tabletServer.getConfiguration(), extent, tabletsKeyValues), + lookupLastServer(tabletsKeyValues), lookupScanFiles(tabletsKeyValues, tabletServer.getFileSystem()), lookupFlushID(tabletsKeyValues), + lookupCompactID(tabletsKeyValues), lookupBulkImported(tabletsKeyValues, tabletServer.getFileSystem())); } /** * yet another constructor - this one allows us to avoid costly lookups into the Metadata table if we already know the files we need - as at split time */ - private Tablet( - final TabletServer tabletServer, - final KeyExtent extent, - final Text location, - final TabletResourceManager trm, - final List rawLogEntries, - final SortedMap rawDatafiles, - String time, - final TServerInstance lastLocation, - final Set scanFiles, - final long initFlushID, - final long initCompactID, - final Multimap bulkImported) throws IOException { + private Tablet(final TabletServer tabletServer, final KeyExtent extent, final Text location, final TabletResourceManager trm, + final List rawLogEntries, final SortedMap rawDatafiles, String time, final TServerInstance lastLocation, + final Set scanFiles, final long initFlushID, final long initCompactID, final Multimap bulkImported) throws IOException { TableConfiguration tblConf = tabletServer.getTableConfiguration(extent); if (null == tblConf) { @@ -675,8 +659,8 @@ public void receive(Mutation m) { currentLogs.add(new DfsLogger(tabletServer.getServerConfig(), logEntry.filename, logEntry.getColumnQualifier().toString())); } - log.info("Write-Ahead Log recovery complete for " + this.extent + " (" + entriesUsedOnTablet.get() + " mutations applied, " + getTabletMemory().getNumEntries() - + " entries created)"); + log.info("Write-Ahead Log recovery complete for " + this.extent + " (" + entriesUsedOnTablet.get() + " mutations applied, " + + getTabletMemory().getNumEntries() + " entries created)"); } String contextName = tableConfiguration.get(Property.TABLE_CLASSPATH); @@ -2295,7 +2279,7 @@ public TreeMap split(byte[] sp) throws IOException { // it is possible that some of the bulk loading flags will be deleted after being read below because the bulk load // finishes.... therefore split could propagate load flags for a finished bulk load... 
there is a special iterator // on the metadata table to clean up this type of garbage - Multimap bulkLoadedFiles = MetadataTableUtil.getBulkFilesLoaded(getTabletServer(), extent); + Multimap bulkLoadedFiles = MetadataTableUtil.getBulkFilesLoaded(getTabletServer(), extent); MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, getTabletServer(), getTabletServer().getLock()); MasterMetadataUtil.addNewTablet(getTabletServer(), low, lowDirectory, getTabletServer().getTabletSession(), lowDatafileSizes, bulkLoadedFiles, time, diff --git a/shell/src/main/java/org/apache/accumulo/shell/ShellOptions.java b/shell/src/main/java/org/apache/accumulo/shell/ShellOptions.java index f7f30176424..5a60420c42a 100644 --- a/shell/src/main/java/org/apache/accumulo/shell/ShellOptions.java +++ b/shell/src/main/java/org/apache/accumulo/shell/ShellOptions.java @@ -16,7 +16,6 @@ */ package org.apache.accumulo.shell; - /** * Abstract class to encompass the Options available on the Accumulo Shell */ diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java index df4437dceb4..8d66ac5e68d 100644 --- a/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java +++ b/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java @@ -102,8 +102,8 @@ public class AccumuloClassLoader { } /** - * Parses and XML Document for a property node for a <name> with the value propertyName if it finds one the function return that property's value for its - * <value> node. If not found the function will return null + * Parses and XML Document for a property node for a <name> with the value propertyName if it finds one the function return that property's value for + * its <value> node. 
If not found the function will return null * * @param d * XMLDocument to search through diff --git a/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java b/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java index fb3c8a0840d..63630358791 100644 --- a/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java +++ b/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java @@ -16,7 +16,6 @@ */ package org.apache.accumulo.test; - public class EstimateInMemMapOverhead { private static void runTest(int numEntries, int keyLen, int colFamLen, int colQualLen, int colVisLen, int dataLen) { diff --git a/test/src/main/java/org/apache/accumulo/test/InMemoryMapMemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/InMemoryMapMemoryUsageTest.java index f325524ee49..fb0050ff0c0 100644 --- a/test/src/main/java/org/apache/accumulo/test/InMemoryMapMemoryUsageTest.java +++ b/test/src/main/java/org/apache/accumulo/test/InMemoryMapMemoryUsageTest.java @@ -99,4 +99,4 @@ int getNumPasses() { String getName() { return "IMM " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen; } -} \ No newline at end of file +} diff --git a/test/src/main/java/org/apache/accumulo/test/IntObjectMemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/IntObjectMemoryUsageTest.java index d83421a9871..7c135e729d9 100644 --- a/test/src/main/java/org/apache/accumulo/test/IntObjectMemoryUsageTest.java +++ b/test/src/main/java/org/apache/accumulo/test/IntObjectMemoryUsageTest.java @@ -62,4 +62,4 @@ int getNumPasses() { return passes; } -} \ No newline at end of file +} diff --git a/test/src/main/java/org/apache/accumulo/test/MemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/MemoryUsageTest.java index 39e8d68f356..c5a417a49db 100644 --- a/test/src/main/java/org/apache/accumulo/test/MemoryUsageTest.java +++ b/test/src/main/java/org/apache/accumulo/test/MemoryUsageTest.java @@ -61,4 +61,4 @@ public void run() { } -} \ No newline at end of file +} diff --git a/test/src/main/java/org/apache/accumulo/test/MutationMemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/MutationMemoryUsageTest.java index 011fbfe2d5e..6ded7db93ad 100644 --- a/test/src/main/java/org/apache/accumulo/test/MutationMemoryUsageTest.java +++ b/test/src/main/java/org/apache/accumulo/test/MutationMemoryUsageTest.java @@ -95,4 +95,4 @@ int getNumPasses() { String getName() { return "Mutation " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen; } -} \ No newline at end of file +} diff --git a/test/src/main/java/org/apache/accumulo/test/TextMemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/TextMemoryUsageTest.java index 14b818412f9..b9bc37ac476 100644 --- a/test/src/main/java/org/apache/accumulo/test/TextMemoryUsageTest.java +++ b/test/src/main/java/org/apache/accumulo/test/TextMemoryUsageTest.java @@ -79,4 +79,4 @@ String getName() { return "Text " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen; } -} \ No newline at end of file +} diff --git a/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousBatchWalker.java b/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousBatchWalker.java index c8a1143706d..1ea69a11eee 100644 --- a/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousBatchWalker.java +++ b/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousBatchWalker.java @@ -84,10 +84,10 @@ public static void main(String[] args) throws Exception { /* * 
private static void runSequentialScan(Scanner scanner, List ranges) { Set srowsSeen = new HashSet(); long st1 = * System.currentTimeMillis(); int scount = 0; for (Range range : ranges) { scanner.setRange(range); - * + * * for (Entry entry : scanner) { srowsSeen.add(entry.getKey().getRow()); scount++; } } - * - * + * + * * long st2 = System.currentTimeMillis(); System.out.println("SRQ "+(st2 - st1)+" "+srowsSeen.size() +" "+scount); } */ diff --git a/test/src/main/java/org/apache/accumulo/test/continuous/HistData.java b/test/src/main/java/org/apache/accumulo/test/continuous/HistData.java index f53a6a600d0..cf23482d0f0 100644 --- a/test/src/main/java/org/apache/accumulo/test/continuous/HistData.java +++ b/test/src/main/java/org/apache/accumulo/test/continuous/HistData.java @@ -46,4 +46,4 @@ public boolean equals(Object obj) { public int compareTo(HistData o) { return ((Comparable) bin).compareTo(o.bin); } -} \ No newline at end of file +} diff --git a/test/src/main/java/org/apache/accumulo/test/continuous/PrintScanTimeHistogram.java b/test/src/main/java/org/apache/accumulo/test/continuous/PrintScanTimeHistogram.java index 8c39a7f8ea1..a43c2ba2c8b 100644 --- a/test/src/main/java/org/apache/accumulo/test/continuous/PrintScanTimeHistogram.java +++ b/test/src/main/java/org/apache/accumulo/test/continuous/PrintScanTimeHistogram.java @@ -45,7 +45,7 @@ public static void main(String[] args) throws Exception { report = new StringBuilder(); report.append(String.format("%n *** Find start rows histogram *** %n")); fsrHist.print(report); - log.info("{}",report); + log.info("{}", report); } private static void processFile(InputStream ins, Histogram srqHist, Histogram fsrHist) throws FileNotFoundException, IOException { diff --git a/test/src/main/java/org/apache/accumulo/test/functional/AuthsIterator.java b/test/src/main/java/org/apache/accumulo/test/functional/AuthsIterator.java index 1bb82b6ef94..17151f56129 100644 --- a/test/src/main/java/org/apache/accumulo/test/functional/AuthsIterator.java +++ b/test/src/main/java/org/apache/accumulo/test/functional/AuthsIterator.java @@ -43,7 +43,7 @@ public void init(SortedKeyValueIterator source, Map op @Override public Key getTopKey() { - if(env.getAuthorizations().equals(AUTHS)) + if (env.getAuthorizations().equals(AUTHS)) return new Key(new Text(SUCCESS)); else return new Key(new Text(FAIL)); diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/Environment.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/Environment.java index 616ec63c4ec..0c53b35ed84 100644 --- a/test/src/main/java/org/apache/accumulo/test/randomwalk/Environment.java +++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/Environment.java @@ -166,7 +166,7 @@ public AuthenticationToken getToken() { if (null != keytab) { File keytabFile = new File(keytab); if (!keytabFile.exists() || !keytabFile.isFile()) { - throw new IllegalArgumentException("Provided keytab is not a normal file: "+ keytab); + throw new IllegalArgumentException("Provided keytab is not a normal file: " + keytab); } try { return new KerberosToken(getUserName(), keytabFile, true); diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/Replication.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/Replication.java index a8db8e59f92..c1b2502a1a7 100644 --- a/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/Replication.java +++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/Replication.java @@ -69,9 +69,8 @@ public 
void visit(State state, Environment env, Properties props) throws Excepti // Replicate to ourselves iOps.setProperty(REPLICATION_NAME.getKey(), instName); - iOps.setProperty(REPLICATION_PEERS.getKey() + instName, - getPeerConfigurationValue(AccumuloReplicaSystem.class, instName + "," + inst.getZooKeepers())); - iOps.setProperty(REPLICATION_PEER_USER.getKey() + instName , env.getUserName()); + iOps.setProperty(REPLICATION_PEERS.getKey() + instName, getPeerConfigurationValue(AccumuloReplicaSystem.class, instName + "," + inst.getZooKeepers())); + iOps.setProperty(REPLICATION_PEER_USER.getKey() + instName, env.getUserName()); iOps.setProperty(REPLICATION_PEER_PASSWORD.getKey() + instName, env.getPassword()); // Tweak some replication parameters to make the replication go faster iOps.setProperty(MASTER_REPLICATION_SCAN_INTERVAL.getKey(), "1s"); @@ -83,7 +82,8 @@ public void visit(State state, Environment env, Properties props) throws Excepti ReplicationTable.setOnline(c); boolean online = ReplicationTable.isOnline(c); for (int i = 0; i < 10; i++) { - if (online) break; + if (online) + break; UtilWaitThread.sleep(2000); online = ReplicationTable.isOnline(c); } @@ -92,7 +92,7 @@ public void visit(State state, Environment env, Properties props) throws Excepti // Make a source and destination table final String sourceTable = ("repl-source-" + UUID.randomUUID()).replace('-', '_'); final String destTable = ("repl-dest-" + UUID.randomUUID()).replace('-', '_'); - final String tables[] = new String[] { sourceTable, destTable }; + final String tables[] = new String[] {sourceTable, destTable}; for (String tableName : tables) { log.debug("creating " + tableName); @@ -182,8 +182,8 @@ private void assertEquals(int expected, int actual) { // junit isn't a dependency private void assertTrue(String string, boolean test) { - if (!test) - throw new RuntimeException(string); + if (!test) + throw new RuntimeException(string); } private static String itos(int i) { diff --git a/test/src/test/java/org/apache/accumulo/harness/AccumuloClusterIT.java b/test/src/test/java/org/apache/accumulo/harness/AccumuloClusterIT.java index df0ddf0d7b1..a0cca468aeb 100644 --- a/test/src/test/java/org/apache/accumulo/harness/AccumuloClusterIT.java +++ b/test/src/test/java/org/apache/accumulo/harness/AccumuloClusterIT.java @@ -129,7 +129,8 @@ public void setupCluster() throws Exception { case STANDALONE: StandaloneAccumuloClusterConfiguration conf = (StandaloneAccumuloClusterConfiguration) clusterConf; ClientConfiguration clientConf = conf.getClientConf(); - StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(conf.getInstance(), clientConf, conf.getTmpDirectory(), conf.getUsers(), conf.getAccumuloServerUser()); + StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(conf.getInstance(), clientConf, conf.getTmpDirectory(), conf.getUsers(), + conf.getAccumuloServerUser()); // If these are provided in the configuration, pass them into the cluster standaloneCluster.setAccumuloHome(conf.getAccumuloHome()); standaloneCluster.setAccumuloConfDir(conf.getAccumuloConfDir()); diff --git a/test/src/test/java/org/apache/accumulo/proxy/SimpleProxyBase.java b/test/src/test/java/org/apache/accumulo/proxy/SimpleProxyBase.java index 67f51be1f7c..e221081b81a 100644 --- a/test/src/test/java/org/apache/accumulo/proxy/SimpleProxyBase.java +++ b/test/src/test/java/org/apache/accumulo/proxy/SimpleProxyBase.java @@ -118,7 +118,7 @@ public abstract class SimpleProxyBase extends SharedMiniClusterIT { @Override 
protected int defaultTimeoutSeconds() { - return 10*60; + return 10 * 60; } private static final long ZOOKEEPER_PROPAGATION_TIME = 10 * 1000; diff --git a/test/src/test/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java b/test/src/test/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java index 86468c53016..2d79dd8ab0f 100644 --- a/test/src/test/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java +++ b/test/src/test/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java @@ -14,7 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.accumulo.test; +package org.apache.accumulo.test; import java.util.SortedSet; import java.util.TreeSet; @@ -39,8 +39,7 @@ protected int defaultTimeoutSeconds() { } @Override - protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { - } + protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {} @Test public void test() throws Exception { @@ -50,7 +49,8 @@ public void test() throws Exception { final Connector c = getConnector(); log.info("Creating table " + tableName); - c.tableOperations().create(tableName);; + c.tableOperations().create(tableName); + ; final SortedSet splits = new TreeSet<>(); for (String split : "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",")) { splits.add(new Text(split)); diff --git a/test/src/test/java/org/apache/accumulo/test/GarbageCollectWALIT.java b/test/src/test/java/org/apache/accumulo/test/GarbageCollectWALIT.java index 0324e4af447..96ae5792def 100644 --- a/test/src/test/java/org/apache/accumulo/test/GarbageCollectWALIT.java +++ b/test/src/test/java/org/apache/accumulo/test/GarbageCollectWALIT.java @@ -78,4 +78,4 @@ private int countWALsInFS(MiniAccumuloClusterImpl cluster) throws Exception { } return result; } -} \ No newline at end of file +} diff --git a/test/src/test/java/org/apache/accumulo/test/TableOperationsIT.java b/test/src/test/java/org/apache/accumulo/test/TableOperationsIT.java index 0127e6e2b6e..ced9d6942da 100644 --- a/test/src/test/java/org/apache/accumulo/test/TableOperationsIT.java +++ b/test/src/test/java/org/apache/accumulo/test/TableOperationsIT.java @@ -305,8 +305,8 @@ public void testCompactEmptyTableWithGeneratorIterator_Splits_Cancel() throws Ta Map actual = new TreeMap<>(COMPARE_KEY_TO_COLQ); // only compare row, colF, colQ for (Map.Entry entry : scanner) actual.put(entry.getKey(), entry.getValue()); - Assume.assumeFalse("Compaction successfully occurred due to weird timing but we hoped it would cancel.", - HardListIterator.allEntriesToInject.equals(actual)); + Assume + .assumeFalse("Compaction successfully occurred due to weird timing but we hoped it would cancel.", HardListIterator.allEntriesToInject.equals(actual)); assertTrue("Scan should be empty if compaction canceled. " + "Actual is " + actual, actual.isEmpty()); connector.tableOperations().delete(tableName); } diff --git a/test/src/test/java/org/apache/accumulo/test/VolumeIT.java b/test/src/test/java/org/apache/accumulo/test/VolumeIT.java index e2a0e03f528..2b242194af6 100644 --- a/test/src/test/java/org/apache/accumulo/test/VolumeIT.java +++ b/test/src/test/java/org/apache/accumulo/test/VolumeIT.java @@ -427,7 +427,7 @@ private void verifyVolumesUsed(String tableName, boolean shouldExist, Path... 
pa } Text path = new Text(); - for (String table : new String[]{RootTable.NAME, MetadataTable.NAME}) { + for (String table : new String[] {RootTable.NAME, MetadataTable.NAME}) { Scanner meta = conn.createScanner(table, Authorizations.EMPTY); meta.setRange(MetadataSchema.CurrentLogsSection.getRange()); outer: for (Entry entry : meta) { diff --git a/test/src/test/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java b/test/src/test/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java index afbcffe7411..054f9a4419a 100644 --- a/test/src/test/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java +++ b/test/src/test/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java @@ -147,7 +147,7 @@ public void testGetSplits() throws Exception { splits = inputFormat.getSplits(job); assertEquals(ranges.size(), splits.size()); - //BatchScan not available for offline scans + // BatchScan not available for offline scans AccumuloInputFormat.setBatchScan(job, true); // Reset auto-adjust ranges too AccumuloInputFormat.setAutoAdjustRanges(job, true); @@ -165,7 +165,7 @@ public void testGetSplits() throws Exception { splits = inputFormat.getSplits(job); assertEquals(2, splits.size()); - //BatchScan not available with isolated iterators + // BatchScan not available with isolated iterators AccumuloInputFormat.setScanIsolation(job, true); try { inputFormat.getSplits(job); @@ -177,7 +177,7 @@ public void testGetSplits() throws Exception { splits = inputFormat.getSplits(job); assertEquals(2, splits.size()); - //BatchScan not available with local iterators + // BatchScan not available with local iterators AccumuloInputFormat.setLocalIterators(job, true); try { inputFormat.getSplits(job); @@ -185,13 +185,13 @@ public void testGetSplits() throws Exception { } catch (IllegalArgumentException e) {} AccumuloInputFormat.setLocalIterators(job, false); - //Check we are getting back correct type pf split + // Check we are getting back correct type pf split conn.tableOperations().online(table); splits = inputFormat.getSplits(job); - for (InputSplit split: splits) - assert(split instanceof BatchInputSplit); + for (InputSplit split : splits) + assert (split instanceof BatchInputSplit); - //We should divide along the tablet lines similar to when using `setAutoAdjustRanges(job, true)` + // We should divide along the tablet lines similar to when using `setAutoAdjustRanges(job, true)` assertEquals(2, splits.size()); } diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ConcurrencyIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ConcurrencyIT.java index 31437ccac0c..a6792515d82 100644 --- a/test/src/test/java/org/apache/accumulo/test/functional/ConcurrencyIT.java +++ b/test/src/test/java/org/apache/accumulo/test/functional/ConcurrencyIT.java @@ -80,7 +80,7 @@ protected int defaultTimeoutSeconds() { /* * Below is a diagram of the operations in this test over time. 
- * + * * Scan 0 |------------------------------| Scan 1 |----------| Minc 1 |-----| Scan 2 |----------| Scan 3 |---------------| Minc 2 |-----| Majc 1 |-----| */ diff --git a/test/src/test/java/org/apache/accumulo/test/functional/KerberosIT.java b/test/src/test/java/org/apache/accumulo/test/functional/KerberosIT.java index 2c1dddffbcf..596676abd25 100644 --- a/test/src/test/java/org/apache/accumulo/test/functional/KerberosIT.java +++ b/test/src/test/java/org/apache/accumulo/test/functional/KerberosIT.java @@ -499,8 +499,8 @@ public AuthenticationToken run() throws Exception { }); // A restarted master should reuse the same secret key after a restart if the secret key hasn't expired (1day by default) - DelegationTokenImpl dt1 = (DelegationTokenImpl)delegationToken1; - DelegationTokenImpl dt2 = (DelegationTokenImpl)delegationToken2; + DelegationTokenImpl dt1 = (DelegationTokenImpl) delegationToken1; + DelegationTokenImpl dt2 = (DelegationTokenImpl) delegationToken2; assertEquals(dt1.getIdentifier().getKeyId(), dt2.getIdentifier().getKeyId()); } @@ -551,7 +551,7 @@ public AuthenticationToken run() throws Exception { } }); - AuthenticationTokenIdentifier identifier = ((DelegationTokenImpl)dt).getIdentifier(); + AuthenticationTokenIdentifier identifier = ((DelegationTokenImpl) dt).getIdentifier(); assertTrue("Expected identifier to expire in no more than 5 minutes: " + identifier, identifier.getExpirationDate() - identifier.getIssueDate() <= (5 * 60 * 1000)); } diff --git a/test/src/test/java/org/apache/accumulo/test/functional/KerberosProxyIT.java b/test/src/test/java/org/apache/accumulo/test/functional/KerberosProxyIT.java index fda35861a5b..dbfe730cc87 100644 --- a/test/src/test/java/org/apache/accumulo/test/functional/KerberosProxyIT.java +++ b/test/src/test/java/org/apache/accumulo/test/functional/KerberosProxyIT.java @@ -241,8 +241,8 @@ public void testProxyClient() throws Exception { TSocket socket = new TSocket(hostname, proxyPort); log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname); - TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap( - "javax.security.sasl.qop", "auth"), null, socket); + TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop", + "auth"), null, socket); final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi); @@ -336,8 +336,8 @@ public void testDisallowedClientForImpersonation() throws Exception { log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname); // Should fail to open the tran - TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap( - "javax.security.sasl.qop", "auth"), null, socket); + TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop", + "auth"), null, socket); final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi); diff --git a/test/src/test/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java b/test/src/test/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java index 9f22466c74a..725917746af 100644 --- a/test/src/test/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java +++ b/test/src/test/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java @@ -84,7 +84,7 @@ 
int countTrue(Collection bools) { int result = 0; for (Boolean b : bools) { if (b.booleanValue()) - result ++; + result++; } return result; } @@ -120,7 +120,7 @@ public void process(WatchedEvent event) { assertEquals("all WALs should be in use", 2, countTrue(walsAfterRoll.values())); // flush the tables - for (String table: new String[] { tableName, MetadataTable.NAME, RootTable.NAME} ) { + for (String table : new String[] {tableName, MetadataTable.NAME, RootTable.NAME}) { c.tableOperations().flush(table, null, null, true); } UtilWaitThread.sleep(1000); @@ -147,7 +147,7 @@ public void process(WatchedEvent event) { // allow a little time for the master to notice ASSIGNED_TO_DEAD_SERVER tablets UtilWaitThread.sleep(5 * 1000); Map> markers = getRecoveryMarkers(c); - //log.debug("markers " + markers); + // log.debug("markers " + markers); assertEquals("one tablet should have markers", 1, markers.keySet().size()); assertEquals("tableId of the keyExtent should be 1", markers.keySet().iterator().next().getTableId(), new Text("1")); @@ -157,7 +157,7 @@ public void process(WatchedEvent event) { writeSomeData(c, tableName, 100, 100); Map walsAfterRestart = getWals(c, zoo); - //log.debug("wals after " + walsAfterRestart); + // log.debug("wals after " + walsAfterRestart); assertEquals("used WALs after restart should be 1", 1, countTrue(walsAfterRestart.values())); control.start(GARBAGE_COLLECTOR); UtilWaitThread.sleep(5 * 1000); @@ -196,8 +196,8 @@ private void writeSomeData(Connector conn, String tableName, int row, int col) t bw.close(); } - private Map getWals(Connector c, ZooKeeper zoo) throws Exception { - Map result = new HashMap<>(); + private Map getWals(Connector c, ZooKeeper zoo) throws Exception { + Map result = new HashMap<>(); Scanner root = c.createScanner(RootTable.NAME, EMPTY); root.setRange(CurrentLogsSection.getRange()); Scanner meta = c.createScanner(MetadataTable.NAME, EMPTY); @@ -218,8 +218,8 @@ private Map getWals(Connector c, ZooKeeper zoo) throws Exceptio return result; } - private Map> getRecoveryMarkers(Connector c) throws Exception { - Map> result = new HashMap<>(); + private Map> getRecoveryMarkers(Connector c) throws Exception { + Map> result = new HashMap<>(); Scanner root = c.createScanner(RootTable.NAME, EMPTY); root.setRange(TabletsSection.getRange()); root.fetchColumnFamily(TabletsSection.LogColumnFamily.NAME); diff --git a/test/src/test/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java b/test/src/test/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java index fcd1fd71c1a..5418e1c1d3e 100644 --- a/test/src/test/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java +++ b/test/src/test/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java @@ -71,14 +71,8 @@ private long ingest() throws Exception { log.info("Starting ingest"); final long start = System.currentTimeMillis(); - final String args[] = { - "-i", inst.getInstanceName(), - "-z", inst.getZooKeepers(), - "-u", "root", - "-p", ROOT_PASSWORD, - "--batchThreads", "2", - "--table", tableName, - "--num", Long.toString(1000*1000), // 1M 100 byte entries + final String args[] = {"-i", inst.getInstanceName(), "-z", inst.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--batchThreads", "2", "--table", + tableName, "--num", Long.toString(1000 * 1000), // 1M 100 byte entries }; ContinuousIngest.main(args); @@ -106,7 +100,7 @@ private void testWalPerformanceOnce() throws Exception { c.instanceOperations().setProperty(Property.TSERV_WALOG_MAX_SIZE.getKey(), 
"1G"); c.tableOperations().flush(MetadataTable.NAME, null, null, true); c.tableOperations().flush(RootTable.NAME, null, null, true); - for (ProcessReference tserver : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) { + for (ProcessReference tserver : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) { getCluster().killProcess(ServerType.TABLET_SERVER, tserver); } getCluster().start(); @@ -118,7 +112,7 @@ private void testWalPerformanceOnce() throws Exception { assertTrue(percent < 125.); } - @Test(timeout= 20 * 60 * 1000) + @Test(timeout = 20 * 60 * 1000) public void testWalPerformance() throws Exception { testWalPerformanceOnce(); } diff --git a/test/src/test/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java b/test/src/test/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java index d1ecfe942a7..05c907cb9a7 100644 --- a/test/src/test/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java +++ b/test/src/test/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java @@ -46,7 +46,6 @@ protected int defaultTimeoutSeconds() { return 60; } - @Override protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { cfg.setNumTservers(3); @@ -56,7 +55,6 @@ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSit cfg.setProperty(Property.TABLE_FILE_MAX, "9999"); } - @Test public void test() throws Exception { log.info("Creating table"); @@ -83,7 +81,8 @@ public void test() throws Exception { fs.mkdirs(bulkFailures); fs.mkdirs(files); for (int i = 0; i < 100; i++) { - FileSKVWriter writer = FileOperations.getInstance().openWriter(files.toString() + "/bulk_" + i + "." + RFile.EXTENSION, fs, fs.getConf(), AccumuloConfiguration.getDefaultConfiguration()); + FileSKVWriter writer = FileOperations.getInstance().openWriter(files.toString() + "/bulk_" + i + "." 
+ RFile.EXTENSION, fs, fs.getConf(), + AccumuloConfiguration.getDefaultConfiguration()); writer.startDefaultLocalityGroup(); for (int j = 0x100; j < 0xfff; j += 3) { writer.append(new Key(Integer.toHexString(j)), new Value(new byte[0])); diff --git a/test/src/test/java/org/apache/accumulo/test/replication/ReplicationIT.java b/test/src/test/java/org/apache/accumulo/test/replication/ReplicationIT.java index 768d8570fcb..54b42f40b18 100644 --- a/test/src/test/java/org/apache/accumulo/test/replication/ReplicationIT.java +++ b/test/src/test/java/org/apache/accumulo/test/replication/ReplicationIT.java @@ -133,7 +133,7 @@ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) private Multimap getLogs(Connector conn) throws TableNotFoundException { // Map of server to tableId - Multimap serverToTableID = HashMultimap.create(); + Multimap serverToTableID = HashMultimap.create(); Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY); scanner.setRange(MetadataSchema.TabletsSection.getRange()); scanner.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME); @@ -154,7 +154,7 @@ private Multimap getLogs(Connector conn) throws TableNotFoundExce MetadataSchema.CurrentLogsSection.getPath(entry.getKey(), path); Text session = new Text(); Text hostPort = new Text(); - MetadataSchema.CurrentLogsSection.getTabletServer(entry.getKey(), hostPort , session); + MetadataSchema.CurrentLogsSection.getTabletServer(entry.getKey(), hostPort, session); TServerInstance server = new TServerInstance(AddressUtil.parseAddress(hostPort.toString(), false), session.toString()); for (String tableId : serverToTableID.get(server)) { logs.put(new Path(path.toString()).toString(), tableId); From e4cc8c8f2dbb35ad217dc198c3b12029fe6e9921 Mon Sep 17 00:00:00 2001 From: Christopher Tubbs Date: Wed, 6 May 2015 18:03:59 -0400 Subject: [PATCH 3/3] ACCUMULO-3771 Improvements to autoformatting * Ensure line endings are consistent * Eliminate whitespace in non-javadoc block comments * Insert @formatter:off/on tags to prevent some reformatting * Make eclipse ignore the plugin --- .../client/impl/ConditionalWriterImpl.java | 5 +++-- .../core/client/impl/ThriftTransportPool.java | 10 ++++----- .../client/mapred/AccumuloOutputFormat.java | 2 +- .../client/mapreduce/InputFormatBase.java | 2 +- .../core/conf/ConfigurationDocGen.java | 12 ++++++----- .../apache/accumulo/core/data/Mutation.java | 4 ++-- .../core/file/blockfile/cache/HeapSize.java | 1 - .../file/blockfile/cache/LruBlockCache.java | 19 +++++++++-------- .../lexicoder/impl/AbstractLexicoderTest.java | 2 +- .../core/client/mock/MockNamespacesTest.java | 7 ++----- .../accumulo/fate/zookeeper/ZooCache.java | 13 +++++------- pom.xml | 21 +++++++++++++++++-- .../server/problems/ProblemReports.java | 4 ++-- .../accumulo/server/util/TabletIterator.java | 9 -------- .../CloseWriteAheadLogReferences.java | 18 +++++++--------- .../accumulo/master/tableOps/CleanUp.java | 7 ++----- .../apache/accumulo/tserver/TabletServer.java | 20 +++++++----------- .../classloader/AccumuloClassLoader.java | 4 ++-- .../continuous/ContinuousBatchWalker.java | 10 --------- .../test/functional/ConcurrencyIT.java | 17 +++++++++------ 20 files changed, 87 insertions(+), 100 deletions(-) diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java index 7cf4928957f..b8375dc9cc3 100644 --- 
a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java +++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java @@ -657,10 +657,11 @@ private void invalidateSession(HostAndPort location, TabletServerMutations * If a conditional mutation is taking a long time to process, then this method will wait for it to finish... unless this exceeds timeout. */ private void invalidateSession(SessionID sessionId, HostAndPort location) throws AccumuloException, AccumuloSecurityException, TableNotFoundException { diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftTransportPool.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftTransportPool.java index 36d4e840e25..ba62cecdff9 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftTransportPool.java +++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftTransportPool.java @@ -190,12 +190,10 @@ final void setReserved(boolean reserved) { } final void checkForStuckIO(long threshold) { - /* - * checking for stuck io needs to be light weight. - * - * Tried to call System.currentTimeMillis() and Thread.currentThread() before every io operation.... this dramatically slowed things down. So switched to - * incrementing a counter before and after each io operation. - */ + // checking for stuck io needs to be light weight. + + // Tried to call System.currentTimeMillis() and Thread.currentThread() before every io operation.... this dramatically slowed things down. So switched to + // incrementing a counter before and after each io operation. if ((ioCount & 1) == 1) { // when ioCount is odd, it means I/O is currently happening diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java index af3197c0767..c194cf6d42e 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java @@ -83,7 +83,7 @@ public class AccumuloOutputFormat implements OutputFormat { /** * Sets the connector information needed to communicate with Accumulo in this job. - * + * *
* WARNING: Some tokens, when serialized, divulge sensitive information in the configuration as a means to pass the token to MapReduce tasks. This * information is BASE64 encoded to provide a charset safe conversion to a string, but this conversion is not intended to be secure. {@link PasswordToken} is diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java index ef4509f7244..6ab8a1926bc 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java +++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java @@ -376,7 +376,7 @@ protected void setupIterators(TaskAttemptContext context, Scanner scanner) { /** * Initialize a scanner over the given input split using this task attempt configuration. - * + * * @deprecated since 1.7.0; Use {@link #contextIterators} instead. */ @Deprecated diff --git a/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java b/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java index 5d95410c4af..7357a9b60cd 100644 --- a/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java +++ b/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java @@ -356,12 +356,14 @@ void generateAsciidoc() { new Asciidoc().generate(); } - /* + /** * Generates documentation for conf/accumulo-site.xml file usage. Arguments are: "--generate-doc", file to write to. - * - * @param args command-line arguments - * - * @throws IllegalArgumentException if args is invalid + * + * @param args + * command-line arguments + * + * @throws IllegalArgumentException + * if args is invalid */ public static void main(String[] args) throws FileNotFoundException, UnsupportedEncodingException { if (args.length == 2 && args[0].equals("--generate-html")) { diff --git a/core/src/main/java/org/apache/accumulo/core/data/Mutation.java b/core/src/main/java/org/apache/accumulo/core/data/Mutation.java index aba5cdcbcd3..08a53b1ed23 100644 --- a/core/src/main/java/org/apache/accumulo/core/data/Mutation.java +++ b/core/src/main/java/org/apache/accumulo/core/data/Mutation.java @@ -98,9 +98,9 @@ private void serialize() { } } - /* + /** * This is so hashCode & equals can be called without changing this object. - * + * * It will return a copy of the current data buffer if serialized has not been called previously. Otherwise, this.data will be returned since the buffer is * null and will not change. */ diff --git a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/HeapSize.java b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/HeapSize.java index b1d975227ee..88d5b88d810 100644 --- a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/HeapSize.java +++ b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/HeapSize.java @@ -30,7 +30,6 @@ * *
  * public class SampleObject implements HeapSize {
- * 
  *   int[] numbers;
  *   int x;
  * }
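The hunk above only trims whitespace in the HeapSize javadoc, but the contract it documents is easy to sketch. Below is a minimal, self-contained illustration of the heapSize() method; the local HeapSize stub and the sizing constants are assumptions made for the example and are not code from this patch (the real interface lives in org.apache.accumulo.core.file.blockfile.cache). A cache such as LruBlockCache, diffed next, can charge each cached entry against its memory bound using the value heapSize() reports.

// Hedged sketch only: a stand-in for the HeapSize contract documented above.
// The interface stub and sizing constants are illustrative assumptions, not patch code.
interface HeapSize {
  long heapSize();
}

class SampleObject implements HeapSize {
  // Rough, non-authoritative JVM sizing guesses: object header, one reference, one int field.
  private static final long OBJECT_OVERHEAD = 16L;
  private static final long REFERENCE_SIZE = 8L;
  private static final long INT_SIZE = 4L;

  int[] numbers;
  int x;

  @Override
  public long heapSize() {
    // Count the array payload and its own header only when the array exists.
    long arraySize = (numbers == null) ? 0L : OBJECT_OVERHEAD + (long) numbers.length * INT_SIZE;
    return OBJECT_OVERHEAD + REFERENCE_SIZE + INT_SIZE + arraySize;
  }
}

class HeapSizeDemo {
  public static void main(String[] args) {
    SampleObject o = new SampleObject();
    o.numbers = new int[8];
    System.out.println("estimated heap size: " + o.heapSize() + " bytes");
  }
}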
diff --git a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/LruBlockCache.java b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/LruBlockCache.java
index 7d960561af5..2a14b367021 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/LruBlockCache.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/LruBlockCache.java
@@ -36,29 +36,29 @@
 /**
  * A block cache implementation that is memory-aware using {@link HeapSize}, memory-bound using an LRU eviction algorithm, and concurrent: backed by a
  * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving constant-time {@link #cacheBlock} and {@link #getBlock} operations.
- * 
+ *
  * Contains three levels of block priority to allow for scan-resistance and in-memory families. A block is added with an inMemory flag if necessary, otherwise a
  * block becomes a single access priority. Once a blocked is accessed again, it changes to multiple access. This is used to prevent scans from thrashing the
  * cache, adding a least-frequently-used element to the eviction algorithm.
- * 
+ *
  * Each priority is given its own chunk of the total cache to ensure fairness during eviction. Each priority will retain close to its maximum size, however, if
  * any priority is not using its entire chunk the others are able to grow beyond their chunk size.
- * 
+ *
  * Instantiated at a minimum with the total size and average block size. All sizes are in bytes. The block size is not especially important as this cache is
  * fully dynamic in its sizing of blocks. It is only used for pre-allocating data structures and in initial heap estimation of the map.
- * 
+ *
  * The detailed constructor defines the sizes for the three priorities (they should total to the maximum size defined). It also sets the levels that trigger and
  * control the eviction thread.
- * 
+ *
  * The acceptable size is the cache size level which triggers the eviction process to start. It evicts enough blocks to get the size below the minimum size
  * specified.
- * 
+ *
  * Eviction happens in a separate thread and involves a single full-scan of the map. It determines how many bytes must be freed to reach the minimum size, and
  * then while scanning determines the fewest least-recently-used blocks necessary from each of the three priorities (would be 3 times bytes to free). It then
  * uses the priority chunk sizes to evict fairly according to the relative sizes and usage.
@@ -511,9 +511,10 @@ public long getEvictedCount() {
     return this.stats.getEvictedCount();
   }
 
-  /*
-   * Eviction thread. Sits in waiting state until an eviction is triggered when the cache size grows above the acceptable level.
-   * 
+  /**
+   * Eviction thread. Sits in waiting state until an eviction is triggered when the cache size grows above the acceptable level.
+   *
+   *
* Thread is triggered into action by {@link LruBlockCache#runEviction()} */ private static class EvictionThread extends Thread { diff --git a/core/src/test/java/org/apache/accumulo/core/client/lexicoder/impl/AbstractLexicoderTest.java b/core/src/test/java/org/apache/accumulo/core/client/lexicoder/impl/AbstractLexicoderTest.java index 5c0358828f7..d695ce972b0 100644 --- a/core/src/test/java/org/apache/accumulo/core/client/lexicoder/impl/AbstractLexicoderTest.java +++ b/core/src/test/java/org/apache/accumulo/core/client/lexicoder/impl/AbstractLexicoderTest.java @@ -22,7 +22,7 @@ /** * Assists in Testing classes that extend {@link org.apache.accumulo.core.client.lexicoder.AbstractEncoder}. It references methods not formally defined in the * {@link org.apache.accumulo.core.client.lexicoder.Lexicoder} interface. - * + * * @since 1.7.0 */ public abstract class AbstractLexicoderTest extends LexicoderTest { diff --git a/core/src/test/java/org/apache/accumulo/core/client/mock/MockNamespacesTest.java b/core/src/test/java/org/apache/accumulo/core/client/mock/MockNamespacesTest.java index 0b70d293666..308152e283e 100644 --- a/core/src/test/java/org/apache/accumulo/core/client/mock/MockNamespacesTest.java +++ b/core/src/test/java/org/apache/accumulo/core/client/mock/MockNamespacesTest.java @@ -210,11 +210,8 @@ public void testRenameAndCloneTableToNewNamespace() throws Exception { assertTrue(!c.tableOperations().exists(tableName)); // TODO implement clone in mock - /* - * c.tableOperations().clone(tableName1, tableName2, false, null, null); - * - * assertTrue(c.tableOperations().exists(tableName1)); assertTrue(c.tableOperations().exists(tableName2)); - */ + // c.tableOperations().clone(tableName1, tableName2, false, null, null); + // assertTrue(c.tableOperations().exists(tableName1)); assertTrue(c.tableOperations().exists(tableName2)); return; } diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java index ec92a7f3054..f043d838862 100644 --- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java +++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java @@ -256,14 +256,11 @@ public void run(ZooKeeper zooKeeper) throws KeeperException, InterruptedExceptio if (cache.containsKey(zPath)) return; - /* - * The following call to exists() is important, since we are caching that a node does not exist. Once the node comes into existence, it will be added to - * the cache. But this notification of a node coming into existence will only be given if exists() was previously called. - * - * If the call to exists() is bypassed and only getData() is called with a special case that looks for Code.NONODE in the KeeperException, then - * non-existence can not be cached. - */ - + // The following call to exists() is important, since we are caching that a node does not exist. Once the node comes into existence, it will be added to + // the cache. But this notification of a node coming into existence will only be given if exists() was previously called. + // + // If the call to exists() is bypassed and only getData() is called with a special case that looks for Code.NONODE in the KeeperException, then + // non-existence can not be cached. 
Stat stat = zooKeeper.exists(zPath, watcher); byte[] data = null; diff --git a/pom.xml b/pom.xml index 41bd12b561c..300c7a34e08 100644 --- a/pom.xml +++ b/pom.xml @@ -733,6 +733,19 @@ + + + com.googlecode.maven-java-formatter-plugin + maven-java-formatter-plugin + [0.4,) + + format + + + + + + org.apache.maven.plugins @@ -910,17 +923,21 @@ maven-java-formatter-plugin 0.4 + ${maven.compiler.source} + ${maven.compiler.source} + ${maven.compiler.target} **/thrift/*.java **/proto/*.java + LF + true org.eclipse.tycho org.eclipse.jdt.core 3.10.0.v20140604-1726 - compile @@ -976,7 +993,7 @@ - + diff --git a/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java b/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java index 8298c29ad7e..d44efb18384 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java +++ b/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java @@ -62,9 +62,9 @@ public class ProblemReports implements Iterable { private final LRUMap problemReports = new LRUMap(1000); - /* + /** * use a thread pool so that reporting a problem never blocks - * + * * make the thread pool use a bounded queue to avoid the case where problem reports are not being processed because the whole system is in a really bad state * (like HDFS is down) and everything is reporting lots of problems, but problem reports can not be processed */ diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java b/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java index 96ea9eb5af2..2137999e278 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java +++ b/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java @@ -48,8 +48,6 @@ * * If a tablet that was returned by this iterator is subsequently deleted from the metadata table, then this iterator will throw a TabletDeletedException. This * could occur when a table is merged. - * - * */ public class TabletIterator implements Iterator> { @@ -77,18 +75,11 @@ public TabletDeletedException(String msg) { } } - /* - * public TabletIterator(String table, boolean returnPrevEndRow){ - * - * } - */ - /** * * @param s * A scanner over the entire metadata table configure to fetch needed columns. */ - public TabletIterator(Scanner s, Range range, boolean returnPrevEndRow, boolean returnDir) { this.scanner = s; this.range = range; diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java b/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java index 68ea62dc281..cb4b341594a 100644 --- a/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java +++ b/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java @@ -115,16 +115,14 @@ public void run() { log.debug("Referenced WALs: " + referencedWals); sw.reset(); - /* - * ACCUMULO-3320 WALs cannot be closed while a TabletServer may still use it later. - * - * In addition to the WALs that are actively referenced in the metadata table, tservers can also hold on to a WAL that is not presently referenced by any - * tablet. For example, a tablet could MinC which would end in all logs for that tablet being removed. However, if more data was ingested into the table, - * the same WAL could be re-used again by that tserver. 
- * - * If this code happened to run after the compaction but before the log is again referenced by a tabletserver, we might delete the WAL reference, only to - * have it recreated again which causes havoc with the replication status for a table. - */ + // ACCUMULO-3320 WALs cannot be closed while a TabletServer may still use it later. + // + // In addition to the WALs that are actively referenced in the metadata table, tservers can also hold on to a WAL that is not presently referenced by any + // tablet. For example, a tablet could MinC which would end in all logs for that tablet being removed. However, if more data was ingested into the table, + // the same WAL could be re-used again by that tserver. + // + // If this code happened to run after the compaction but before the log is again referenced by a tabletserver, we might delete the WAL reference, only to + // have it recreated again which causes havoc with the replication status for a table. final TInfo tinfo = Tracer.traceInfo(); Set activeWals; Span findActiveWalsSpan = Trace.start("findActiveWals"); diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java index 4ee13110c97..f22177548c9 100644 --- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java +++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java @@ -69,11 +69,8 @@ class CleanUp extends MasterRepo { private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject(); - /* - * handle the case where we start executing on a new machine where the current time is in the past relative to the previous machine - * - * if the new machine has time in the future, that will work ok w/ hasCycled - */ + // handle the case where we start executing on a new machine where the current time is in the past relative to the previous machine + // if the new machine has time in the future, that will work ok w/ hasCycled if (System.currentTimeMillis() < creationTime) { creationTime = System.currentTimeMillis(); } diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java index 7d9bf5edf16..6f2c9a287b0 100644 --- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java +++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java @@ -2106,19 +2106,13 @@ public void run() { locationToOpen = VolumeUtil.switchRootTabletVolume(extent, locationToOpen); tablet = new Tablet(TabletServer.this, extent, locationToOpen, trm, tabletsKeyValues); - /* - * @formatter:off If a minor compaction starts after a tablet opens, this indicates a log recovery occurred. This recovered data must be minor - * compacted. - * - * There are three reasons to wait for this minor compaction to finish before placing the tablet in online tablets. - * - * 1) The log recovery code does not handle data written to the tablet on multiple tablet servers. 2) The log recovery code does not block if memory is - * full. Therefore recovering lots of tablets that use a lot of memory could run out of memory. 3) The minor compaction finish event did not make it to - * the logs (the file will be in metadata, preventing replay of compacted data)... but do not want a majc to wipe the file out from metadata and then - * have another process failure... this could cause duplicate data to replay. 
- * - * @formatter:on - */ + // If a minor compaction starts after a tablet opens, this indicates a log recovery occurred. This recovered data must be minor compacted. + // There are three reasons to wait for this minor compaction to finish before placing the tablet in online tablets. + // + // 1) The log recovery code does not handle data written to the tablet on multiple tablet servers. + // 2) The log recovery code does not block if memory is full. Therefore recovering lots of tablets that use a lot of memory could run out of memory. + // 3) The minor compaction finish event did not make it to the logs (the file will be in metadata, preventing replay of compacted data)... but do not + // want a majc to wipe the file out from metadata and then have another process failure... this could cause duplicate data to replay. if (tablet.getNumEntriesInMemory() > 0 && !tablet.minorCompactNow(MinorCompactionReason.RECOVERY)) { throw new RuntimeException("Minor compaction after recovery fails for " + extent); } diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java index 8d66ac5e68d..53b36b4fa38 100644 --- a/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java +++ b/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java @@ -102,8 +102,8 @@ public class AccumuloClassLoader { } /** - * Parses and XML Document for a property node for a <name> with the value propertyName if it finds one the function return that property's value for - * its <value> node. If not found the function will return null + * Parses an XML Document for a property node for a <name> with the value propertyName if it finds one the function return that property's value for its + * <value> node. If not found the function will return null. * * @param d * XMLDocument to search through diff --git a/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousBatchWalker.java b/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousBatchWalker.java index 1ea69a11eee..5c54b08e1bf 100644 --- a/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousBatchWalker.java +++ b/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousBatchWalker.java @@ -81,16 +81,6 @@ public static void main(String[] args) throws Exception { } - /* - * private static void runSequentialScan(Scanner scanner, List ranges) { Set srowsSeen = new HashSet(); long st1 = - * System.currentTimeMillis(); int scount = 0; for (Range range : ranges) { scanner.setRange(range); - * - * for (Entry entry : scanner) { srowsSeen.add(entry.getKey().getRow()); scount++; } } - * - * - * long st2 = System.currentTimeMillis(); System.out.println("SRQ "+(st2 - st1)+" "+srowsSeen.size() +" "+scount); } - */ - private static void runBatchScan(int batchSize, BatchScanner bs, Set batch, List ranges) { bs.setRanges(ranges); diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ConcurrencyIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ConcurrencyIT.java index a6792515d82..859eafdcdf8 100644 --- a/test/src/test/java/org/apache/accumulo/test/functional/ConcurrencyIT.java +++ b/test/src/test/java/org/apache/accumulo/test/functional/ConcurrencyIT.java @@ -78,12 +78,17 @@ protected int defaultTimeoutSeconds() { return 2 * 60; } - /* - * Below is a diagram of the operations in this test over time. 
-   * 
-   * Scan 0 |------------------------------| Scan 1 |----------| Minc 1 |-----| Scan 2 |----------| Scan 3 |---------------| Minc 2 |-----| Majc 1 |-----|
-   */
-
+  // @formatter:off
+  // Below is a diagram of the operations in this test over time.
+  //
+  // Scan 0 |------------------------------|
+  // Scan 1 |----------|
+  // Minc 1 |-----|
+  // Scan 2 |----------|
+  // Scan 3 |---------------|
+  // Minc 2 |-----|
+  // Majc 1 |-----|
+  // @formatter:on
   @Test
   public void run() throws Exception {
     Connector c = getConnector();
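The final hunk converts a javadoc-style timeline into line comments fenced by @formatter:off and @formatter:on so the automatic formatter leaves the hand-aligned diagram alone. A small stand-alone illustration of the same pattern is sketched below; the class and the table inside the comment are invented for the example, and it assumes the Eclipse formatter profile the plugin points at has its on/off-tags feature enabled, which this hunk does not itself show.

// Hedged illustration of the @formatter:off/on pattern used in the hunk above.
// The class and the table contents are made up; only the tag usage mirrors the patch.
public class FormatterTagsExample {
  // @formatter:off
  // state     | on flush | on compact
  // ----------+----------+-----------
  // in-memory | written  | unchanged
  // on-disk   | skipped  | rewritten
  // @formatter:on

  public static void main(String[] args) {
    System.out.println("Everything between @formatter:off and @formatter:on keeps its alignment");
    System.out.println("when the Eclipse-based formatter runs, provided on/off tags are enabled.");
  }
}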