diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java
index 5a8f5927e2d..517cd19665d 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java
@@ -178,6 +178,7 @@ public static void setAutoAdjustRanges(JobConf job, boolean enableFeature) {
/**
* Determines whether a configuration has auto-adjust ranges enabled.
+ * Must be enabled when {@link #setBatchScan(JobConf, boolean)} is true.
*
* @param job
* the Hadoop context for the configured job
@@ -296,6 +297,48 @@ protected static boolean isOfflineScan(JobConf job) {
return InputConfigurator.isOfflineScan(CLASS, job);
}
+ /**
+ * Controls the use of the {@link org.apache.accumulo.core.client.BatchScanner} in this job.
+ * Using this feature will group Ranges by their source tablet, producing an InputSplit per tablet
+ * rather than per Range. This batching helps to reduce overhead when querying a large number of small ranges.
+ * (e.g., when doing quad-tree decomposition for spatial queries).
+ *
+ * In order to achieve good locality of InputSplits this option always clips the input Ranges to tablet boundaries.
+ * This may result in one input Range contributing to several InputSplits.
+ *
+ * Note: the value of {@link #setAutoAdjustRanges(JobConf, boolean)} is ignored and assumed to be true when the BatchScan option is enabled.
+ *
+ * This configuration is incompatible with:
+ *
+ * - {@link #setOfflineTableScan(JobConf, boolean)}
+ * - {@link #setLocalIterators(JobConf, boolean)}
+ * - {@link #setScanIsolation(JobConf, boolean)}
+ *
+ *
+ * By default, this feature is disabled.
+ *
+ * @param job
+ * the Hadoop job instance to be configured
+ * @param enableFeature
+ * the feature is enabled if true, disabled otherwise
+ * @since 1.7.0
+ */
+ public static void setBatchScan(JobConf job, boolean enableFeature) {
+ InputConfigurator.setBatchScan(CLASS, job, enableFeature);
+ }
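A minimal driver sketch of how a mapred job might enable this option; the user, password, table, and instance names are placeholders, and the other configurator calls mirror the MRTester test changes at the end of this patch:

```java
// Hedged sketch: enable per-tablet grouping via the new setBatchScan option.
import org.apache.accumulo.core.client.mapred.AccumuloInputFormat;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.hadoop.mapred.JobConf;

public class BatchScanJobSetup {
  public static void configure(JobConf job) throws Exception {
    AccumuloInputFormat.setConnectorInfo(job, "user", new PasswordToken("pass"));
    AccumuloInputFormat.setInputTableName(job, "table");
    AccumuloInputFormat.setMockInstance(job, "instance"); // placeholder; a real job would point at a ZooKeeper instance
    // Group Ranges by source tablet, yielding one InputSplit per tablet.
    AccumuloInputFormat.setBatchScan(job, true);
  }
}
```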
+
+ /**
+ * Determines whether a configuration has the {@link org.apache.accumulo.core.client.BatchScanner} feature enabled.
+ *
+ * @param job
+ * the Hadoop context for the configured job
+ * @since 1.7.0
+ * @see #setBatchScan(JobConf, boolean)
+ */
+ public static boolean isBatchScan(JobConf job) {
+ return InputConfigurator.isBatchScan(CLASS, job);
+ }
+
/**
* Initializes an Accumulo {@link org.apache.accumulo.core.client.impl.TabletLocator} based on the configuration.
*
@@ -315,19 +358,8 @@ protected static TabletLocator getTabletLocator(JobConf job) throws TableNotFoun
protected abstract static class RecordReaderBase<K,V> extends AbstractRecordReader<K,V> {
@Override
- protected void setupIterators(JobConf job, Scanner scanner, String tableName, org.apache.accumulo.core.client.mapred.RangeInputSplit split) {
- List<IteratorSetting> iterators = null;
-
- if (null == split) {
- iterators = getIterators(job);
- } else {
- iterators = split.getIterators();
- if (null == iterators) {
- iterators = getIterators(job);
- }
- }
-
- setupIterators(iterators, scanner);
+ protected List<IteratorSetting> jobIterators(JobConf job, String tableName) {
+ return getIterators(job);
}
/**
@@ -337,7 +369,9 @@ protected void setupIterators(JobConf job, Scanner scanner, String tableName, or
* the iterators to set
* @param scanner
* the scanner to configure
+ * @deprecated since 1.7.0; Use {@link #jobIterators} instead.
*/
+ @Deprecated
protected void setupIterators(List<IteratorSetting> iterators, Scanner scanner) {
for (IteratorSetting iterator : iterators) {
scanner.addScanIterator(iterator);
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/impl/BatchInputSplit.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/impl/BatchInputSplit.java
new file mode 100644
index 00000000000..619f9cd585c
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/impl/BatchInputSplit.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapred.impl;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.accumulo.core.data.Range;
+import org.apache.hadoop.mapred.InputSplit;
+
+/**
+ * The Class BatchInputSplit. Encapsulates Accumulo ranges for use in Map Reduce jobs.
+ * Can contain several Ranges per InputSplit.
+ */
+public class BatchInputSplit extends org.apache.accumulo.core.client.mapreduce.impl.BatchInputSplit implements InputSplit {
+
+ public BatchInputSplit() {
+ super();
+ }
+
+ public BatchInputSplit(BatchInputSplit split) throws IOException {
+ super(split);
+ }
+
+ public BatchInputSplit(String table, String tableId, Collection<Range> ranges, String[] location) {
+ super(table, tableId, ranges, location);
+ }
+}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
index 300e92bba3d..e3c3c42a1c0 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
@@ -35,6 +35,8 @@
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.IsolatedScanner;
import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.ScannerBase;
import org.apache.accumulo.core.client.TableDeletedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.TableOfflineException;
@@ -44,6 +46,8 @@
import org.apache.accumulo.core.client.impl.ScannerImpl;
import org.apache.accumulo.core.client.impl.Tables;
import org.apache.accumulo.core.client.impl.TabletLocator;
+import org.apache.accumulo.core.client.mapreduce.impl.AccumuloInputSplit;
+import org.apache.accumulo.core.client.mapreduce.impl.BatchInputSplit;
import org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase;
import org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator;
import org.apache.accumulo.core.client.mock.MockInstance;
@@ -61,6 +65,7 @@
import org.apache.accumulo.core.security.Credentials;
import org.apache.accumulo.core.util.Pair;
import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputFormat;
@@ -413,7 +418,50 @@ protected static ClientConfiguration getClientConfiguration(JobContext context)
protected abstract static class AbstractRecordReader<K,V> extends RecordReader<K,V> {
protected long numKeysRead;
protected Iterator<Map.Entry<Key,Value>> scannerIterator;
- protected RangeInputSplit split;
+ protected ScannerBase scannerBase;
+ protected AccumuloInputSplit split;
+
+ /**
+ * Extracts Iterators settings from the context to be used by RecordReader.
+ *
+ * @param context
+ * the Hadoop context for the configured job
+ * @param tableName
+ * the table name for which the scanner is configured
+ * @return List of iterator settings for given table
+ * @since 1.7.0
+ */
+ protected abstract List<IteratorSetting> contextIterators(TaskAttemptContext context, String tableName);
+
+ /**
+ * Configures the iterators on a scanner for the given table name.
+ * Will use the iterator configuration from the InputSplit when present; otherwise falls back to extracting it from the TaskAttemptContext.
+ *
+ * @param context
+ * the Hadoop context for the configured job
+ * @param tableName
+ * the table name for which the scanner is configured
+ * @param scanner
+ * the scanner for which to configure the iterators
+ * @param split
+ * InputSplit containing configurations
+ * @since 1.7.0
+ */
+ private void setupIterators(TaskAttemptContext context, ScannerBase scanner, String tableName, AccumuloInputSplit split) {
+ List<IteratorSetting> iterators = null;
+
+ if (null == split) {
+ iterators = contextIterators(context, tableName);
+ } else {
+ iterators = split.getIterators();
+ if (null == iterators) {
+ iterators = contextIterators(context, tableName);
+ }
+ }
+
+ for (IteratorSetting iterator : iterators)
+ scanner.addScanIterator(iterator);
+ }
/**
* Configures the iterators on a scanner for the given table name.
@@ -425,15 +473,18 @@ protected abstract static class AbstractRecordReader<K,V> extends RecordReader<
@@ -512,22 +588,23 @@ public void initialize(InputSplit inSplit, TaskAttemptContext attempt) throws IO
Set<Pair<Text,Text>> columns = split.getFetchedColumns();
for (Pair<Text,Text> c : columns) {
if (c.getSecond() != null) {
log.debug("Fetching column " + c.getFirst() + ":" + c.getSecond());
- scanner.fetchColumn(c.getFirst(), c.getSecond());
+ scannerBase.fetchColumn(c.getFirst(), c.getSecond());
} else {
log.debug("Fetching column family " + c.getFirst());
- scanner.fetchColumnFamily(c.getFirst());
+ scannerBase.fetchColumnFamily(c.getFirst());
}
}
- scanner.setRange(split.getRange());
+ scannerIterator = scannerBase.iterator();
numKeysRead = 0;
-
- // do this last after setting all scanner options
- scannerIterator = scanner.iterator();
}
@Override
- public void close() {}
+ public void close() {
+ if (null != scannerBase) {
+ scannerBase.close();
+ }
+ }
@Override
public float getProgress() throws IOException {
@@ -592,26 +669,32 @@ public List<InputSplit> getSplits(JobContext context) throws IOException {
InputTableConfig tableConfig = tableConfigEntry.getValue();
Instance instance = getInstance(context);
- boolean mockInstance;
String tableId;
// resolve table name to id once, and use id from this point forward
if (instance instanceof MockInstance) {
tableId = "";
- mockInstance = true;
} else {
try {
tableId = Tables.getTableId(instance, tableName);
} catch (TableNotFoundException e) {
throw new IOException(e);
}
- mockInstance = false;
}
Authorizations auths = getScanAuthorizations(context);
String principal = getPrincipal(context);
AuthenticationToken token = getAuthenticationToken(context);
+ boolean batchScan = InputConfigurator.isBatchScan(CLASS, context.getConfiguration());
+ boolean supportBatchScan =
+ !(tableConfig.isOfflineScan() || tableConfig.shouldUseIsolatedScanners() || tableConfig.shouldUseLocalIterators());
+ if (batchScan && !supportBatchScan)
+ throw new IllegalArgumentException("BatchScanner optimization not available for offline scan, isolated, or local iterators");
+
boolean autoAdjust = tableConfig.shouldAutoAdjustRanges();
+ if (batchScan && !autoAdjust)
+ throw new IllegalArgumentException("AutoAdjustRanges must be enabled when using BatchScanner optimization");
+
List<Range> ranges = autoAdjust ? Range.mergeOverlapping(tableConfig.getRanges()) : tableConfig.getRanges();
if (ranges.isEmpty()) {
ranges = new ArrayList<Range>(1);
@@ -654,6 +737,8 @@ public List getSplits(JobContext context) throws IOException {
throw new IOException(e);
}
+ // This code adds either one split per clipped range-location pair, or, when ranges are not split, one split per range with all of its locations.
+ // Map from Range to a list of locations; only used when ranges are not split.
HashMap<Range,ArrayList<String>> splitsToAdd = null;
if (!autoAdjust)
@@ -670,32 +755,35 @@ public List getSplits(JobContext context) throws IOException {
}
for (Map.Entry<KeyExtent,List<Range>> extentRanges : tserverBin.getValue().entrySet()) {
Range ke = extentRanges.getKey().toDataRange();
- for (Range r : extentRanges.getValue()) {
- if (autoAdjust) {
- // divide ranges into smaller ranges, based on the tablets
- RangeInputSplit split = new RangeInputSplit(tableName, tableId, ke.clip(r), new String[] {location});
-
- split.setOffline(tableConfig.isOfflineScan());
- split.setIsolatedScan(tableConfig.shouldUseIsolatedScanners());
- split.setUsesLocalIterators(tableConfig.shouldUseLocalIterators());
- split.setMockInstance(mockInstance);
- split.setFetchedColumns(tableConfig.getFetchedColumns());
- split.setPrincipal(principal);
- split.setToken(token);
- split.setInstanceName(instance.getInstanceName());
- split.setZooKeepers(instance.getZooKeepers());
- split.setAuths(auths);
- split.setIterators(tableConfig.getIterators());
- split.setLogLevel(logLevel);
-
- splits.add(split);
- } else {
- // don't divide ranges
- ArrayList<String> locations = splitsToAdd.get(r);
- if (locations == null)
- locations = new ArrayList<String>(1);
- locations.add(location);
- splitsToAdd.put(r, locations);
+ if (batchScan) {
+ // group ranges by tablet to be read by a BatchScanner
+ ArrayList<Range> clippedRanges = new ArrayList<Range>();
+ for (Range r : extentRanges.getValue())
+ clippedRanges.add(ke.clip(r));
+ BatchInputSplit split = new BatchInputSplit(tableName, tableId, clippedRanges, new String[] {location});
+ AccumuloInputSplit.updateSplit(split, instance, tableConfig, principal, token, auths, logLevel);
+
+ splits.add(split);
+ } else {
+ // not grouping by tablet
+ for (Range r : extentRanges.getValue()) {
+ if (autoAdjust) {
+ // divide ranges into smaller ranges, based on the tablets
+ RangeInputSplit split = new RangeInputSplit(tableName, tableId, ke.clip(r), new String[] {location});
+ AccumuloInputSplit.updateSplit(split, instance, tableConfig, principal, token, auths, logLevel);
+ split.setOffline(tableConfig.isOfflineScan());
+ split.setIsolatedScan(tableConfig.shouldUseIsolatedScanners());
+ split.setUsesLocalIterators(tableConfig.shouldUseLocalIterators());
+
+ splits.add(split);
+ } else {
+ // don't divide ranges
+ ArrayList<String> locations = splitsToAdd.get(r);
+ if (locations == null)
+ locations = new ArrayList<String>(1);
+ locations.add(location);
+ splitsToAdd.put(r, locations);
+ }
}
}
}
@@ -704,23 +792,14 @@ public List getSplits(JobContext context) throws IOException {
if (!autoAdjust)
for (Map.Entry<Range,ArrayList<String>> entry : splitsToAdd.entrySet()) {
RangeInputSplit split = new RangeInputSplit(tableName, tableId, entry.getKey(), entry.getValue().toArray(new String[0]));
-
+ AccumuloInputSplit.updateSplit(split, instance, tableConfig, principal, token, auths, logLevel);
split.setOffline(tableConfig.isOfflineScan());
split.setIsolatedScan(tableConfig.shouldUseIsolatedScanners());
split.setUsesLocalIterators(tableConfig.shouldUseLocalIterators());
- split.setMockInstance(mockInstance);
- split.setFetchedColumns(tableConfig.getFetchedColumns());
- split.setPrincipal(principal);
- split.setToken(token);
- split.setInstanceName(instance.getInstanceName());
- split.setZooKeepers(instance.getZooKeepers());
- split.setAuths(auths);
- split.setIterators(tableConfig.getIterators());
- split.setLogLevel(logLevel);
splits.add(split);
}
}
return splits;
}
-}
+}
\ No newline at end of file
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
index b98cb77e5a5..7af5e667249 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
@@ -21,6 +21,7 @@
import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.mapreduce.impl.AccumuloInputSplit;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
@@ -52,17 +53,18 @@ public class AccumuloInputFormat extends InputFormatBase<Key,Value> {
public RecordReader<Key,Value> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
log.setLevel(getLogLevel(context));
- // Override the log level from the configuration as if the RangeInputSplit has one it's the more correct one to use.
- if (split instanceof org.apache.accumulo.core.client.mapreduce.RangeInputSplit) {
- org.apache.accumulo.core.client.mapreduce.RangeInputSplit risplit = (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split;
- Level level = risplit.getLogLevel();
+ // Override the log level from the configuration; if the InputSplit has one, it is the more correct one to use.
+ if (split instanceof AccumuloInputSplit) {
+ AccumuloInputSplit accSplit = (AccumuloInputSplit) split;
+ Level level = accSplit.getLogLevel();
if (null != level) {
log.setLevel(level);
}
+ } else {
+ throw new IllegalArgumentException("No RecordReader for " + split.getClass().toString());
}
return new RecordReaderBase<Key,Value>() {
-
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
if (scannerIterator.hasNext()) {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloMultiTableInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloMultiTableInputFormat.java
index bed0def612a..679256b61c5 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloMultiTableInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloMultiTableInputFormat.java
@@ -24,7 +24,6 @@
import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.data.Key;
@@ -86,15 +85,8 @@ public boolean nextKeyValue() throws IOException, InterruptedException {
}
@Override
- protected void setupIterators(TaskAttemptContext context, Scanner scanner, String tableName, RangeInputSplit split) {
- List<IteratorSetting> iterators = split.getIterators();
- if (null == iterators) {
- iterators = getInputTableConfig(context, tableName).getIterators();
- }
-
- for (IteratorSetting setting : iterators) {
- scanner.addScanIterator(setting);
- }
+ protected List<IteratorSetting> contextIterators(TaskAttemptContext context, String tableName) {
+ return getInputTableConfig(context, tableName).getIterators();
}
};
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
index 76ca40137c0..dcc4fd5cfac 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
@@ -177,6 +177,7 @@ public static void setAutoAdjustRanges(Job job, boolean enableFeature) {
/**
* Determines whether a configuration has auto-adjust ranges enabled.
+ * Must be enabled when {@link #setBatchScan(Job, boolean)} is true.
*
* @param context
* the Hadoop context for the configured job
@@ -295,6 +296,48 @@ protected static boolean isOfflineScan(JobContext context) {
return InputConfigurator.isOfflineScan(CLASS, context.getConfiguration());
}
+ /**
+ * Controls the use of the {@link org.apache.accumulo.core.client.BatchScanner} in this job.
+ * Using this feature will group Ranges by their source tablet, producing an InputSplit per tablet
+ * rather than per Range. This batching helps to reduce overhead when querying a large number of small ranges.
+ * (e.g., when doing quad-tree decomposition for spatial queries).
+ *
+ * In order to achieve good locality of InputSplits this option always clips the input Ranges to tablet boundaries.
+ * This may result in one input Range contributing to several InputSplits.
+ *
+ * Note: the value of {@link #setAutoAdjustRanges(Job, boolean)} is ignored and assumed to be true when the BatchScan option is enabled.
+ *
+ * This configuration is incompatible with:
+ *
+ * - {@link #setOfflineTableScan(org.apache.hadoop.mapreduce.Job, boolean)}
+ * - {@link #setLocalIterators(org.apache.hadoop.mapreduce.Job, boolean)}
+ * - {@link #setScanIsolation(org.apache.hadoop.mapreduce.Job, boolean)}
+ *
+ *
+ * By default, this feature is disabled.
+ *
+ * @param job
+ * the Hadoop job instance to be configured
+ * @param enableFeature
+ * the feature is enabled if true, disabled otherwise
+ * @since 1.7.0
+ */
+ public static void setBatchScan(Job job, boolean enableFeature) {
+ InputConfigurator.setBatchScan(CLASS, job.getConfiguration(), enableFeature);
+ }
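The mapreduce flavor is configured the same way on a Job; a sketch under the same placeholder assumptions, mirroring the MRTester driver changes later in this patch:

```java
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.hadoop.mapreduce.Job;

public class BatchScanMapReduceSetup {
  public static Job configure() throws Exception {
    Job job = Job.getInstance();
    job.setInputFormatClass(AccumuloInputFormat.class);
    AccumuloInputFormat.setConnectorInfo(job, "user", new PasswordToken("pass"));
    AccumuloInputFormat.setInputTableName(job, "table");
    AccumuloInputFormat.setMockInstance(job, "instance"); // placeholder instance
    // getSplits will throw IllegalArgumentException if this is combined with
    // offline scans, isolated scanners, or local iterators.
    AccumuloInputFormat.setBatchScan(job, true);
    return job;
  }
}
```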
+
+ /**
+ * Determines whether a configuration has the {@link org.apache.accumulo.core.client.BatchScanner} feature enabled.
+ *
+ * @param context
+ * the Hadoop context for the configured job
+ * @since 1.7.0
+ * @see #setBatchScan(Job, boolean)
+ */
+ public static boolean isBatchScan(JobContext context) {
+ return InputConfigurator.isBatchScan(CLASS, context.getConfiguration());
+ }
+
/**
* Initializes an Accumulo {@link org.apache.accumulo.core.client.impl.TabletLocator} based on the configuration.
*
@@ -314,8 +357,8 @@ protected static TabletLocator getTabletLocator(JobContext context) throws Table
protected abstract static class RecordReaderBase<K,V> extends AbstractRecordReader<K,V> {
@Override
- protected void setupIterators(TaskAttemptContext context, Scanner scanner, String tableName, org.apache.accumulo.core.client.mapreduce.RangeInputSplit split) {
- setupIterators(context, scanner, split);
+ protected List<IteratorSetting> contextIterators(TaskAttemptContext context, String tableName) {
+ return getIterators(context);
}
/**
@@ -325,27 +368,21 @@ protected void setupIterators(TaskAttemptContext context, Scanner scanner, Strin
* the Hadoop context for the configured job
* @param scanner
* the scanner to configure
+ * @deprecated since 1.7.0; Use {@link #contextIterators} instead.
*/
@Deprecated
protected void setupIterators(TaskAttemptContext context, Scanner scanner) {
- setupIterators(context, scanner, null);
+ // tableName is given as null as it will be ignored in the eventual call to #contextIterators
+ setupIterators(context, scanner, null, null);
}
/**
* Initialize a scanner over the given input split using this task attempt configuration.
+ * @deprecated since 1.7.0; Use {@link #contextIterators} instead.
*/
+ @Deprecated
protected void setupIterators(TaskAttemptContext context, Scanner scanner, org.apache.accumulo.core.client.mapreduce.RangeInputSplit split) {
- List<IteratorSetting> iterators = null;
- if (null == split) {
- iterators = getIterators(context);
- } else {
- iterators = split.getIterators();
- if (null == iterators) {
- iterators = getIterators(context);
- }
- }
- for (IteratorSetting iterator : iterators)
- scanner.addScanIterator(iterator);
+ setupIterators(context, scanner, null, split);
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
index 872ee7b11de..6c870a0e017 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
@@ -16,114 +16,53 @@
*/
package org.apache.accumulo.core.client.mapreduce;
-import static java.nio.charset.StandardCharsets.UTF_8;
-
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
-import java.math.BigInteger;
-import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.TokenSource;
-import org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator;
-import org.apache.accumulo.core.client.mock.MockInstance;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
-import org.apache.accumulo.core.data.ByteSequence;
+import org.apache.accumulo.core.client.mapreduce.impl.AccumuloInputSplit;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.Base64;
-import org.apache.accumulo.core.util.Pair;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.log4j.Level;
/**
* The Class RangeInputSplit. Encapsulates an Accumulo range for use in Map Reduce jobs.
*/
-public class RangeInputSplit extends InputSplit implements Writable {
+public class RangeInputSplit extends AccumuloInputSplit {
private Range range;
- private String[] locations;
- private String tableId, tableName, instanceName, zooKeepers, principal;
- private TokenSource tokenSource;
- private String tokenFile;
- private AuthenticationToken token;
- private Boolean offline, mockInstance, isolatedScan, localIterators;
- private Authorizations auths;
- private Set<Pair<Text,Text>> fetchedColumns;
- private List<IteratorSetting> iterators;
- private Level level;
+ private Boolean offline, isolatedScan, localIterators;
public RangeInputSplit() {
range = new Range();
- locations = new String[0];
- tableName = "";
- tableId = "";
}
public RangeInputSplit(RangeInputSplit split) throws IOException {
+ super(split);
this.setRange(split.getRange());
- this.setLocations(split.getLocations());
- this.setTableName(split.getTableName());
- this.setTableId(split.getTableId());
}
protected RangeInputSplit(String table, String tableId, Range range, String[] locations) {
+ super(table, tableId, locations);
this.range = range;
- setLocations(locations);
- this.tableName = table;
- this.tableId = tableId;
- }
-
- public Range getRange() {
- return range;
- }
-
- private static byte[] extractBytes(ByteSequence seq, int numBytes) {
- byte[] bytes = new byte[numBytes + 1];
- bytes[0] = 0;
- for (int i = 0; i < numBytes; i++) {
- if (i >= seq.length())
- bytes[i + 1] = 0;
- else
- bytes[i + 1] = seq.byteAt(i);
- }
- return bytes;
- }
-
- public static float getProgress(ByteSequence start, ByteSequence end, ByteSequence position) {
- int maxDepth = Math.min(Math.max(end.length(), start.length()), position.length());
- BigInteger startBI = new BigInteger(extractBytes(start, maxDepth));
- BigInteger endBI = new BigInteger(extractBytes(end, maxDepth));
- BigInteger positionBI = new BigInteger(extractBytes(position, maxDepth));
- return (float) (positionBI.subtract(startBI).doubleValue() / endBI.subtract(startBI).doubleValue());
}
public float getProgress(Key currentKey) {
if (currentKey == null)
return 0f;
- if (range.getStartKey() != null && range.getEndKey() != null) {
- if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW) != 0) {
- // just look at the row progress
- return getProgress(range.getStartKey().getRowData(), range.getEndKey().getRowData(), currentKey.getRowData());
- } else if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW_COLFAM) != 0) {
- // just look at the column family progress
- return getProgress(range.getStartKey().getColumnFamilyData(), range.getEndKey().getColumnFamilyData(), currentKey.getColumnFamilyData());
- } else if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW_COLFAM_COLQUAL) != 0) {
- // just look at the column qualifier progress
- return getProgress(range.getStartKey().getColumnQualifierData(), range.getEndKey().getColumnQualifierData(), currentKey.getColumnQualifierData());
+ if (range.contains(currentKey)) {
+ // find the current range and report as if that is the single range
+ if (range.getStartKey() != null && range.getEndKey() != null) {
+ if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW) != 0) {
+ // just look at the row progress
+ return getProgress(range.getStartKey().getRowData(), range.getEndKey().getRowData(), currentKey.getRowData());
+ } else if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW_COLFAM) != 0) {
+ // just look at the column family progress
+ return getProgress(range.getStartKey().getColumnFamilyData(), range.getEndKey().getColumnFamilyData(), currentKey.getColumnFamilyData());
+ } else if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW_COLFAM_COLQUAL) != 0) {
+ // just look at the column qualifier progress
+ return getProgress(range.getStartKey().getColumnQualifierData(), range.getEndKey().getColumnQualifierData(), currentKey.getColumnQualifierData());
+ }
}
}
// if we can't figure it out, then claim no progress
@@ -135,38 +74,15 @@ public float getProgress(Key currentKey) {
*/
@Override
public long getLength() throws IOException {
- Text startRow = range.isInfiniteStartKey() ? new Text(new byte[] {Byte.MIN_VALUE}) : range.getStartKey().getRow();
- Text stopRow = range.isInfiniteStopKey() ? new Text(new byte[] {Byte.MAX_VALUE}) : range.getEndKey().getRow();
- int maxCommon = Math.min(7, Math.min(startRow.getLength(), stopRow.getLength()));
- long diff = 0;
-
- byte[] start = startRow.getBytes();
- byte[] stop = stopRow.getBytes();
- for (int i = 0; i < maxCommon; ++i) {
- diff |= 0xff & (start[i] ^ stop[i]);
- diff <<= Byte.SIZE;
- }
-
- if (startRow.getLength() != stopRow.getLength())
- diff |= 0xff;
-
- return diff + 1;
+ return getRangeLength(range);
}
- @Override
- public String[] getLocations() throws IOException {
- return Arrays.copyOf(locations, locations.length);
- }
@Override
public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+
range.readFields(in);
- tableName = in.readUTF();
- tableId = in.readUTF();
- int numLocs = in.readInt();
- locations = new String[numLocs];
- for (int i = 0; i < numLocs; ++i)
- locations[i] = in.readUTF();
if (in.readBoolean()) {
isolatedScan = in.readBoolean();
@@ -179,81 +95,13 @@ public void readFields(DataInput in) throws IOException {
if (in.readBoolean()) {
localIterators = in.readBoolean();
}
-
- if (in.readBoolean()) {
- mockInstance = in.readBoolean();
- }
-
- if (in.readBoolean()) {
- int numColumns = in.readInt();
- List<String> columns = new ArrayList<String>(numColumns);
- for (int i = 0; i < numColumns; i++) {
- columns.add(in.readUTF());
- }
-
- fetchedColumns = InputConfigurator.deserializeFetchedColumns(columns);
- }
-
- if (in.readBoolean()) {
- String strAuths = in.readUTF();
- auths = new Authorizations(strAuths.getBytes(UTF_8));
- }
-
- if (in.readBoolean()) {
- principal = in.readUTF();
- }
-
- if (in.readBoolean()) {
- int ordinal = in.readInt();
- this.tokenSource = TokenSource.values()[ordinal];
-
- switch (this.tokenSource) {
- case INLINE:
- String tokenClass = in.readUTF();
- byte[] base64TokenBytes = in.readUTF().getBytes(UTF_8);
- byte[] tokenBytes = Base64.decodeBase64(base64TokenBytes);
-
- this.token = AuthenticationTokenSerializer.deserialize(tokenClass, tokenBytes);
- break;
-
- case FILE:
- this.tokenFile = in.readUTF();
-
- break;
- default:
- throw new IOException("Cannot parse unknown TokenSource ordinal");
- }
- }
-
- if (in.readBoolean()) {
- instanceName = in.readUTF();
- }
-
- if (in.readBoolean()) {
- zooKeepers = in.readUTF();
- }
-
- if (in.readBoolean()) {
- int numIterators = in.readInt();
- iterators = new ArrayList<IteratorSetting>(numIterators);
- for (int i = 0; i < numIterators; i++) {
- iterators.add(new IteratorSetting(in));
- }
- }
-
- if (in.readBoolean()) {
- level = Level.toLevel(in.readInt());
- }
}
@Override
public void write(DataOutput out) throws IOException {
+ super.write(out);
+
range.write(out);
- out.writeUTF(tableName);
- out.writeUTF(tableId);
- out.writeInt(locations.length);
- for (int i = 0; i < locations.length; ++i)
- out.writeUTF(locations[i]);
out.writeBoolean(null != isolatedScan);
if (null != isolatedScan) {
@@ -269,73 +117,13 @@ public void write(DataOutput out) throws IOException {
if (null != localIterators) {
out.writeBoolean(localIterators);
}
-
- out.writeBoolean(null != mockInstance);
- if (null != mockInstance) {
- out.writeBoolean(mockInstance);
- }
-
- out.writeBoolean(null != fetchedColumns);
- if (null != fetchedColumns) {
- String[] cols = InputConfigurator.serializeColumns(fetchedColumns);
- out.writeInt(cols.length);
- for (String col : cols) {
- out.writeUTF(col);
- }
- }
-
- out.writeBoolean(null != auths);
- if (null != auths) {
- out.writeUTF(auths.serialize());
- }
-
- out.writeBoolean(null != principal);
- if (null != principal) {
- out.writeUTF(principal);
- }
-
- out.writeBoolean(null != tokenSource);
- if (null != tokenSource) {
- out.writeInt(tokenSource.ordinal());
-
- if (null != token && null != tokenFile) {
- throw new IOException("Cannot use both inline AuthenticationToken and file-based AuthenticationToken");
- } else if (null != token) {
- out.writeUTF(token.getClass().getCanonicalName());
- out.writeUTF(Base64.encodeBase64String(AuthenticationTokenSerializer.serialize(token)));
- } else {
- out.writeUTF(tokenFile);
- }
- }
-
- out.writeBoolean(null != instanceName);
- if (null != instanceName) {
- out.writeUTF(instanceName);
- }
-
- out.writeBoolean(null != zooKeepers);
- if (null != zooKeepers) {
- out.writeUTF(zooKeepers);
- }
-
- out.writeBoolean(null != iterators);
- if (null != iterators) {
- out.writeInt(iterators.size());
- for (IteratorSetting iterator : iterators) {
- iterator.write(out);
- }
- }
-
- out.writeBoolean(null != level);
- if (null != level) {
- out.writeInt(level.toInt());
- }
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder(256);
- sb.append("Range: ").append(range);
+ sb.append("RangeInputSplit:");
+ sb.append(" Range: ").append(range);
sb.append(" Locations: ").append(Arrays.asList(locations));
sb.append(" Table: ").append(tableName);
sb.append(" TableID: ").append(tableId);
@@ -356,123 +144,12 @@ public String toString() {
return sb.toString();
}
- /**
- * Use {@link #getTableName}
- *
- * @deprecated since 1.6.1, use getTableName() instead.
- */
- @Deprecated
- public String getTable() {
- return getTableName();
- }
-
- public String getTableName() {
- return tableName;
- }
-
- /**
- * Use {@link #setTableName}
- *
- * @deprecated since 1.6.1, use setTableName() instead.
- */
- @Deprecated
- public void setTable(String table) {
- setTableName(table);
- }
-
- public void setTableName(String table) {
- this.tableName = table;
- }
-
- public void setTableId(String tableId) {
- this.tableId = tableId;
- }
-
- public String getTableId() {
- return tableId;
- }
-
- /**
- * @see #getInstance(ClientConfiguration)
- * @deprecated since 1.7.0, use getInstance(ClientConfiguration) instead.
- */
- @Deprecated
- public Instance getInstance() {
- return getInstance(ClientConfiguration.loadDefault());
- }
-
- public Instance getInstance(ClientConfiguration base) {
- if (null == instanceName) {
- return null;
- }
-
- if (isMockInstance()) {
- return new MockInstance(getInstanceName());
- }
-
- if (null == zooKeepers) {
- return null;
- }
-
- return new ZooKeeperInstance(base.withInstance(getInstanceName()).withZkHosts(getZooKeepers()));
- }
-
- public String getInstanceName() {
- return instanceName;
- }
-
- public void setInstanceName(String instanceName) {
- this.instanceName = instanceName;
- }
-
- public String getZooKeepers() {
- return zooKeepers;
- }
-
- public void setZooKeepers(String zooKeepers) {
- this.zooKeepers = zooKeepers;
- }
-
- public String getPrincipal() {
- return principal;
- }
-
- public void setPrincipal(String principal) {
- this.principal = principal;
- }
-
- public AuthenticationToken getToken() {
- return token;
- }
-
- public void setToken(AuthenticationToken token) {
- this.tokenSource = TokenSource.INLINE;
- this.token = token;
- }
-
- public void setToken(String tokenFile) {
- this.tokenSource = TokenSource.FILE;
- this.tokenFile = tokenFile;
- }
-
- public Boolean isOffline() {
- return offline;
- }
-
- public void setOffline(Boolean offline) {
- this.offline = offline;
- }
-
- public void setLocations(String[] locations) {
- this.locations = Arrays.copyOf(locations, locations.length);
- }
-
- public Boolean isMockInstance() {
- return mockInstance;
+ public Range getRange() {
+ return range;
}
- public void setMockInstance(Boolean mockInstance) {
- this.mockInstance = mockInstance;
+ public void setRange(Range range) {
+ this.range = range;
}
public Boolean isIsolatedScan() {
@@ -483,16 +160,12 @@ public void setIsolatedScan(Boolean isolatedScan) {
this.isolatedScan = isolatedScan;
}
- public Authorizations getAuths() {
- return auths;
- }
-
- public void setAuths(Authorizations auths) {
- this.auths = auths;
+ public Boolean isOffline() {
+ return offline;
}
- public void setRange(Range range) {
- this.range = range;
+ public void setOffline(Boolean offline) {
+ this.offline = offline;
}
public Boolean usesLocalIterators() {
@@ -502,35 +175,4 @@ public Boolean usesLocalIterators() {
public void setUsesLocalIterators(Boolean localIterators) {
this.localIterators = localIterators;
}
-
- public Set<Pair<Text,Text>> getFetchedColumns() {
- return fetchedColumns;
- }
-
- public void setFetchedColumns(Collection<Pair<Text,Text>> fetchedColumns) {
- this.fetchedColumns = new HashSet<Pair<Text,Text>>();
- for (Pair<Text,Text> columns : fetchedColumns) {
- this.fetchedColumns.add(columns);
- }
- }
-
- public void setFetchedColumns(Set<Pair<Text,Text>> fetchedColumns) {
- this.fetchedColumns = fetchedColumns;
- }
-
- public List<IteratorSetting> getIterators() {
- return iterators;
- }
-
- public void setIterators(List<IteratorSetting> iterators) {
- this.iterators = iterators;
- }
-
- public Level getLogLevel() {
- return level;
- }
-
- public void setLogLevel(Level level) {
- this.level = level;
- }
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/impl/AccumuloInputSplit.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/impl/AccumuloInputSplit.java
new file mode 100644
index 00000000000..94d00262bf1
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/impl/AccumuloInputSplit.java
@@ -0,0 +1,445 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapreduce.impl;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
+import org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.TokenSource;
+import org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator;
+import org.apache.accumulo.core.client.mock.MockInstance;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
+import org.apache.accumulo.core.data.ByteSequence;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Base64;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.core.data.Key;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.log4j.Level;
+
+/**
+ * Abstracts over configurations common to all InputSplits. Specifically, it leaves out methods
+ * related to the number of ranges and locations per InputSplit, as those vary by implementation.
+ *
+ * @see org.apache.accumulo.core.client.mapreduce.RangeInputSplit
+ * @see org.apache.accumulo.core.client.mapreduce.impl.BatchInputSplit
+ */
+public abstract class AccumuloInputSplit extends InputSplit implements Writable {
+ protected String[] locations;
+ protected String tableId, tableName, instanceName, zooKeepers, principal;
+ protected TokenSource tokenSource;
+ protected String tokenFile;
+ protected AuthenticationToken token;
+ protected Boolean mockInstance;
+ protected Authorizations auths;
+ protected Set<Pair<Text,Text>> fetchedColumns;
+ protected List<IteratorSetting> iterators;
+ protected Level level;
+
+ public abstract float getProgress(Key currentKey);
+
+ public AccumuloInputSplit() {
+ locations = new String[0];
+ tableName = "";
+ tableId = "";
+ }
+
+ public AccumuloInputSplit(AccumuloInputSplit split) throws IOException {
+ this.setLocations(split.getLocations());
+ this.setTableName(split.getTableName());
+ this.setTableId(split.getTableId());
+ }
+
+ protected AccumuloInputSplit(String table, String tableId, String[] locations) {
+ setLocations(locations);
+ this.tableName = table;
+ this.tableId = tableId;
+ }
+
+ /**
+ * Central place to set common split configuration not handled by split constructors.
+ * The intention is to make it harder to miss optional setters in future refactors.
+ */
+ public static void updateSplit(AccumuloInputSplit split, Instance instance, InputTableConfig tableConfig,
+ String principal, AuthenticationToken token, Authorizations auths, Level logLevel) {
+ split.setInstanceName(instance.getInstanceName());
+ split.setZooKeepers(instance.getZooKeepers());
+ split.setMockInstance(instance instanceof MockInstance);
+
+ split.setPrincipal(principal);
+ split.setToken(token);
+ split.setAuths(auths);
+
+ split.setFetchedColumns(tableConfig.getFetchedColumns());
+ split.setIterators(tableConfig.getIterators());
+ split.setLogLevel(logLevel);
+ }
+
+ private static byte[] extractBytes(ByteSequence seq, int numBytes) {
+ byte[] bytes = new byte[numBytes + 1];
+ bytes[0] = 0;
+ for (int i = 0; i < numBytes; i++) {
+ if (i >= seq.length())
+ bytes[i + 1] = 0;
+ else
+ bytes[i + 1] = seq.byteAt(i);
+ }
+ return bytes;
+ }
+
+ public static float getProgress(ByteSequence start, ByteSequence end, ByteSequence position) {
+ int maxDepth = Math.min(Math.max(end.length(), start.length()), position.length());
+ BigInteger startBI = new BigInteger(extractBytes(start, maxDepth));
+ BigInteger endBI = new BigInteger(extractBytes(end, maxDepth));
+ BigInteger positionBI = new BigInteger(extractBytes(position, maxDepth));
+ return (float) (positionBI.subtract(startBI).doubleValue() / endBI.subtract(startBI).doubleValue());
+ }
+
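To make the interpolation concrete: each ByteSequence is widened to a common depth and read as a BigInteger, and progress is the linear position between the endpoints. A worked example with invented one-byte rows:

```java
import org.apache.accumulo.core.client.mapreduce.impl.AccumuloInputSplit;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;

public class ProgressExample {
  public static void main(String[] args) {
    // Rows "a" (0x61) through "c" (0x63), currently at "b" (0x62):
    // maxDepth is 1, so progress = (0x62 - 0x61) / (0x63 - 0x61) = 0.5.
    ByteSequence start = new ArrayByteSequence("a");
    ByteSequence end = new ArrayByteSequence("c");
    ByteSequence position = new ArrayByteSequence("b");
    System.out.println(AccumuloInputSplit.getProgress(start, end, position)); // prints 0.5
  }
}
```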
+ public long getRangeLength(Range range) throws IOException {
+ Text startRow = range.isInfiniteStartKey() ? new Text(new byte[] {Byte.MIN_VALUE}) : range.getStartKey().getRow();
+ Text stopRow = range.isInfiniteStopKey() ? new Text(new byte[] {Byte.MAX_VALUE}) : range.getEndKey().getRow();
+ int maxCommon = Math.min(7, Math.min(startRow.getLength(), stopRow.getLength()));
+ long diff = 0;
+
+ byte[] start = startRow.getBytes();
+ byte[] stop = stopRow.getBytes();
+ for (int i = 0; i < maxCommon; ++i) {
+ diff |= 0xff & (start[i] ^ stop[i]);
+ diff <<= Byte.SIZE;
+ }
+
+ if (startRow.getLength() != stopRow.getLength())
+ diff |= 0xff;
+
+ return diff + 1;
+ }
+
+ @Override
+ public String[] getLocations() throws IOException {
+ return Arrays.copyOf(locations, locations.length);
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ tableName = in.readUTF();
+ tableId = in.readUTF();
+ int numLocs = in.readInt();
+ locations = new String[numLocs];
+ for (int i = 0; i < numLocs; ++i)
+ locations[i] = in.readUTF();
+
+ if (in.readBoolean()) {
+ mockInstance = in.readBoolean();
+ }
+
+ if (in.readBoolean()) {
+ int numColumns = in.readInt();
+ List<String> columns = new ArrayList<String>(numColumns);
+ for (int i = 0; i < numColumns; i++) {
+ columns.add(in.readUTF());
+ }
+
+ fetchedColumns = InputConfigurator.deserializeFetchedColumns(columns);
+ }
+
+ if (in.readBoolean()) {
+ String strAuths = in.readUTF();
+ auths = new Authorizations(strAuths.getBytes(UTF_8));
+ }
+
+ if (in.readBoolean()) {
+ principal = in.readUTF();
+ }
+
+ if (in.readBoolean()) {
+ int ordinal = in.readInt();
+ this.tokenSource = TokenSource.values()[ordinal];
+
+ switch (this.tokenSource) {
+ case INLINE:
+ String tokenClass = in.readUTF();
+ byte[] base64TokenBytes = in.readUTF().getBytes(UTF_8);
+ byte[] tokenBytes = Base64.decodeBase64(base64TokenBytes);
+
+ this.token = AuthenticationTokenSerializer.deserialize(tokenClass, tokenBytes);
+ break;
+
+ case FILE:
+ this.tokenFile = in.readUTF();
+
+ break;
+ default:
+ throw new IOException("Cannot parse unknown TokenSource ordinal");
+ }
+ }
+
+ if (in.readBoolean()) {
+ instanceName = in.readUTF();
+ }
+
+ if (in.readBoolean()) {
+ zooKeepers = in.readUTF();
+ }
+
+ if (in.readBoolean()) {
+ int numIterators = in.readInt();
+ iterators = new ArrayList<IteratorSetting>(numIterators);
+ for (int i = 0; i < numIterators; i++) {
+ iterators.add(new IteratorSetting(in));
+ }
+ }
+
+ if (in.readBoolean()) {
+ level = Level.toLevel(in.readInt());
+ }
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ out.writeUTF(tableName);
+ out.writeUTF(tableId);
+ out.writeInt(locations.length);
+ for (int i = 0; i < locations.length; ++i)
+ out.writeUTF(locations[i]);
+
+ out.writeBoolean(null != mockInstance);
+ if (null != mockInstance) {
+ out.writeBoolean(mockInstance);
+ }
+
+ out.writeBoolean(null != fetchedColumns);
+ if (null != fetchedColumns) {
+ String[] cols = InputConfigurator.serializeColumns(fetchedColumns);
+ out.writeInt(cols.length);
+ for (String col : cols) {
+ out.writeUTF(col);
+ }
+ }
+
+ out.writeBoolean(null != auths);
+ if (null != auths) {
+ out.writeUTF(auths.serialize());
+ }
+
+ out.writeBoolean(null != principal);
+ if (null != principal) {
+ out.writeUTF(principal);
+ }
+
+ out.writeBoolean(null != tokenSource);
+ if (null != tokenSource) {
+ out.writeInt(tokenSource.ordinal());
+
+ if (null != token && null != tokenFile) {
+ throw new IOException("Cannot use both inline AuthenticationToken and file-based AuthenticationToken");
+ } else if (null != token) {
+ out.writeUTF(token.getClass().getCanonicalName());
+ out.writeUTF(Base64.encodeBase64String(AuthenticationTokenSerializer.serialize(token)));
+ } else {
+ out.writeUTF(tokenFile);
+ }
+ }
+
+ out.writeBoolean(null != instanceName);
+ if (null != instanceName) {
+ out.writeUTF(instanceName);
+ }
+
+ out.writeBoolean(null != zooKeepers);
+ if (null != zooKeepers) {
+ out.writeUTF(zooKeepers);
+ }
+
+ out.writeBoolean(null != iterators);
+ if (null != iterators) {
+ out.writeInt(iterators.size());
+ for (IteratorSetting iterator : iterators) {
+ iterator.write(out);
+ }
+ }
+
+ out.writeBoolean(null != level);
+ if (null != level) {
+ out.writeInt(level.toInt());
+ }
+ }
+
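Since readFields and write now live in the base class, subclasses serialize their own fields after calling super. A hedged, test-style round-trip sketch (names and values invented) of how a split survives serialization between the job client and the task:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Collections;

import org.apache.accumulo.core.client.mapreduce.impl.BatchInputSplit;
import org.apache.accumulo.core.data.Range;

public class SplitRoundTrip {
  public static void main(String[] args) throws IOException {
    BatchInputSplit split = new BatchInputSplit("table", "1", Collections.singleton(new Range("a", "b")), new String[] {"localhost"});
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    split.write(new DataOutputStream(baos)); // writes the AccumuloInputSplit fields, then the ranges
    BatchInputSplit copy = new BatchInputSplit();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));
    System.out.println(copy); // same table, ranges, and locations as the original
  }
}
```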
+ /**
+ * Use {@link #getTableName}
+ */
+ @Deprecated
+ public String getTable() {
+ return getTableName();
+ }
+
+ public String getTableName() {
+ return tableName;
+ }
+
+ /**
+ * Use {@link #setTableName}
+ */
+ @Deprecated
+ public void setTable(String table) {
+ setTableName(table);
+ }
+
+ public void setTableName(String table) {
+ this.tableName = table;
+ }
+
+ public void setTableId(String tableId) {
+ this.tableId = tableId;
+ }
+
+ public String getTableId() {
+ return tableId;
+ }
+
+ /**
+ * @see #getInstance(ClientConfiguration)
+ */
+ @Deprecated
+ public Instance getInstance() {
+ return getInstance(ClientConfiguration.loadDefault());
+ }
+
+ public Instance getInstance(ClientConfiguration base) {
+ if (null == instanceName) {
+ return null;
+ }
+
+ if (isMockInstance()) {
+ return new MockInstance(getInstanceName());
+ }
+
+ if (null == zooKeepers) {
+ return null;
+ }
+
+ return new ZooKeeperInstance(base.withInstance(getInstanceName()).withZkHosts(getZooKeepers()));
+ }
+
+ public String getInstanceName() {
+ return instanceName;
+ }
+
+ public void setInstanceName(String instanceName) {
+ this.instanceName = instanceName;
+ }
+
+ public String getZooKeepers() {
+ return zooKeepers;
+ }
+
+ public void setZooKeepers(String zooKeepers) {
+ this.zooKeepers = zooKeepers;
+ }
+
+ public String getPrincipal() {
+ return principal;
+ }
+
+ public void setPrincipal(String principal) {
+ this.principal = principal;
+ }
+
+ public AuthenticationToken getToken() {
+ return token;
+ }
+
+ public void setToken(AuthenticationToken token) {
+ this.tokenSource = TokenSource.INLINE;
+ this.token = token;
+ }
+
+ public void setToken(String tokenFile) {
+ this.tokenSource = TokenSource.FILE;
+ this.tokenFile = tokenFile;
+ }
+
+ public void setLocations(String[] locations) {
+ this.locations = Arrays.copyOf(locations, locations.length);
+ }
+
+ public Boolean isMockInstance() {
+ return mockInstance;
+ }
+
+ public void setMockInstance(Boolean mockInstance) {
+ this.mockInstance = mockInstance;
+ }
+
+ public Authorizations getAuths() {
+ return auths;
+ }
+
+ public void setAuths(Authorizations auths) {
+ this.auths = auths;
+ }
+
+
+ public Set<Pair<Text,Text>> getFetchedColumns() {
+ return fetchedColumns;
+ }
+
+ public void setFetchedColumns(Collection<Pair<Text,Text>> fetchedColumns) {
+ this.fetchedColumns = new HashSet<Pair<Text,Text>>();
+ for (Pair<Text,Text> columns : fetchedColumns) {
+ this.fetchedColumns.add(columns);
+ }
+ }
+
+ public void setFetchedColumns(Set<Pair<Text,Text>> fetchedColumns) {
+ this.fetchedColumns = fetchedColumns;
+ }
+
+ public List<IteratorSetting> getIterators() {
+ return iterators;
+ }
+
+ public void setIterators(List<IteratorSetting> iterators) {
+ this.iterators = iterators;
+ }
+
+ public Level getLogLevel() {
+ return level;
+ }
+
+ public void setLogLevel(Level level) {
+ this.level = level;
+ }
+}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/impl/BatchInputSplit.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/impl/BatchInputSplit.java
new file mode 100644
index 00000000000..269622a767c
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/impl/BatchInputSplit.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapreduce.impl;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Range;
+
+/**
+ * The Class BatchInputSplit. Encapsulates a set of Accumulo ranges on a single tablet for use in Map Reduce jobs.
+ * Can contain several Ranges per split.
+ */
+public class BatchInputSplit extends AccumuloInputSplit {
+ private Collection<Range> ranges;
+ private float[] rangeProgress = null;
+
+ public BatchInputSplit() {
+ ranges = Collections.emptyList();
+ }
+
+ public BatchInputSplit(BatchInputSplit split) throws IOException {
+ super(split);
+ this.setRanges(split.getRanges());
+ }
+
+ public BatchInputSplit(String table, String tableId, Collection<Range> ranges, String[] locations) {
+ super(table, tableId, locations);
+ this.ranges = ranges;
+ }
+
+ /**
+ * Save progress on each call to this function, implied by the value of currentKey, and return the average progress across all ranges in the split.
+ */
+ public float getProgress(Key currentKey) {
+ if (null == rangeProgress)
+ rangeProgress = new float[ranges.size()];
+
+ float total = 0; // progress per range could be on different scales, this number is "fuzzy"
+
+ if (currentKey == null) {
+ for (float progress : rangeProgress)
+ total += progress;
+ } else {
+ int i = 0;
+ for (Range range : ranges) {
+ if (range.contains(currentKey)) {
+ // find the current range and report as if that is the single range
+ if (range.getStartKey() != null && range.getEndKey() != null) {
+ if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW) != 0) {
+ // just look at the row progress
+ rangeProgress[i] = getProgress(range.getStartKey().getRowData(), range.getEndKey().getRowData(), currentKey.getRowData());
+ } else if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW_COLFAM) != 0) {
+ // just look at the column family progress
+ rangeProgress[i] = getProgress(range.getStartKey().getColumnFamilyData(), range.getEndKey().getColumnFamilyData(), currentKey.getColumnFamilyData());
+ } else if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW_COLFAM_COLQUAL) != 0) {
+ // just look at the column qualifier progress
+ rangeProgress[i] = getProgress(range.getStartKey().getColumnQualifierData(), range.getEndKey().getColumnQualifierData(), currentKey.getColumnQualifierData());
+ }
+ }
+ total += rangeProgress[i];
+ }
+ i++;
+ }
+ }
+
+ return total / ranges.size();
+ }
+
+ /**
+ * This implementation of getLength is only an estimate; it does not provide exact values. Do not rely on this return value.
+ */
+ @Override
+ public long getLength() throws IOException {
+ long sum = 0;
+ for (Range range : ranges)
+ sum += getRangeLength(range);
+ return sum;
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+
+ int numRanges = in.readInt();
+ ranges = new ArrayList<Range>(numRanges);
+ for (int i = 0; i < numRanges; ++i) {
+ Range r = new Range();
+ r.readFields(in);
+ ranges.add(r);
+ }
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+
+ out.writeInt(ranges.size());
+ for (Range r : ranges)
+ r.write(out);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder(256);
+ sb.append("BatchInputSplit:");
+ sb.append(" Ranges: ").append(Arrays.asList(ranges));
+ sb.append(" Location: ").append(Arrays.asList(locations));
+ sb.append(" Table: ").append(tableName);
+ sb.append(" TableID: ").append(tableId);
+ sb.append(" InstanceName: ").append(instanceName);
+ sb.append(" zooKeepers: ").append(zooKeepers);
+ sb.append(" principal: ").append(principal);
+ sb.append(" tokenSource: ").append(tokenSource);
+ sb.append(" authenticationToken: ").append(token);
+ sb.append(" authenticationTokenFile: ").append(tokenFile);
+ sb.append(" Authorizations: ").append(auths);
+ sb.append(" fetchColumns: ").append(fetchedColumns);
+ sb.append(" iterators: ").append(iterators);
+ sb.append(" logLevel: ").append(level);
+ return sb.toString();
+ }
+
+ public void setRanges(Collection<Range> ranges) {
+ this.ranges = ranges;
+ }
+
+ public Collection<Range> getRanges() {
+ return ranges;
+ }
+}
\ No newline at end of file
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
index b0360fa4f1e..6b8fe344f68 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
@@ -95,7 +95,7 @@ public static enum ScanOpts {
* @since 1.6.0
*/
public static enum Features {
- AUTO_ADJUST_RANGES, SCAN_ISOLATION, USE_LOCAL_ITERATORS, SCAN_OFFLINE
+ AUTO_ADJUST_RANGES, SCAN_ISOLATION, USE_LOCAL_ITERATORS, SCAN_OFFLINE, BATCH_SCANNER, BATCH_SCANNER_THREADS
}
/**
@@ -516,6 +516,40 @@ public static Boolean isOfflineScan(Class<?> implementingClass, Configuration co
return conf.getBoolean(enumToConfKey(implementingClass, Features.SCAN_OFFLINE), false);
}
+ /**
+ * Controls the use of the {@link BatchScanner} in this job.
+ * When enabled, ranges are grouped by their source tablet into InputSplits, and each split is read with a BatchScanner.
+ *
+ * By default, this feature is disabled.
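+ *
+ * A minimal sketch of toggling the feature (the implementing class shown here is illustrative):
+ *
+ * <pre>
+ * JobConf job = new JobConf();
+ * InputConfigurator.setBatchScan(AccumuloInputFormat.class, job, true);
+ * assert InputConfigurator.isBatchScan(AccumuloInputFormat.class, job);
+ * </pre>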
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param enableFeature
+ * the feature is enabled if true, disabled otherwise
+ * @since 1.7.0
+ */
+ public static void setBatchScan(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
+ conf.setBoolean(enumToConfKey(implementingClass, Features.BATCH_SCANNER), enableFeature);
+ }
+
+ /**
+ * Determines whether a configuration has the BatchScanner feature enabled.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to check
+ * @return true if the feature is enabled, false otherwise
+ * @since 1.7.0
+ * @see #setBatchScan(Class, Configuration, boolean)
+ */
+ public static Boolean isBatchScan(Class<?> implementingClass, Configuration conf) {
+ return conf.getBoolean(enumToConfKey(implementingClass, Features.BATCH_SCANNER), false);
+ }
+
/**
* Sets configurations for multiple tables at a time.
*
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
index 19276106679..a14560c5d29 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
@@ -232,8 +232,8 @@ protected void cleanup(Context context) throws IOException, InterruptedException
@Override
public int run(String[] args) throws Exception {
- if (args.length != 5) {
- throw new IllegalArgumentException("Usage : " + MRTester.class.getName() + " <user> <pass> <table> <instanceName> <inputFormatClassName>");
+ if (args.length != 5 && args.length != 6) {
+ throw new IllegalArgumentException("Usage : " + MRTester.class.getName() + " <user> <pass> <table> <instanceName> <inputFormatClassName> [<batchScan>]");
}
String user = args[0];
@@ -242,6 +242,10 @@ public int run(String[] args) throws Exception {
String instanceName = args[3];
String inputFormatClassName = args[4];
+ boolean batchScan = false;
+ if (args.length == 6)
+ batchScan = Boolean.parseBoolean(args[5]);
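+ // Note: Boolean.parseBoolean is case-insensitive, so arguments like "True" also enable batch scan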
+
@SuppressWarnings("unchecked")
Class<? extends InputFormat<?,?>> inputFormatClass = (Class<? extends InputFormat<?,?>>) Class.forName(inputFormatClassName);
@@ -253,6 +257,7 @@ public int run(String[] args) throws Exception {
AccumuloInputFormat.setConnectorInfo(job, user, new PasswordToken(pass));
AccumuloInputFormat.setInputTableName(job, table);
AccumuloInputFormat.setMockInstance(job, instanceName);
+ AccumuloInputFormat.setBatchScan(job, batchScan);
job.setMapperClass(TestMapper.class);
job.setMapOutputKeyClass(Key.class);
@@ -294,6 +299,27 @@ public void testMap() throws Exception {
assertNull(e2);
}
+ @Test
+ public void testMapWithBatchScanner() throws Exception {
+ final String INSTANCE_NAME = PREFIX + "_mapreduce_instance";
+ final String TEST_TABLE_2 = PREFIX + "_mapreduce_table_2";
+
+ MockInstance mockInstance = new MockInstance(INSTANCE_NAME);
+ Connector c = mockInstance.getConnector("root", new PasswordToken(""));
+ c.tableOperations().create(TEST_TABLE_2);
+ BatchWriter bw = c.createBatchWriter(TEST_TABLE_2, new BatchWriterConfig());
+ for (int i = 0; i < 100; i++) {
+ Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
+ m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
+ bw.addMutation(m);
+ }
+ bw.close();
+
+ Assert.assertEquals(0, MRTester.main(new String[] {"root", "", TEST_TABLE_2, INSTANCE_NAME, AccumuloInputFormat.class.getCanonicalName(), "True"}));
+ assertNull(e1);
+ assertNull(e2);
+ }
+
@Test
public void testCorrectRangeInputSplits() throws Exception {
Job job = Job.getInstance(new Configuration(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/impl/BatchInputSplitTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/impl/BatchInputSplitTest.java
new file mode 100644
index 00000000000..4f3caf0202c
--- /dev/null
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/impl/BatchInputSplitTest.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapreduce.impl;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.mapreduce.impl.BatchInputSplit;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.iterators.user.SummingCombiner;
+import org.apache.accumulo.core.iterators.user.WholeRowIterator;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.hadoop.io.Text;
+import org.apache.log4j.Level;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class BatchInputSplitTest {
+
+ @Test
+ public void testSimpleWritable() throws IOException {
+ Range[] ranges = new Range[] {new Range(new Key("a"), new Key("b"))};
+ BatchInputSplit split = new BatchInputSplit("table", "1", Arrays.asList(ranges), new String[] {"localhost"});
+
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ DataOutputStream dos = new DataOutputStream(baos);
+ split.write(dos);
+
+ BatchInputSplit newSplit = new BatchInputSplit();
+
+ ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
+ DataInputStream dis = new DataInputStream(bais);
+ newSplit.readFields(dis);
+
+ Assert.assertEquals(split.getTableName(), newSplit.getTableName());
+ Assert.assertEquals(split.getTableId(), newSplit.getTableId());
+ Assert.assertEquals(split.getRanges(), newSplit.getRanges());
+ Assert.assertTrue(Arrays.equals(split.getLocations(), newSplit.getLocations()));
+ }
+
+ @Test
+ public void testAllFieldsWritable() throws IOException {
+ Range[] ranges = new Range[] {new Range(new Key("a"), new Key("b"))};
+ BatchInputSplit split = new BatchInputSplit("table", "1", Arrays.asList(ranges), new String[] {"localhost"});
+
+ Set<Pair<Text,Text>> fetchedColumns = new HashSet<Pair<Text,Text>>();
+
+ fetchedColumns.add(new Pair(new Text("colf1"), new Text("colq1")));
+ fetchedColumns.add(new Pair(new Text("colf2"), new Text("colq2")));
+
+ // Fake some iterators
+ ArrayList<IteratorSetting> iterators = new ArrayList<IteratorSetting>();
+ IteratorSetting setting = new IteratorSetting(50, SummingCombiner.class);
+ setting.addOption("foo", "bar");
+ iterators.add(setting);
+
+ setting = new IteratorSetting(100, WholeRowIterator.class);
+ setting.addOption("bar", "foo");
+ iterators.add(setting);
+
+ split.setTableName("table");
+ split.setAuths(new Authorizations("foo"));
+ split.setFetchedColumns(fetchedColumns);
+ split.setToken(new PasswordToken("password"));
+ split.setPrincipal("root");
+ split.setMockInstance(true);
+ split.setInstanceName("instance");
+ split.setZooKeepers("localhost");
+ split.setIterators(iterators);
+ split.setLogLevel(Level.WARN);
+
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ DataOutputStream dos = new DataOutputStream(baos);
+ split.write(dos);
+
+ BatchInputSplit newSplit = new BatchInputSplit();
+
+ ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
+ DataInputStream dis = new DataInputStream(bais);
+ newSplit.readFields(dis);
+
+ Assert.assertEquals(split.getRanges(), newSplit.getRanges());
+ Assert.assertArrayEquals(split.getLocations(), newSplit.getLocations());
+
+ Assert.assertEquals(split.getTableName(), newSplit.getTableName());
+ Assert.assertEquals(split.getAuths(), newSplit.getAuths());
+ Assert.assertEquals(split.getFetchedColumns(), newSplit.getFetchedColumns());
+ Assert.assertEquals(split.getToken(), newSplit.getToken());
+ Assert.assertEquals(split.getPrincipal(), newSplit.getPrincipal());
+ Assert.assertEquals(split.getInstanceName(), newSplit.getInstanceName());
+ Assert.assertEquals(split.isMockInstance(), newSplit.isMockInstance());
+ Assert.assertEquals(split.getZooKeepers(), newSplit.getZooKeepers());
+ Assert.assertEquals(split.getIterators(), newSplit.getIterators());
+ Assert.assertEquals(split.getLogLevel(), newSplit.getLogLevel());
+ }
+
+}
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java b/test/src/test/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java
index 02a00f8f02b..bc607c3c4a8 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java
@@ -32,9 +32,9 @@
import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
+import org.apache.accumulo.core.client.mapreduce.impl.BatchInputSplit;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.DefaultConfiguration;
@@ -75,7 +75,7 @@ public void before() {
* Tests several different paths through the getSplits() method by setting different properties and verifying the results.
*/
@Test
- public void testGetSplits() throws IOException, AccumuloSecurityException, AccumuloException, TableNotFoundException, TableExistsException {
+ public void testGetSplits() throws Exception {
Connector conn = getConnector();
String table = getUniqueNames(1)[0];
conn.tableOperations().create(table);
@@ -128,7 +128,7 @@ public void testGetSplits() throws IOException, AccumuloSecurityException, Accum
try {
inputFormat.getSplits(job);
fail("An exception should have been thrown");
- } catch (Exception e) {}
+ } catch (IOException e) {}
conn.tableOperations().offline(table);
splits = inputFormat.getSplits(job);
@@ -146,6 +146,49 @@ public void testGetSplits() throws IOException, AccumuloSecurityException, Accum
AccumuloInputFormat.setAutoAdjustRanges(job, false);
splits = inputFormat.getSplits(job);
assertEquals(ranges.size(), splits.size());
+
+ // BatchScan not available for offline scans
+ AccumuloInputFormat.setBatchScan(job, true);
+
+ AccumuloInputFormat.setOfflineTableScan(job, true);
+ try {
+ inputFormat.getSplits(job);
+ fail("An exception should have been thrown");
+ } catch (IOException e) {}
+ AccumuloInputFormat.setOfflineTableScan(job, false);
+
+ // test for resumption of success
+ splits = inputFormat.getSplits(job);
+ assertEquals(2, splits.size());
+
+ // BatchScan not available with isolated iterators
+ AccumuloInputFormat.setScanIsolation(job, true);
+ try {
+ inputFormat.getSplits(job);
+ fail("An exception should have been thrown");
+ } catch (IOException e) {}
+ AccumuloInputFormat.setScanIsolation(job, false);
+
+ // test for resumption of success
+ splits = inputFormat.getSplits(job);
+ assertEquals(2, splits.size());
+
+ // BatchScan not available with local iterators
+ AccumuloInputFormat.setLocalIterators(job, true);
+ try {
+ inputFormat.getSplits(job);
+ fail("An exception should have been thrown");
+ } catch (IOException e) {}
+ AccumuloInputFormat.setLocalIterators(job, false);
+
+ // Check that we are getting back the correct type of split
+ conn.tableOperations().online(table);
+ splits = inputFormat.getSplits(job);
+ for (InputSplit split : splits)
+ assertTrue(split instanceof BatchInputSplit);
+
+ // We should divide along tablet lines, similar to when setAutoAdjustRanges(job, true) is used
+ assertEquals(2, splits.size());
}
private void insertData(String tableName, long ts) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {