HBASE-11544 [Ergonomics] hbase.client.scanner.caching is dogged and will try to return batch even if it means OOME

Added in some check-style fixes to bring us back under the limit

Signed-off-by: stack <stack@apache.org>
Jonathan Lawlor authored and saintstack committed Apr 8, 2015
1 parent a9d7c49 commit 26ba621
Showing 59 changed files with 1,192 additions and 787 deletions.
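
At a glance, the change teaches the scan machinery to honor size limits instead of blindly buffering hbase.client.scanner.caching rows per RPC. As a purely illustrative client-side sketch (Scan#setMaxResultSize, Scan#setAllowPartialResults and Result#isPartial below are assumptions about the public 1.1 client API, not part of this diff), a caller would bound response size like so:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SizeBoundedScanExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("example_table"))) { // hypothetical table
      Scan scan = new Scan();
      scan.setCaching(10000);                 // row-count hint; no longer forces an OOME-sized batch
      scan.setMaxResultSize(2 * 1024 * 1024); // cap the bytes returned per RPC (assumed setter)
      scan.setAllowPartialResults(true);      // opt in to seeing rows in pieces (assumed setter)
      try (ResultScanner rs = table.getScanner(scan)) {
        for (Result r : rs) {
          // With partials allowed, one row may arrive as several Results; isPartial() flags them.
          System.out.println(Bytes.toString(r.getRow()) + " partial=" + r.isPartial());
        }
      }
    }
  }
}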
@@ -159,6 +159,9 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
    */
   @Deprecated
   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
+  /**
+   * @deprecated
+   */
   @Deprecated
   private static final Bytes DEFERRED_LOG_FLUSH_KEY =
       new Bytes(Bytes.toBytes(DEFERRED_LOG_FLUSH));
@@ -315,6 +318,7 @@ public HTableDescriptor(final TableName name) {
    * Construct a table descriptor specifying a byte array table name
    * @param name Table name.
    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
+   * @deprecated
    */
   @Deprecated
   public HTableDescriptor(final byte[] name) {
@@ -325,6 +329,7 @@ public HTableDescriptor(final byte[] name) {
    * Construct a table descriptor specifying a String table name
    * @param name Table name.
    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
+   * @deprecated
    */
   @Deprecated
   public HTableDescriptor(final String name) {
@@ -699,13 +704,17 @@ public String getRegionSplitPolicyClassName() {
    * Set the name of the table.
    *
    * @param name name of table
+   * @deprecated
    */
   @Deprecated
   public HTableDescriptor setName(byte[] name) {
     setName(TableName.valueOf(name));
     return this;
   }

+  /**
+   * @deprecated
+   */
   @Deprecated
   public HTableDescriptor setName(TableName name) {
     this.name = name;
@@ -1340,6 +1349,7 @@ public void removeCoprocessor(String className) {
    * @param rootdir qualified path of HBase root directory
    * @param tableName name of table
    * @return {@link Path} for table
+   * @deprecated
    */
   @Deprecated
   public static Path getTableDir(Path rootdir, final byte [] tableName) {
@@ -1353,6 +1363,7 @@ public static Path getTableDir(Path rootdir, final byte [] tableName) {
   /** Table descriptor for <code>hbase:meta</code> catalog table
    * Deprecated, use TableDescriptors#get(TableName.META_TABLE) or
    * Admin#getTableDescriptor(TableName.META_TABLE) instead.
+   * @deprecated
    */
   @Deprecated
   public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
@@ -1412,12 +1423,18 @@ public static Path getTableDir(Path rootdir, final byte [] tableName) {
           .setCacheDataInL1(true)
       });

+  /**
+   * @deprecated
+   */
   @Deprecated
   public HTableDescriptor setOwner(User owner) {
     return setOwnerString(owner != null ? owner.getShortName() : null);
   }

-  // used by admin.rb:alter(table_name,*args) to update owner.
+  /**
+   * used by admin.rb:alter(table_name,*args) to update owner.
+   * @deprecated
+   */
   @Deprecated
   public HTableDescriptor setOwnerString(String ownerString) {
     if (ownerString != null) {
@@ -1428,6 +1445,9 @@ public HTableDescriptor setOwnerString(String ownerString) {
     return this;
   }

+  /**
+   * @deprecated
+   */
   @Deprecated
   public String getOwnerString() {
     if (getValue(OWNER_KEY) != null) {
@@ -401,6 +401,9 @@ protected void loadCache() throws IOException {
         // happens for the cases where we see exceptions. Since only openScanner
         // would have happened, values would be null
         if (values == null && callable.switchedToADifferentReplica()) {
+          // Any accumulated partial results are no longer valid since the callable will
+          // openScanner with the correct startkey and we must pick up from there
+          clearPartialResults();
           this.currentRegion = callable.getHRegionInfo();
           continue;
         }
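
The clearPartialResults() call added above matters because the client scanner may be holding the first pieces of a row when it restarts the scan on another replica; those pieces would otherwise be glued onto cells fetched from a different starting point. A rough sketch of the kind of reassembly being protected (the class below is illustrative only, not the actual ClientScanner code; Result.create, Result.rawCells and Result.isPartial are assumed from the client API):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;

class PartialResultStitcher {
  private final List<Cell> buffered = new ArrayList<Cell>();

  /** Feed results in arrival order; returns a complete row once its last piece arrives. */
  Result add(Result piece) {
    for (Cell c : piece.rawCells()) {
      buffered.add(c);
    }
    if (piece.isPartial()) {
      return null;                  // more pieces of this row are still coming
    }
    Result whole = Result.create(new ArrayList<Cell>(buffered));
    buffered.clear();
    return whole;
  }

  /** Rough equivalent of clearPartialResults(): drop stale pieces after a scanner restart. */
  void reset() {
    buffered.clear();
  }
}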
@@ -292,21 +292,39 @@ private int addCallsForOtherReplicas(
         continue; //this was already scheduled earlier
       }
       ScannerCallable s = currentScannerCallable.getScannerCallableForReplica(id);
-      if (this.lastResult != null) {
-        if(s.getScan().isReversed()){
-          s.getScan().setStartRow(createClosestRowBefore(this.lastResult.getRow()));
-        }else {
-          s.getScan().setStartRow(Bytes.add(this.lastResult.getRow(), new byte[1]));
-        }
-      }
+      setStartRowForReplicaCallable(s);
       outstandingCallables.add(s);
       RetryingRPC retryingOnReplica = new RetryingRPC(s);
       cs.submit(retryingOnReplica, scannerTimeout, id);
     }
     return max - min + 1;
   }

+  /**
+   * Set the start row for the replica callable based on the state of the last result received.
+   * @param callable The callable to set the start row on
+   */
+  private void setStartRowForReplicaCallable(ScannerCallable callable) {
+    if (this.lastResult == null || callable == null) return;
+
+    if (this.lastResult.isPartial()) {
+      // The last result was a partial result which means we have not received all of the cells
+      // for this row. Thus, use the last result's row as the start row. If a replica switch
+      // occurs, the scanner will ensure that any accumulated partial results are cleared,
+      // and the scan can resume from this row.
+      callable.getScan().setStartRow(this.lastResult.getRow());
+    } else {
+      // The last result was not a partial result which means it contained all of the cells for
+      // that row (we no longer need any information from it). Set the start row to the next
+      // closest row that could be seen.
+      if (callable.getScan().isReversed()) {
+        callable.getScan().setStartRow(createClosestRowBefore(this.lastResult.getRow()));
+      } else {
+        callable.getScan().setStartRow(Bytes.add(this.lastResult.getRow(), new byte[1]));
+      }
+    }
+  }
+
   @VisibleForTesting
   boolean isAnyRPCcancelled() {
     return someRPCcancelled;
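
setStartRowForReplicaCallable above distinguishes two resume points: a partial row restarts at the same row key, while a completed row restarts at the smallest key that sorts strictly after it. For a forward scan that key is simply the last row with a single 0x00 byte appended, which is what the Bytes.add(lastResult.getRow(), new byte[1]) idiom computes. A small standalone illustration (the class and method names here are made up):

import org.apache.hadoop.hbase.util.Bytes;

public final class ScanResumeKeys {
  private ScanResumeKeys() {}

  /** Smallest row key strictly greater than lastRow: the same bytes plus one 0x00 byte. */
  public static byte[] nextRowForForwardScan(byte[] lastRow) {
    return Bytes.add(lastRow, new byte[1]);
  }

  public static void main(String[] args) {
    byte[] next = nextRowForForwardScan(Bytes.toBytes("row-0042"));
    // Prints row-0042\x00 -- no key can sort between "row-0042" and this value.
    System.out.println(Bytes.toStringBinary(next));
  }
}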
@@ -574,6 +574,7 @@ public static long estimatedHeapSizeOf(final Cell cell) {
    * backwards compatible with estimations done by older clients. We need to
    * pretend that tags never exist and cells aren't serialized with tag
    * length included. See HBASE-13262 and HBASE-13303
+   * @deprecated See above comment
    */
   @Deprecated
   public static long estimatedHeapSizeOfWithoutTags(final Cell cell) {
19 changes: 11 additions & 8 deletions hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -391,7 +391,7 @@ public enum OperationStatusCode {

   /**
    * The hbase:meta table's name.
-   *
+   * @deprecated For 0.94 to 0.96 compatibility. Replaced by define in TableName
    */
   @Deprecated // for compat from 0.94 -> 0.96.
   public static final byte[] META_TABLE_NAME = TableName.META_TABLE_NAME.getName();
@@ -579,7 +579,7 @@ public enum OperationStatusCode {
    * 1, 2, 3, 5, 10, 20, 40, 100, 100, 100.
    * With 100ms, a back-off of 200 means 20s
    */
-  public static final int RETRY_BACKOFF[] = {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200};
+  public static final int [] RETRY_BACKOFF = {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200};

   public static final String REGION_IMPL = "hbase.hregion.impl";

@@ -780,7 +780,8 @@ public static enum Modify {
   /**
    * timeout for short operation RPC
    */
-  public static final String HBASE_RPC_SHORTOPERATION_TIMEOUT_KEY = "hbase.rpc.shortoperation.timeout";
+  public static final String HBASE_RPC_SHORTOPERATION_TIMEOUT_KEY =
+      "hbase.rpc.shortoperation.timeout";

   /**
    * Default value of {@link #HBASE_RPC_SHORTOPERATION_TIMEOUT_KEY}
@@ -835,8 +836,8 @@ public static enum Modify {
    */
   public static final float HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD = 0.2f;

-  public static final Pattern CP_HTD_ATTR_KEY_PATTERN = Pattern.compile
-      ("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);
+  public static final Pattern CP_HTD_ATTR_KEY_PATTERN = Pattern.compile(
+      "^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);
   public static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
       Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");

@@ -889,7 +890,7 @@ public static enum Modify {
    * 1 => Abort only all of the handers have died
    */
   public static final String REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT =
-    "hbase.regionserver.handler.abort.on.error.percent";
+      "hbase.regionserver.handler.abort.on.error.percent";
   public static final double DEFAULT_REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT = 0.5;

   //High priority handlers to deal with admin requests and system table operation requests
@@ -949,7 +950,8 @@ public static enum Modify {
   public static final String DEFAULT_WAL_STORAGE_POLICY = "NONE";

   /** Region in Transition metrics threshold time */
-  public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD="hbase.metrics.rit.stuck.warning.threshold";
+  public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD =
+      "hbase.metrics.rit.stuck.warning.threshold";

   public static final String LOAD_BALANCER_SLOP_KEY = "hbase.regions.slop";

@@ -1044,7 +1046,8 @@ public static enum Modify {
    * 0.0.0.0.
    * @see <a href="https://issues.apache.org/jira/browse/HBASE-9961">HBASE-9961</a>
    */
-  public static final String STATUS_MULTICAST_BIND_ADDRESS = "hbase.status.multicast.bind.address.ip";
+  public static final String STATUS_MULTICAST_BIND_ADDRESS =
+      "hbase.status.multicast.bind.address.ip";
   public static final String DEFAULT_STATUS_MULTICAST_BIND_ADDRESS = "0.0.0.0";

   /**
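
For reference, the RETRY_BACKOFF table touched above is a set of multipliers applied to the configured base pause (hbase.client.pause, 100 ms by default) and clamped at the last entry. The sketch below reproduces that arithmetic; HBase's own retry helper also adds jitter, which is omitted here:

public final class RetryBackoff {
  /** Multipliers from HConstants.RETRY_BACKOFF above. */
  private static final int[] RETRY_BACKOFF =
      {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200};

  /** Pause before retry number `retries`, in ms: base pause times the clamped multiplier. */
  public static long pauseTime(long basePauseMs, int retries) {
    int index = Math.min(retries, RETRY_BACKOFF.length - 1);
    return basePauseMs * RETRY_BACKOFF[index];
  }

  public static void main(String[] args) {
    // With the default 100 ms base pause, the final multiplier of 200 yields 20 s,
    // matching the "With 100ms, a back-off of 200 means 20s" comment in the hunk above.
    System.out.println(pauseTime(100, 20)); // 20000
  }
}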
@@ -46,7 +46,6 @@
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
-import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
 import org.apache.hadoop.hbase.regionserver.OperationStatus;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
@@ -137,7 +136,7 @@ public void delete(RpcController controller, BulkDeleteRequest request,
         List<List<Cell>> deleteRows = new ArrayList<List<Cell>>(rowBatchSize);
         for (int i = 0; i < rowBatchSize; i++) {
           List<Cell> results = new ArrayList<Cell>();
-          hasMore = NextState.hasMoreValues(scanner.next(results));
+          hasMore = scanner.next(results);
           if (results.size() > 0) {
             deleteRows.add(results);
           }
@@ -34,7 +34,6 @@
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
 import org.apache.hadoop.hbase.util.Bytes;

 import com.google.protobuf.RpcCallback;
@@ -81,7 +80,7 @@ public void getRowCount(RpcController controller, ExampleProtos.CountRequest req
       byte[] lastRow = null;
       long count = 0;
       do {
-        hasMore = NextState.hasMoreValues(scanner.next(results));
+        hasMore = scanner.next(results);
         for (Cell kv : results) {
           byte[] currentRow = CellUtil.cloneRow(kv);
           if (lastRow == null || !Bytes.equals(lastRow, currentRow)) {
@@ -120,7 +119,7 @@ public void getKeyValueCount(RpcController controller, ExampleProtos.CountReques
       boolean hasMore = false;
       long count = 0;
       do {
-        hasMore = NextState.hasMoreValues(scanner.next(results));
+        hasMore = scanner.next(results);
         for (Cell kv : results) {
           count++;
         }
@@ -22,16 +22,17 @@
 import java.util.ArrayList;
 import java.util.List;

-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.NoLimitScannerContext;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.mortbay.log.Log;

@@ -72,10 +73,7 @@ public ClientSideRegionScanner(Configuration conf, FileSystem fs,
   public Result next() throws IOException {
     values.clear();

-    // negative values indicate no limits
-    final long remainingResultSize = -1;
-    final int batchLimit = -1;
-    scanner.nextRaw(values, batchLimit, remainingResultSize);
+    scanner.nextRaw(values, NoLimitScannerContext.NO_LIMIT);
     if (values.isEmpty()) {
       //we are done
       return null;
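
The hunk above swaps the old nextRaw(values, batchLimit, remainingResultSize) call, where negative numbers meant "no limit", for a single ScannerContext argument. A hedged sketch of driving a RegionScanner the same way (obtaining the scanner, e.g. from Region#getScanner, is assumed, as is the boolean "has more rows" return value used elsewhere in this commit):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.NoLimitScannerContext;
import org.apache.hadoop.hbase.regionserver.RegionScanner;

public final class DrainRegionScanner {
  /** Counts cells in a region by repeatedly asking for rows with no per-call limits. */
  public static long countCells(RegionScanner scanner) throws IOException {
    List<Cell> values = new ArrayList<Cell>();
    long cells = 0;
    boolean hasMore;
    do {
      values.clear();
      // NO_LIMIT mirrors ClientSideRegionScanner.next(): never stop mid-row for batch/size limits.
      hasMore = scanner.nextRaw(values, NoLimitScannerContext.NO_LIMIT);
      cells += values.size();
    } while (hasMore);
    return cells;
  }
}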
@@ -38,7 +38,6 @@
 import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateService;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;

 import com.google.protobuf.ByteString;
 import com.google.protobuf.Message;
@@ -92,7 +91,7 @@ public void getMax(RpcController controller, AggregateRequest request,
       // qualifier can be null.
       boolean hasMoreRows = false;
       do {
-        hasMoreRows = NextState.hasMoreValues(scanner.next(results));
+        hasMoreRows = scanner.next(results);
         int listSize = results.size();
         for (int i = 0; i < listSize; i++) {
           temp = ci.getValue(colFamily, qualifier, results.get(i));
@@ -146,7 +145,7 @@ public void getMin(RpcController controller, AggregateRequest request,
       }
       boolean hasMoreRows = false;
       do {
-        hasMoreRows = NextState.hasMoreValues(scanner.next(results));
+        hasMoreRows = scanner.next(results);
         int listSize = results.size();
         for (int i = 0; i < listSize; i++) {
           temp = ci.getValue(colFamily, qualifier, results.get(i));
@@ -200,7 +199,7 @@ public void getSum(RpcController controller, AggregateRequest request,
       List<Cell> results = new ArrayList<Cell>();
       boolean hasMoreRows = false;
       do {
-        hasMoreRows = NextState.hasMoreValues(scanner.next(results));
+        hasMoreRows = scanner.next(results);
         int listSize = results.size();
         for (int i = 0; i < listSize; i++) {
           temp = ci.getValue(colFamily, qualifier, results.get(i));
@@ -254,7 +253,7 @@ public void getRowNum(RpcController controller, AggregateRequest request,
       scanner = env.getRegion().getScanner(scan);
       boolean hasMoreRows = false;
       do {
-        hasMoreRows = NextState.hasMoreValues(scanner.next(results));
+        hasMoreRows = scanner.next(results);
         if (results.size() > 0) {
           counter++;
         }
@@ -313,7 +312,7 @@ public void getAvg(RpcController controller, AggregateRequest request,

       do {
         results.clear();
-        hasMoreRows = NextState.hasMoreValues(scanner.next(results));
+        hasMoreRows = scanner.next(results);
         int listSize = results.size();
         for (int i = 0; i < listSize; i++) {
           sumVal = ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily,
@@ -374,7 +373,7 @@ public void getStd(RpcController controller, AggregateRequest request,

       do {
         tempVal = null;
-        hasMoreRows = NextState.hasMoreValues(scanner.next(results));
+        hasMoreRows = scanner.next(results);
         int listSize = results.size();
         for (int i = 0; i < listSize; i++) {
           tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily,
@@ -441,7 +440,7 @@ public void getMedian(RpcController controller, AggregateRequest request,
       do {
         tempVal = null;
         tempWeight = null;
-        hasMoreRows = NextState.hasMoreValues(scanner.next(results));
+        hasMoreRows = scanner.next(results);
         int listSize = results.size();
         for (int i = 0; i < listSize; i++) {
           Cell kv = results.get(i);
