Skip to content

Commit

Permalink
Added conditional to key bounds decomposition function to detect when…
Browse files Browse the repository at this point in the history
… it is an "all the things" (full key bounds) query, eliminating a heavy memory footprint that occurred when a large number of key ranges was generated and then converted into one large query string.
  • Loading branch information
metasim committed Apr 26, 2017
1 parent 3c37af6 commit dce7022
Showing 1 changed file with 4 additions and 2 deletions.
Expand Up @@ -48,10 +48,12 @@ class AccumuloLayerReader(val attributeStore: AttributeStore)(implicit sc: Spark

val queryKeyBounds = tileQuery(metadata)

val decompose = (bounds: KeyBounds[K]) =>
keyIndex.indexRanges(bounds).map { case (min, max) =>
val decompose = (bounds: KeyBounds[K]) => {
if(queryKeyBounds.size == 1 && queryKeyBounds.head == bounds) Seq(new AccumuloRange())
else keyIndex.indexRanges(bounds).map { case (min, max) =>
new AccumuloRange(new Text(AccumuloKeyEncoder.long2Bytes(min)), new Text(AccumuloKeyEncoder.long2Bytes(max)))
}
}

val rdd = AccumuloRDDReader.read[K, V](header.tileTable, columnFamily(id), queryKeyBounds, decompose, filterIndexOnly, Some(writerSchema))
new ContextRDD(rdd, metadata)
Expand Down

0 comments on commit dce7022

Please sign in to comment.