Skip to content

Commit

Permalink
Disabled query size estimation in percolator, because this is too exp…
Browse files Browse the repository at this point in the history
…ensive cpu wise.

Lucene's RamUsageEstimator.sizeOf(Object) is too expensive.
Query size estimation will be enabled when a cheaper way of query size estimation can be found.

Closes #5372
Relates to #5339
  • Loading branch information
martijnvg committed Mar 14, 2014
1 parent 7cad9b7 commit 7d84fd6
Show file tree
Hide file tree
Showing 4 changed files with 22 additions and 21 deletions.
1 change: 0 additions & 1 deletion pom.xml
Expand Up @@ -988,7 +988,6 @@
<exclude>org/elasticsearch/bootstrap/Bootstrap.class</exclude>
<exclude>org/elasticsearch/Version.class</exclude>
<exclude>org/apache/lucene/queries/XTermsFilter.class</exclude>
<exclude>org/elasticsearch/index/percolator/stats/ShardPercolateService$RamEstimator.class</exclude>
<exclude>org/elasticsearch/index/merge/Merges.class</exclude>
<!-- end excludes for valid system-out -->
<!-- start excludes for Unsafe -->
Expand Down
Expand Up @@ -18,6 +18,7 @@
*/
package org.elasticsearch.index.percolator.stats;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
Expand All @@ -37,7 +38,7 @@ public class PercolateStats implements Streamable, ToXContent {
private long percolateCount;
private long percolateTimeInMillis;
private long current;
private long memorySizeInBytes;
private long memorySizeInBytes = -1;
private long numQueries;

/**
Expand Down Expand Up @@ -90,7 +91,9 @@ public long getNumQueries() {
}

/**
* @return The total size the loaded queries take in memory.
* @return Temporarily returns <code>-1</code>, but this used to return the total size the loaded queries take in
* memory, but this is disabled now because the size estimation was too expensive cpu wise. This will be enabled
* again when a cheaper size estimation can be found.
*/
public long getMemorySizeInBytes() {
return memorySizeInBytes;
Expand Down Expand Up @@ -124,7 +127,6 @@ public void add(PercolateStats percolate) {
percolateCount += percolate.getCount();
percolateTimeInMillis += percolate.getTimeInMillis();
current += percolate.getCurrent();
memorySizeInBytes += percolate.getMemorySizeInBytes();
numQueries += percolate.getNumQueries();
}

Expand All @@ -150,7 +152,11 @@ public void readFrom(StreamInput in) throws IOException {
percolateCount = in.readVLong();
percolateTimeInMillis = in.readVLong();
current = in.readVLong();
memorySizeInBytes = in.readVLong();
if (in.getVersion().before(Version.V_1_1_0)) {
in.readVLong();
} else {
in.readLong();
}
numQueries = in.readVLong();
}

Expand All @@ -159,7 +165,11 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(percolateCount);
out.writeVLong(percolateTimeInMillis);
out.writeVLong(current);
out.writeVLong(memorySizeInBytes);
if (out.getVersion().before(Version.V_1_1_0)) {
out.writeVLong(0);
} else {
out.writeLong(-1);
}
out.writeVLong(numQueries);
}
}
Expand Up @@ -20,7 +20,6 @@
package org.elasticsearch.index.percolator.stats;

import org.apache.lucene.search.Query;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.HashedBytesRef;
import org.elasticsearch.common.metrics.CounterMetric;
Expand All @@ -38,7 +37,6 @@
* <li> total time spent in percolate api
* <li> the current number of percolate requests
* <li> number of registered percolate queries
* <li> the estimated amount of memory the registered queries take
* </ul>
*/
public class ShardPercolateService extends AbstractIndexShardComponent {
Expand All @@ -52,7 +50,6 @@ public ShardPercolateService(ShardId shardId, @IndexSettings Settings indexSetti
private final CounterMetric currentMetric = new CounterMetric();

private final CounterMetric numberOfQueries = new CounterMetric();
private final CounterMetric memorySizeInBytes = new CounterMetric();

public void prePercolate() {
currentMetric.inc();
Expand All @@ -64,27 +61,22 @@ public void postPercolate(long tookInNanos) {
}

public void addedQuery(HashedBytesRef id, Query previousQuery, Query newQuery) {
if (previousQuery != null) {
memorySizeInBytes.dec(computeSizeInMemory(id, previousQuery));
} else {
numberOfQueries.inc();
}
memorySizeInBytes.inc(computeSizeInMemory(id, newQuery));
numberOfQueries.inc();
}

public void removedQuery(HashedBytesRef id, Query query) {
numberOfQueries.dec();
memorySizeInBytes.dec(computeSizeInMemory(id, query));
}

/**
* @return The current metrics
*/
public PercolateStats stats() {
return new PercolateStats(percolateMetric.count(), TimeUnit.NANOSECONDS.toMillis(percolateMetric.sum()), currentMetric.count(), memorySizeInBytes.count(), numberOfQueries.count());
return new PercolateStats(percolateMetric.count(), TimeUnit.NANOSECONDS.toMillis(percolateMetric.sum()), currentMetric.count(), -1, numberOfQueries.count());
}

private static long computeSizeInMemory(HashedBytesRef id, Query query) {
// Enable when a more efficient manner is found for estimating the size of a Lucene query.
/*private static long computeSizeInMemory(HashedBytesRef id, Query query) {
long size = (3 * RamUsageEstimator.NUM_BYTES_INT) + RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + id.bytes.bytes.length;
size += RamEstimator.sizeOf(query);
return size;
Expand All @@ -96,6 +88,6 @@ private static final class RamEstimator {
static long sizeOf(Query query) {
return RamUsageEstimator.sizeOf(query);
}
}
}*/

}
Expand Up @@ -558,7 +558,7 @@ public void testPercolateStatistics() throws Exception {
assertThat(indicesResponse.getTotal().getPercolate().getCount(), equalTo((long) numShards.numPrimaries));
assertThat(indicesResponse.getTotal().getPercolate().getCurrent(), equalTo(0l));
assertThat(indicesResponse.getTotal().getPercolate().getNumQueries(), equalTo((long)numShards.dataCopies)); //number of copies
assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), greaterThan(0l));
assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), equalTo(-1l));

NodesStatsResponse nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet();
long percolateCount = 0;
Expand All @@ -580,7 +580,7 @@ public void testPercolateStatistics() throws Exception {
assertThat(indicesResponse.getTotal().getPercolate().getCount(), equalTo((long) numShards.numPrimaries * 2));
assertThat(indicesResponse.getTotal().getPercolate().getCurrent(), equalTo(0l));
assertThat(indicesResponse.getTotal().getPercolate().getNumQueries(), equalTo((long)numShards.dataCopies)); //number of copies
assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), greaterThan(0l));
assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), equalTo(-1l));

percolateCount = 0;
nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet();
Expand Down

0 comments on commit 7d84fd6

Please sign in to comment.