Query Cache: Add hit and miss count
closes #7355
kimchy committed Aug 20, 2014
1 parent fb2cd0b commit 6cbfef0
Showing 6 changed files with 58 additions and 3 deletions.
@@ -35,18 +35,24 @@ public class QueryCacheStats implements Streamable, ToXContent {

long memorySize;
long evictions;
long hitCount;
long missCount;

public QueryCacheStats() {
}

- public QueryCacheStats(long memorySize, long evictions) {
+ public QueryCacheStats(long memorySize, long evictions, long hitCount, long missCount) {
this.memorySize = memorySize;
this.evictions = evictions;
this.hitCount = hitCount;
this.missCount = missCount;
}

public void add(QueryCacheStats stats) {
this.memorySize += stats.memorySize;
this.evictions += stats.evictions;
this.hitCount += stats.hitCount;
this.missCount += stats.missCount;
}

public long getMemorySizeInBytes() {
@@ -61,23 +67,37 @@ public long getEvictions() {
return this.evictions;
}

public long getHitCount() {
return this.hitCount;
}

public long getMissCount() {
return this.missCount;
}

@Override
public void readFrom(StreamInput in) throws IOException {
memorySize = in.readVLong();
evictions = in.readVLong();
hitCount = in.readVLong();
missCount = in.readVLong();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(memorySize);
out.writeVLong(evictions);
out.writeVLong(hitCount);
out.writeVLong(missCount);
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.QUERY_CACHE_STATS);
builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize);
builder.field(Fields.EVICTIONS, getEvictions());
builder.field(Fields.HIT_COUNT, getHitCount());
builder.field(Fields.MISS_COUNT, getMissCount());
builder.endObject();
return builder;
}
Expand All @@ -87,5 +107,7 @@ static final class Fields {
static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions");
static final XContentBuilderString HIT_COUNT = new XContentBuilderString("hit_count");
static final XContentBuilderString MISS_COUNT = new XContentBuilderString("miss_count");
}
}
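The two new counters are plain long fields, and readFrom and writeTo serialize them in the same order so the wire format stays symmetric. As a rough illustration of how a consumer might use the new getters, here is a minimal, hypothetical helper (the class and method names are not part of the commit) that derives a hit ratio from the values returned by getHitCount() and getMissCount():

// Hypothetical helper, not part of this commit: derives a hit ratio from the two
// counters that QueryCacheStats now exposes via getHitCount() and getMissCount().
public final class QueryCacheRatio {

    private QueryCacheRatio() {}

    public static double hitRatio(long hitCount, long missCount) {
        long total = hitCount + missCount;
        // No lookups recorded yet: report 0 rather than dividing by zero.
        return total == 0 ? 0.0 : (double) hitCount / total;
    }
}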
@@ -40,14 +40,24 @@ public class ShardQueryCache extends AbstractIndexShardComponent implements Remo

final CounterMetric evictionsMetric = new CounterMetric();
final CounterMetric totalMetric = new CounterMetric();
final CounterMetric hitCount = new CounterMetric();
final CounterMetric missCount = new CounterMetric();

@Inject
public ShardQueryCache(ShardId shardId, @IndexSettings Settings indexSettings) {
super(shardId, indexSettings);
}

public QueryCacheStats stats() {
- return new QueryCacheStats(totalMetric.count(), evictionsMetric.count());
+ return new QueryCacheStats(totalMetric.count(), evictionsMetric.count(), hitCount.count(), missCount.count());
}

public void onHit() {
hitCount.inc();
}

public void onMiss() {
missCount.inc();
}

public void onCached(IndicesQueryCache.Key key, BytesReference value) {
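ShardQueryCache keeps live, thread-safe counters (CounterMetric) on the hot path and snapshots them into an immutable QueryCacheStats whenever stats() is called. A standalone sketch of that pattern, using AtomicLong as a stand-in for CounterMetric (all names here are illustrative, not from the commit):

import java.util.concurrent.atomic.AtomicLong;

// Illustrative stand-in for the live-counter / snapshot pattern used by ShardQueryCache.
final class HitMissCounters {

    private final AtomicLong hits = new AtomicLong();
    private final AtomicLong misses = new AtomicLong();

    void onHit() { hits.incrementAndGet(); }    // cheap, lock-free increment on the hot path
    void onMiss() { misses.incrementAndGet(); }

    // Point-in-time snapshot, analogous to stats() building a QueryCacheStats.
    long[] snapshot() {
        return new long[] { hits.get(), misses.get() };
    }
}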
@@ -99,7 +99,6 @@ public class IndicesQueryCache extends AbstractComponent implements RemovalListe
//TODO make these changes configurable on the cluster level
private volatile String size;
private volatile TimeValue expire;
- //TODO expose this in our stats APIs
private volatile Cache<Key, BytesReference> cache;

@Inject
@@ -218,6 +217,7 @@ public QuerySearchResultProvider load(final ShardSearchRequest request, final Se
Loader loader = new Loader(queryPhase, context, key);
BytesReference value = cache.get(key, loader);
if (loader.isLoaded()) {
key.shard.queryCache().onMiss();
// see if its the first time we see this reader, and make sure to register a cleanup key
CleanupKey cleanupKey = new CleanupKey(context.indexShard(), ((DirectoryReader) context.searcher().getIndexReader()).getVersion());
if (!registeredClosedListeners.containsKey(cleanupKey)) {
@@ -226,6 +226,8 @@ public QuerySearchResultProvider load(final ShardSearchRequest request, final Se
context.searcher().getIndexReader().addReaderClosedListener(cleanupKey);
}
}
} else {
key.shard.queryCache().onHit();
}

// try and be smart, and reuse an already loaded and constructed QueryResult of in VM execution
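The hit/miss classification piggybacks on the existing Loader: cache.get(key, loader) only invokes the loader when the key is absent, so loader.isLoaded() being true means the result was just computed (counted as a miss via onMiss()), while false means it was served from the cache (onHit()). Below is a minimal sketch of the same detection pattern around a Guava cache; the class, the plain hit/miss fields, and the computed value are illustrative only, and the real code uses CounterMetric rather than unsynchronized fields.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

// Illustrative only: counts hits and misses around Cache#get(key, loader),
// mirroring how the isLoaded() check above distinguishes the two cases.
final class CountingCacheLookup {

    private final Cache<String, String> cache = CacheBuilder.newBuilder().maximumSize(1000).build();
    private long hits;   // the real code uses CounterMetric; plain fields are not thread-safe
    private long misses;

    String get(final String key) throws ExecutionException {
        final boolean[] loaded = new boolean[1];
        String value = cache.get(key, new Callable<String>() {
            @Override
            public String call() {
                loaded[0] = true;          // runs only when the key is absent
                return "computed-" + key;  // stand-in for executing the query phase
            }
        });
        if (loaded[0]) {
            misses++;  // value was just computed and cached
        } else {
            hits++;    // value came straight from the cache
        }
        return value;
    }
}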
@@ -129,6 +129,12 @@ Table getTableWithHeader(final RestRequest request) {
table.addCell("query_cache.evictions", "sibling:pri;alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions");
table.addCell("pri.query_cache.evictions", "default:false;text-align:right;desc:query cache evictions");

table.addCell("query_cache.hit_count", "sibling:pri;alias:qchc,queryCacheHitCount;default:false;text-align:right;desc:query cache hit count");
table.addCell("pri.query_cache.hit_count", "default:false;text-align:right;desc:query cache hit count");

table.addCell("query_cache.miss_count", "sibling:pri;alias:qcmc,queryCacheMissCount;default:false;text-align:right;desc:query cache miss count");
table.addCell("pri.query_cache.miss_count", "default:false;text-align:right;desc:query cache miss count");

table.addCell("flush.total", "sibling:pri;alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes");
table.addCell("pri.flush.total", "default:false;text-align:right;desc:number of flushes");

@@ -314,6 +320,12 @@ private Table buildTable(RestRequest request, String[] indices, ClusterHealthRes
table.addCell(indexStats == null ? null : indexStats.getTotal().getQueryCache().getEvictions());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getQueryCache().getEvictions());

table.addCell(indexStats == null ? null : indexStats.getTotal().getQueryCache().getHitCount());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getQueryCache().getHitCount());

table.addCell(indexStats == null ? null : indexStats.getTotal().getQueryCache().getMissCount());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getQueryCache().getMissCount());

table.addCell(indexStats == null ? null : indexStats.getTotal().getFlush().getTotal());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFlush().getTotal());

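Both new cat-indices columns are hidden by default (default:false), so they would typically be requested explicitly, for example with something like _cat/indices?v&h=index,query_cache.hit_count,query_cache.miss_count, or via the qchc and qcmc aliases registered above; the pri.* variants show the primaries-only numbers alongside the totals.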
@@ -124,6 +124,8 @@ Table getTableWithHeader(final RestRequest request) {

table.addCell("query_cache.memory_size", "alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used query cache");
table.addCell("query_cache.evictions", "alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions");
table.addCell("query_cache.hit_count", "alias:qchc,queryCacheHitCount;default:false;text-align:right;desc:query cache hit counts");
table.addCell("query_cache.miss_count", "alias:qcmc,queryCacheMissCount;default:false;text-align:right;desc:query cache miss counts");

table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes");
table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush");
@@ -231,6 +233,8 @@ private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoR

table.addCell(stats == null ? null : stats.getIndices().getQueryCache().getMemorySize());
table.addCell(stats == null ? null : stats.getIndices().getQueryCache().getEvictions());
table.addCell(stats == null ? null : stats.getIndices().getQueryCache().getHitCount());
table.addCell(stats == null ? null : stats.getIndices().getQueryCache().getMissCount());

table.addCell(stats == null ? null : stats.getIndices().getFlush().getTotal());
table.addCell(stats == null ? null : stats.getIndices().getFlush().getTotalTime());
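The cat-nodes table gains the same two columns (aliases qchc and qcmc), reporting each node's cumulative query cache hits and misses taken from its node-level indices stats.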
@@ -27,6 +27,7 @@
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamInput;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
@@ -225,10 +226,14 @@ public void testQueryCache() throws Exception {
}

assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getHitCount(), equalTo(0l));
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMissCount(), equalTo(0l));
for (int i = 0; i < 10; i++) {
assertThat(client().prepareSearch("idx").setSearchType(SearchType.COUNT).get().getHits().getTotalHits(), equalTo((long) numDocs));
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
}
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getHitCount(), greaterThan(0l));
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMissCount(), greaterThan(0l));

// index the data again...
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
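The new assertions rely on the loop above running the same count request ten times against a warm index: the first execution has to load the result into the query cache (recorded as a miss), and the repeated identical requests can then be served from it, so after the loop both the hit count and the miss count are expected to be greater than zero.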
