diff --git a/docs/changelog/130857.yaml b/docs/changelog/130857.yaml
new file mode 100644
index 0000000000000..5e5a5a309dfeb
--- /dev/null
+++ b/docs/changelog/130857.yaml
@@ -0,0 +1,6 @@
+pr: 130857
+summary: Improve `statsByShard` performance when the number of shards is very large
+area: Stats
+type: bug
+issues:
+ - 97222
diff --git a/docs/changelog/138126.yaml b/docs/changelog/138126.yaml
new file mode 100644
index 0000000000000..e69e2ad20f67a
--- /dev/null
+++ b/docs/changelog/138126.yaml
@@ -0,0 +1,6 @@
+pr: 138126
+summary: Fix stats performance
+area: Stats
+type: bug
+issues:
+ - 97222
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
index 94936a3c1b320..ee1df8d041a4f 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
@@ -40,6 +40,7 @@
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.CachedSupplier;
 import org.elasticsearch.common.util.CancellableSingleObjectCache;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.core.FixForMultiProject;
@@ -48,6 +49,8 @@
 import org.elasticsearch.index.seqno.RetentionLeaseStats;
 import org.elasticsearch.index.seqno.SeqNoStats;
 import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndicesQueryCache;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.injection.guice.Inject;
 import org.elasticsearch.node.NodeService;
@@ -75,6 +78,7 @@
 import java.util.concurrent.Executor;
 import java.util.function.BiFunction;
 import java.util.function.BooleanSupplier;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;
 
 /**
@@ -260,9 +264,13 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq
             false,
             false
         );
+        // lazily computes each shard's share of the query cache's shared ram; a shard absent from the map holds none
+        Supplier<Map<ShardId, Long>> shardIdToSharedRam = CachedSupplier.wrap(
+            () -> IndicesQueryCache.getSharedRamSizeForAllShards(indicesService)
+        );
         List<ShardStats> shardsStats = new ArrayList<>();
         for (IndexService indexService : indicesService) {
             for (IndexShard indexShard : indexService) {
                 cancellableTask.ensureNotCancelled();
                 if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) {
                     // only report on fully started shards
@@ -283,7 +291,12 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq
                     new ShardStats(
                         indexShard.routingEntry(),
                         indexShard.shardPath(),
-                        CommonStats.getShardLevelStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS),
+                        CommonStats.getShardLevelStats(
+                            indicesService.getIndicesQueryCache(),
+                            indexShard,
+                            SHARD_STATS_FLAGS,
+                            () -> shardIdToSharedRam.get().getOrDefault(indexShard.shardId(), 0L)
+                        ),
                         commitStats,
                         seqNoStats,
                         retentionLeaseStats,
@@ -314,7 +327,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq
             clusterStatus,
             nodeInfo,
             nodeStats,
-            shardsStats.toArray(new ShardStats[shardsStats.size()]),
+            shardsStats.toArray(new ShardStats[0]),
             searchUsageStats,
             repositoryUsageStats,
             ccsTelemetry,
@@ -476,7 +489,7 @@ protected void sendItemRequest(String clusterAlias, ActionListener<RemoteClusterStatsResponse> listener) {
-            remoteClustersStats.computeIfPresent(clusterAlias, (k, v) -> v.acceptResponse(response));
+            remoteClustersStats.computeIfPresent(clusterAlias, (ignored, v) -> v.acceptResponse(response));
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java
index 0fba9bdcd0a92..8c83cc80c5137 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java
@@ -46,6 +46,7 @@
 
 import java.io.IOException;
 import java.util.Objects;
+import java.util.function.Supplier;
 
 public class CommonStats implements Writeable, ToXContentFragment {
 
@@ -154,7 +155,12 @@ public CommonStats(CommonStatsFlags flags) {
     /**
      * Filters the given flags for {@link CommonStatsFlags#SHARD_LEVEL} flags and calculates the corresponding statistics.
      */
-    public static CommonStats getShardLevelStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) {
+    public static CommonStats getShardLevelStats(
+        IndicesQueryCache indicesQueryCache,
+        IndexShard indexShard,
+        CommonStatsFlags flags,
+        Supplier<Long> precomputedSharedRam
+    ) {
         // Filter shard level flags
         CommonStatsFlags filteredFlags = flags.clone();
         for (CommonStatsFlags.Flag flag : filteredFlags.getFlags()) {
@@ -174,7 +180,7 @@ public static CommonStats getShardLevelStats(IndicesQueryCache
                 case Refresh -> stats.refresh = indexShard.refreshStats();
                 case Flush -> stats.flush = indexShard.flushStats();
                 case Warmer -> stats.warmer = indexShard.warmerStats();
-                case QueryCache -> stats.queryCache = indicesQueryCache.getStats(indexShard.shardId());
+                case QueryCache -> stats.queryCache = indicesQueryCache.getStats(indexShard.shardId(), precomputedSharedRam);
                 case FieldData -> stats.fieldData = indexShard.fieldDataStats(flags.fieldDataFields());
                 case Completion -> stats.completion = indexShard.completionStats(flags.completionDataFields());
                 case Segments -> stats.segments = indexShard.segmentStats(
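The new fourth argument decouples stat collection from the shared-RAM apportioning: the supplier is only consulted in the QueryCache arm of the switch, so callers that do not request query-cache stats never pay for it, and tests can pass a constant. A hedged sketch of the intended calling pattern — the helper name `collectShardLevelStats` is hypothetical, but the wiring mirrors the `statsByShard` change further down:

    import java.util.Map;
    import org.elasticsearch.action.admin.indices.stats.CommonStats;
    import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
    import org.elasticsearch.index.IndexService;
    import org.elasticsearch.index.shard.IndexShard;
    import org.elasticsearch.index.shard.ShardId;
    import org.elasticsearch.indices.IndicesService;
    import org.elasticsearch.indices.IndicesQueryCache;

    // Illustrative only: compute the per-shard shares once, then hand every shard an O(1) lookup.
    static void collectShardLevelStats(IndicesService indicesService, CommonStatsFlags flags) {
        Map<ShardId, Long> shardIdToSharedRam = IndicesQueryCache.getSharedRamSizeForAllShards(indicesService);
        for (IndexService indexService : indicesService) {
            for (IndexShard indexShard : indexService) {
                CommonStats stats = CommonStats.getShardLevelStats(
                    indicesService.getIndicesQueryCache(),
                    indexShard,
                    flags,
                    // a shard absent from the map holds no share of the shared RAM
                    () -> shardIdToSharedRam.getOrDefault(indexShard.shardId(), 0L)
                );
                // ... fold `stats` into the response, as the transport actions below do ...
            }
        }
    }

In tests with no shared RAM to apportion, a constant `() -> 0L` suffices, as the test updates below show.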
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java
index 77b6264671063..0728e8f9edbae 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java
@@ -22,11 +22,13 @@
 import org.elasticsearch.cluster.routing.ShardsIterator;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.util.CachedSupplier;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.CommitStats;
 import org.elasticsearch.index.seqno.RetentionLeaseStats;
 import org.elasticsearch.index.seqno.SeqNoStats;
 import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.indices.IndicesQueryCache;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.injection.guice.Inject;
 import org.elasticsearch.tasks.CancellableTask;
@@ -35,12 +37,13 @@
 import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
+import java.util.function.Supplier;
 
 public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
     IndicesStatsRequest,
     IndicesStatsResponse,
     ShardStats,
-    Void> {
+    Supplier<IndicesQueryCache.CacheTotals>> {
 
     private final IndicesService indicesService;
     private final ProjectResolver projectResolver;
@@ -112,19 +115,32 @@ protected IndicesStatsRequest readRequestFrom(StreamInput in) throws IOException
         return new IndicesStatsRequest(in);
     }
 
+    @Override
+    protected Supplier<IndicesQueryCache.CacheTotals> createNodeContext() {
+        return CachedSupplier.wrap(() -> IndicesQueryCache.getCacheTotalsForAllShards(indicesService));
+    }
+
     @Override
     protected void shardOperation(
         IndicesStatsRequest request,
         ShardRouting shardRouting,
         Task task,
-        Void nodeContext,
+        Supplier<IndicesQueryCache.CacheTotals> context,
         ActionListener<ShardStats> listener
     ) {
         ActionListener.completeWith(listener, () -> {
            assert task instanceof CancellableTask;
            IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex());
            IndexShard indexShard = indexService.getShard(shardRouting.shardId().id());
-            CommonStats commonStats = CommonStats.getShardLevelStats(indicesService.getIndicesQueryCache(), indexShard, request.flags());
+            CommonStats commonStats = CommonStats.getShardLevelStats(
+                indicesService.getIndicesQueryCache(),
+                indexShard,
+                request.flags(),
+                () -> {
+                    final IndicesQueryCache queryCache = indicesService.getIndicesQueryCache();
+                    return (queryCache == null) ? 0L : queryCache.getSharedRamSizeForShard(indexShard.shardId(), context.get());
+                }
+            );
             CommitStats commitStats;
             SeqNoStats seqNoStats;
             RetentionLeaseStats retentionLeaseStats;
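Judging from its use here, createNodeContext() is invoked once per node-level broadcast request, and wrapping the totals computation in CachedSupplier means it runs at most once per request — and, being lazy, not at all if no shardOperation ever needs it. A simplified stand-in for the memoize-once contract relied on (the production org.elasticsearch.common.util.CachedSupplier presumably also optimizes concurrent first callers; this sketch just uses a coarse lock):

    import java.util.function.Supplier;

    // Minimal sketch of a memoizing supplier: the delegate runs at most once.
    final class MemoizeOnce<T> implements Supplier<T> {
        private Supplier<T> delegate;
        private T value;
        private boolean computed;

        MemoizeOnce(Supplier<T> delegate) {
            this.delegate = delegate;
        }

        @Override
        public synchronized T get() {
            if (computed == false) {
                value = delegate.get(); // the expensive computation happens here, once
                computed = true;
                delegate = null;        // drop captured state once it is no longer needed
            }
            return value;
        }
    }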
diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java
index 6c4694b4f8235..1f7977ee41de0 100644
--- a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java
+++ b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java
@@ -26,17 +26,21 @@
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.Predicates;
+import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.cache.query.QueryCacheStats;
+import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.IdentityHashMap;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.function.Predicate;
+import java.util.function.Supplier;
 
 public class IndicesQueryCache implements QueryCache, Closeable {
 
@@ -67,6 +71,38 @@ public class IndicesQueryCache implements QueryCache, Closeable {
     private final Map<ShardId, Stats> shardStats = new ConcurrentHashMap<>();
     private volatile long sharedRamBytesUsed;
 
+    /**
+     * Calculates a map of {@link ShardId} to {@link Long} containing each shard's calculated share of the {@link IndicesQueryCache}
+     * shared ram size (that is, the sum of all the longs is the shared ram size of the indices query cache). Since many shards will
+     * not participate in the cache, shards whose calculated share is zero are not contained in the map at all. As a consequence, the
+     * correct pattern for reading the returned map is {@link Map#getOrDefault(Object, Object)} with a {@code defaultValue} of
+     * {@code 0L}.
+     */
+    public static Map<ShardId, Long> getSharedRamSizeForAllShards(IndicesService indicesService) {
+        Map<ShardId, Long> shardIdToSharedRam = new HashMap<>();
+        IndicesQueryCache.CacheTotals cacheTotals = IndicesQueryCache.getCacheTotalsForAllShards(indicesService);
+        for (IndexService indexService : indicesService) {
+            for (IndexShard indexShard : indexService) {
+                final var queryCache = indicesService.getIndicesQueryCache();
+                long sharedRam = (queryCache == null) ? 0L : queryCache.getSharedRamSizeForShard(indexShard.shardId(), cacheTotals);
+                // as a size optimization, only store non-zero values in the map
+                if (sharedRam > 0L) {
+                    shardIdToSharedRam.put(indexShard.shardId(), sharedRam);
+                }
+            }
+        }
+        return Collections.unmodifiableMap(shardIdToSharedRam);
+    }
+
+    public long getCacheSizeForShard(ShardId shardId) {
+        Stats stats = shardStats.get(shardId);
+        return stats != null ? stats.cacheSize : 0L;
+    }
+
+    public long getSharedRamBytesUsed() {
+        return sharedRamBytesUsed;
+    }
+
     // This is a hack for the fact that the close listener for the
     // ShardCoreKeyMap will be called before onDocIdSetEviction
     // See onDocIdSetEviction for more info
@@ -89,40 +125,58 @@ private static QueryCacheStats toQueryCacheStatsSafe(@Nullable Stats stats) {
         return stats == null ? new QueryCacheStats() : stats.toQueryCacheStats();
     }
 
-    private long getShareOfAdditionalRamBytesUsed(long itemsInCacheForShard) {
-        if (sharedRamBytesUsed == 0L) {
-            return 0L;
-        }
-
-        /*
-         * We have some shared ram usage that we try to distribute proportionally to the number of segment-requests in the cache for each
-         * shard.
-         */
-        // TODO avoid looping over all local shards here - see https://github.com/elastic/elasticsearch/issues/97222
+    /**
+     * Computes the total number of items in the query cache and the total number of shards seen, across all shards on this node.
+     * @param indicesService the indices service, used to iterate over all shards on this node
+     * @return A CacheTotals object containing the computed total number of items in the cache and the number of shards seen
+     */
+    public static CacheTotals getCacheTotalsForAllShards(IndicesService indicesService) {
+        IndicesQueryCache queryCache = indicesService.getIndicesQueryCache();
+        boolean hasQueryCache = queryCache != null;
         long totalItemsInCache = 0L;
         int shardCount = 0;
-        if (itemsInCacheForShard == 0L) {
-            for (final var stats : shardStats.values()) {
-                shardCount += 1;
-                if (stats.cacheSize > 0L) {
-                    // some shard has nonzero cache footprint, so we apportion the shared size by cache footprint, and this shard has none
-                    return 0L;
-                }
-            }
-        } else {
-            // branchless loop for the common case
-            for (final var stats : shardStats.values()) {
-                shardCount += 1;
-                totalItemsInCache += stats.cacheSize;
+        for (final IndexService indexService : indicesService) {
+            for (final IndexShard indexShard : indexService) {
+                final var shardId = indexShard.shardId();
+                long cacheSize = hasQueryCache ? queryCache.getCacheSizeForShard(shardId) : 0L;
+                shardCount++;
+                assert cacheSize >= 0 : "Unexpected cache size of " + cacheSize + " for shard " + shardId;
+                totalItemsInCache += cacheSize;
             }
         }
+        return new CacheTotals(totalItemsInCache, shardCount);
+    }
 
+    public static long getSharedRamSizeForShard(IndicesService indicesService, ShardId shardId) {
+        IndicesQueryCache.CacheTotals cacheTotals = IndicesQueryCache.getCacheTotalsForAllShards(indicesService);
+        final var queryCache = indicesService.getIndicesQueryCache();
+        return (queryCache == null) ? 0L : queryCache.getSharedRamSizeForShard(shardId, cacheTotals);
+    }
+
+    /**
+     * Computes the share of the cache's shared RAM size, in bytes, that is apportioned to the given shard.
+     * @param shardId The shard to compute the shared RAM size for
+     * @param cacheTotals Shard totals computed in getCacheTotalsForAllShards()
+     * @return the shared RAM size in bytes allocated to the given shard, or 0 if unavailable
+     */
+    public long getSharedRamSizeForShard(ShardId shardId, CacheTotals cacheTotals) {
+        long sharedRamBytesUsed = getSharedRamBytesUsed();
+        if (sharedRamBytesUsed == 0L) {
+            return 0L;
+        }
+
+        int shardCount = cacheTotals.shardCount();
         if (shardCount == 0) {
             // Sometimes it's not possible to do this when there are no shard entries at all, which can happen as the shared ram usage can
             // extend beyond the closing of all shards.
             return 0L;
         }
-
+        /*
+         * We have some shared ram usage that we try to distribute proportionally to the number of segment-requests in the cache for each
+         * shard.
+         */
+        long totalItemsInCache = cacheTotals.totalItemsInCache();
+        long itemsInCacheForShard = getCacheSizeForShard(shardId);
         final long additionalRamBytesUsed;
         if (totalItemsInCache == 0) {
             // all shards have zero cache footprint, so we apportion the size of the shared bytes equally across all shards
@@ -143,10 +197,12 @@ private long getShareOfAdditionalRamBytesUsed(long itemsInCacheForShard) {
         return additionalRamBytesUsed;
     }
 
+    public record CacheTotals(long totalItemsInCache, int shardCount) {}
+
     /** Get usage statistics for the given shard. */
-    public QueryCacheStats getStats(ShardId shard) {
+    public QueryCacheStats getStats(ShardId shard, Supplier<Long> precomputedSharedRamBytesUsed) {
         final QueryCacheStats queryCacheStats = toQueryCacheStatsSafe(shardStats.get(shard));
-        queryCacheStats.addRamBytesUsed(getShareOfAdditionalRamBytesUsed(queryCacheStats.getCacheSize()));
+        queryCacheStats.addRamBytesUsed(precomputedSharedRamBytesUsed.get());
         return queryCacheStats;
     }
 
@@ -243,7 +299,7 @@ QueryCacheStats toQueryCacheStats() {
         public String toString() {
             return "{shardId="
                 + shardId
-                + ", ramBytedUsed="
+                + ", ramBytesUsed="
                 + ramBytesUsed
                 + ", hitCount="
                 + hitCount
@@ -340,11 +396,7 @@ protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) {
         shardStats.cacheCount += 1;
         shardStats.ramBytesUsed += ramBytesUsed;
 
-        StatsAndCount statsAndCount = stats2.get(readerCoreKey);
-        if (statsAndCount == null) {
-            statsAndCount = new StatsAndCount(shardStats);
-            stats2.put(readerCoreKey, statsAndCount);
-        }
+        StatsAndCount statsAndCount = stats2.computeIfAbsent(readerCoreKey, ignored -> new StatsAndCount(shardStats));
         statsAndCount.count += 1;
     }
 
@@ -357,7 +409,7 @@ protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sum
         if (numEntries > 0) {
             // We can't use ShardCoreKeyMap here because its core closed
             // listener is called before the listener of the cache which
-            // triggers this eviction. So instead we use use stats2 that
+            // triggers this eviction. So instead we use stats2 that
             // we only evict when nothing is cached anymore on the segment
             // instead of relying on close listeners
             final StatsAndCount statsAndCount = stats2.get(readerCoreKey);
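The apportioning rule above distributes sharedRamBytesUsed across shards by each shard's fraction of cached items, and falls back to an even split when nothing is cached at all. A self-contained restatement using the same numbers as testGetSharedRamSizeForShard further down (the use of Math.round is an assumption for illustration; the exact rounding in the production code falls outside the visible hunks):

    // Distilled sketch of the shared-RAM apportioning rule.
    public class ShareRule {
        static long share(long sharedRamBytesUsed, long itemsForShard, long totalItems, int shardCount) {
            if (sharedRamBytesUsed == 0L || shardCount == 0) {
                return 0L; // nothing to apportion, or no shards left to apportion it to
            }
            if (totalItems == 0) {
                return Math.round((double) sharedRamBytesUsed / shardCount); // equal split
            }
            return Math.round((double) sharedRamBytesUsed * itemsForShard / totalItems); // proportional
        }

        public static void main(String[] args) {
            // The three cases exercised by the test below (run with -ea to check the asserts):
            assert share(0, 100, 300, 2) == 0L;     // no shared ram to distribute
            assert share(600, 100, 300, 2) == 200L; // proportional to cached items
            assert share(600, 200, 300, 2) == 400L;
            assert share(600, 0, 0, 2) == 300L;     // empty cache: split equally
        }
    }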
diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java
index 9c7f1d277bf9d..7d09b203722fb 100644
--- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java
+++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java
@@ -187,6 +187,7 @@
 import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.LongSupplier;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;
 
 import static java.util.Collections.emptyList;
@@ -520,33 +521,40 @@ static Map<Index, CommonStats> statsByIndex(final IndicesService indicesService,
     }
 
     static Map<Index, List<IndexShardStats>> statsByShard(final IndicesService indicesService, final CommonStatsFlags flags) {
+        Map<ShardId, Long> shardIdToSharedRam = IndicesQueryCache.getSharedRamSizeForAllShards(indicesService);
         final Map<Index, List<IndexShardStats>> statsByShard = new HashMap<>();
-
         for (final IndexService indexService : indicesService) {
             for (final IndexShard indexShard : indexService) {
+                // look up this shard's share of the shared ram (getOrDefault: a shard absent from the map holds none)
                 try {
-                    final IndexShardStats indexShardStats = indicesService.indexShardStats(indicesService, indexShard, flags);
-
+                    final IndexShardStats indexShardStats = indicesService.indexShardStats(
+                        indicesService,
+                        indexShard,
+                        flags,
+                        () -> shardIdToSharedRam.getOrDefault(indexShard.shardId(), 0L)
+                    );
                     if (indexShardStats == null) {
                         continue;
                     }
-
                     if (statsByShard.containsKey(indexService.index()) == false) {
                         statsByShard.put(indexService.index(), arrayAsArrayList(indexShardStats));
                     } else {
                         statsByShard.get(indexService.index()).add(indexShardStats);
                     }
                 } catch (IllegalIndexShardStateException | AlreadyClosedException e) {
-                    // we can safely ignore illegal state on ones that are closing for example
                     logger.trace(() -> format("%s ignoring shard stats", indexShard.shardId()), e);
                 }
             }
         }
-
         return statsByShard;
     }
 
-    IndexShardStats indexShardStats(final IndicesService indicesService, final IndexShard indexShard, final CommonStatsFlags flags) {
+    IndexShardStats indexShardStats(
+        final IndicesService indicesService,
+        final IndexShard indexShard,
+        final CommonStatsFlags flags,
+        final Supplier<Long> precomputedSharedRam
+    ) {
         if (indexShard.routingEntry() == null) {
             return null;
         }
@@ -571,7 +579,7 @@ IndexShardStats indexShardStats(final IndicesService indicesService, final Index
             new ShardStats(
                 indexShard.routingEntry(),
                 indexShard.shardPath(),
-                CommonStats.getShardLevelStats(indicesService.getIndicesQueryCache(), indexShard, flags),
+                CommonStats.getShardLevelStats(indicesService.getIndicesQueryCache(), indexShard, flags, precomputedSharedRam),
                 commitStats,
                 seqNoStats,
                 retentionLeaseStats,
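This is the heart of the fix for issue #97222: the removed TODO marked a per-shard computation that re-walked every shard's stats, making a stats request quadratic in the shard count; the shares are now built for all shards in one pass and consumed by constant-time lookups. A runnable miniature of the access-pattern change, with plain JDK types standing in for shards:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Sketch of the one-pass aggregation now done up front, instead of once per shard.
    public class StatsPassDemo {
        public static void main(String[] args) {
            List<String> shardIds = List.of("s0", "s1", "s2");
            Map<String, Long> cachedItems = Map.of("s0", 100L, "s1", 200L); // s2 caches nothing
            long sharedRamBytes = 600L;

            // Single pass (getSharedRamSizeForAllShards in the diff): build every share up front.
            long totalItems = cachedItems.values().stream().mapToLong(Long::longValue).sum();
            Map<String, Long> shares = new HashMap<>();
            for (String shardId : shardIds) {
                long items = cachedItems.getOrDefault(shardId, 0L);
                long share = totalItems == 0 ? sharedRamBytes / shardIds.size() : sharedRamBytes * items / totalItems;
                if (share > 0) {
                    shares.put(shardId, share); // zero shares are deliberately left out of the map
                }
            }

            // Per-shard consumers (the statsByShard loop) now read their share in O(1).
            for (String shardId : shardIds) {
                System.out.println(shardId + " -> " + shares.getOrDefault(shardId, 0L));
            }
            // prints: s0 -> 200, s1 -> 400, s2 -> 0
        }
    }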
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java
index fbd6e0916eefe..990ed39ca1e4e 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java
@@ -115,7 +115,7 @@ public void testCreation() {
         ShardStats shardStats = new ShardStats(
             shardRouting,
             new ShardPath(false, path, path, shardRouting.shardId()),
-            CommonStats.getShardLevelStats(null, indexShard, new CommonStatsFlags(CommonStatsFlags.Flag.Store)),
+            CommonStats.getShardLevelStats(null, indexShard, new CommonStatsFlags(CommonStatsFlags.Flag.Store), () -> 0L),
             null,
             null,
             null,
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 25ee257c3eeb5..13612c745ce12 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -1626,7 +1626,7 @@ public void testShardStats() throws IOException {
         ShardStats stats = new ShardStats(
             shard.routingEntry(),
             shard.shardPath(),
-            CommonStats.getShardLevelStats(new IndicesQueryCache(Settings.EMPTY), shard, new CommonStatsFlags()),
+            CommonStats.getShardLevelStats(new IndicesQueryCache(Settings.EMPTY), shard, new CommonStatsFlags(), () -> 0L),
             shard.commitStats(),
             shard.seqNoStats(),
             shard.getRetentionLeaseStats(),
diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java
index 5629fe81511c6..7070a2ca0895f 100644
--- a/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java
@@ -30,8 +30,11 @@
 import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.IOUtils;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.cache.query.QueryCacheStats;
 import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy;
+import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESTestCase;
 
@@ -44,6 +47,8 @@
 import static org.hamcrest.Matchers.closeTo;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.lessThan;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 public class IndicesQueryCacheTests extends ESTestCase {
 
@@ -56,10 +61,6 @@ private static class DummyQuery extends Query implements Accountable {
             this(Integer.toString(id), 10);
         }
 
-        DummyQuery(String id) {
-            this(id, 10);
-        }
-
         DummyQuery(String id, long sizeInCache) {
             this.id = id;
             this.sizeInCache = sizeInCache;
@@ -86,10 +87,10 @@ public String toString(String field) {
     }
 
     @Override
-    public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+    public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) {
         return new ConstantScoreWeight(this, boost) {
             @Override
-            public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
+            public ScorerSupplier scorerSupplier(LeafReaderContext context) {
                 Scorer scorer = new ConstantScoreScorer(score(), scoreMode, DocIdSetIterator.all(context.reader().maxDoc()));
                 return new DefaultScorerSupplier(scorer);
             }
@@ -125,7 +126,7 @@ public void testBasics() throws IOException {
         IndicesQueryCache cache = new IndicesQueryCache(settings);
         s.setQueryCache(cache);
 
-        QueryCacheStats stats = cache.getStats(shard);
+        QueryCacheStats stats = cache.getStats(shard, () -> 0L);
         assertEquals(0L, stats.getCacheSize());
         assertEquals(0L, stats.getCacheCount());
         assertEquals(0L, stats.getHitCount());
@@ -133,7 +134,7 @@
 
         assertEquals(1, s.count(new DummyQuery(0)));
 
-        stats = cache.getStats(shard);
+        stats = cache.getStats(shard, () -> 0L);
         assertEquals(1L, stats.getCacheSize());
         assertEquals(1L, stats.getCacheCount());
         assertEquals(0L, stats.getHitCount());
@@ -143,7 +144,7 @@
             assertEquals(1, s.count(new DummyQuery(i)));
         }
 
-        stats = cache.getStats(shard);
+        stats = cache.getStats(shard, () -> 0L);
         assertEquals(10L, stats.getCacheSize());
         assertEquals(20L, stats.getCacheCount());
         assertEquals(0L, stats.getHitCount());
@@ -151,7 +152,7 @@
 
         s.count(new DummyQuery(10));
 
-        stats = cache.getStats(shard);
+        stats = cache.getStats(shard, () -> 0L);
         assertEquals(10L, stats.getCacheSize());
         assertEquals(20L, stats.getCacheCount());
         assertEquals(1L, stats.getHitCount());
@@ -160,7 +161,7 @@
         IOUtils.close(r, dir);
 
         // got emptied, but no changes to other metrics
-        stats = cache.getStats(shard);
+        stats = cache.getStats(shard, () -> 0L);
         assertEquals(0L, stats.getCacheSize());
         assertEquals(20L, stats.getCacheCount());
         assertEquals(1L, stats.getHitCount());
@@ -169,7 +170,7 @@
         cache.onClose(shard);
 
         // forgot everything
-        stats = cache.getStats(shard);
+        stats = cache.getStats(shard, () -> 0L);
         assertEquals(0L, stats.getCacheSize());
         assertEquals(0L, stats.getCacheCount());
         assertEquals(0L, stats.getHitCount());
@@ -209,13 +210,13 @@ public void testTwoShards() throws IOException {
 
         assertEquals(1, s1.count(new DummyQuery(0)));
 
-        QueryCacheStats stats1 = cache.getStats(shard1);
+        QueryCacheStats stats1 = cache.getStats(shard1, () -> 0L);
         assertEquals(1L, stats1.getCacheSize());
         assertEquals(1L, stats1.getCacheCount());
         assertEquals(0L, stats1.getHitCount());
         assertEquals(2L, stats1.getMissCount());
 
-        QueryCacheStats stats2 = cache.getStats(shard2);
+        QueryCacheStats stats2 = cache.getStats(shard2, () -> 0L);
         assertEquals(0L, stats2.getCacheSize());
         assertEquals(0L, stats2.getCacheCount());
         assertEquals(0L, stats2.getHitCount());
@@ -223,13 +224,13 @@
 
         assertEquals(1, s2.count(new DummyQuery(0)));
 
-        stats1 = cache.getStats(shard1);
+        stats1 = cache.getStats(shard1, () -> 0L);
         assertEquals(1L, stats1.getCacheSize());
         assertEquals(1L, stats1.getCacheCount());
         assertEquals(0L, stats1.getHitCount());
         assertEquals(2L, stats1.getMissCount());
 
-        stats2 = cache.getStats(shard2);
+        stats2 = cache.getStats(shard2, () -> 0L);
         assertEquals(1L, stats2.getCacheSize());
         assertEquals(1L, stats2.getCacheCount());
         assertEquals(0L, stats2.getHitCount());
@@ -239,13 +240,13 @@
             assertEquals(1, s2.count(new DummyQuery(i)));
         }
 
-        stats1 = cache.getStats(shard1);
+        stats1 = cache.getStats(shard1, () -> 0L);
         assertEquals(0L, stats1.getCacheSize()); // evicted
         assertEquals(1L, stats1.getCacheCount());
         assertEquals(0L, stats1.getHitCount());
         assertEquals(2L, stats1.getMissCount());
 
-        stats2 = cache.getStats(shard2);
+        stats2 = cache.getStats(shard2, () -> 0L);
         assertEquals(10L, stats2.getCacheSize());
         assertEquals(20L, stats2.getCacheCount());
         assertEquals(1L, stats2.getHitCount());
@@ -254,13 +255,13 @@
         IOUtils.close(r1, dir1);
 
         // no changes
-        stats1 = cache.getStats(shard1);
+        stats1 = cache.getStats(shard1, () -> 0L);
         assertEquals(0L, stats1.getCacheSize());
         assertEquals(1L, stats1.getCacheCount());
         assertEquals(0L, stats1.getHitCount());
         assertEquals(2L, stats1.getMissCount());
 
-        stats2 = cache.getStats(shard2);
+        stats2 = cache.getStats(shard2, () -> 0L);
         assertEquals(10L, stats2.getCacheSize());
         assertEquals(20L, stats2.getCacheCount());
         assertEquals(1L, stats2.getHitCount());
@@ -269,13 +270,13 @@
         cache.onClose(shard1);
 
         // forgot everything about shard1
-        stats1 = cache.getStats(shard1);
+        stats1 = cache.getStats(shard1, () -> 0L);
         assertEquals(0L, stats1.getCacheSize());
         assertEquals(0L, stats1.getCacheCount());
         assertEquals(0L, stats1.getHitCount());
         assertEquals(0L, stats1.getMissCount());
 
-        stats2 = cache.getStats(shard2);
+        stats2 = cache.getStats(shard2, () -> 0L);
         assertEquals(10L, stats2.getCacheSize());
         assertEquals(20L, stats2.getCacheCount());
         assertEquals(1L, stats2.getHitCount());
@@ -285,14 +286,14 @@
         cache.onClose(shard2);
 
         // forgot everything about shard2
-        stats1 = cache.getStats(shard1);
+        stats1 = cache.getStats(shard1, () -> 0L);
         assertEquals(0L, stats1.getCacheSize());
         assertEquals(0L, stats1.getCacheCount());
         assertEquals(0L, stats1.getHitCount());
         assertEquals(0L, stats1.getMissCount());
         assertEquals(0L, stats1.getMemorySizeInBytes());
 
-        stats2 = cache.getStats(shard2);
+        stats2 = cache.getStats(shard2, () -> 0L);
         assertEquals(0L, stats2.getCacheSize());
         assertEquals(0L, stats2.getCacheCount());
         assertEquals(0L, stats2.getHitCount());
@@ -341,7 +342,7 @@ public void testStatsOnEviction() throws IOException {
             assertEquals(1, s2.count(new DummyQuery(i)));
         }
 
-        QueryCacheStats stats1 = cache.getStats(shard1);
+        QueryCacheStats stats1 = cache.getStats(shard1, () -> 0L);
         assertEquals(0L, stats1.getCacheSize());
         assertEquals(1L, stats1.getCacheCount());
 
@@ -420,7 +421,7 @@ public void testDelegatesScorerSupplier() throws Exception {
         assertNotSame(weight, cached);
         assertFalse(weight.scorerCalled);
         assertFalse(weight.scorerSupplierCalled);
-        cached.scorerSupplier(s.getIndexReader().leaves().get(0));
+        cached.scorerSupplier(s.getIndexReader().leaves().getFirst());
         assertFalse(weight.scorerCalled);
         assertTrue(weight.scorerSupplierCalled);
         IOUtils.close(r, dir);
@@ -428,6 +429,48 @@
         cache.close();
     }
 
+    public void testGetSharedRamSizeForShard() {
+        ShardId shardId1 = new ShardId("index", "_na_", 0);
+        ShardId shardId2 = new ShardId("index", "_na_", 1);
+        IndexShard shard1 = mock(IndexShard.class);
+        IndexShard shard2 = mock(IndexShard.class);
+        when(shard1.shardId()).thenReturn(shardId1);
+        when(shard2.shardId()).thenReturn(shardId2);
+
+        // Case 1: sharedRamBytesUsed = 0
+        IndicesService indicesService = getTestIndicesService(List.of(Tuple.tuple(shard1, 100L), Tuple.tuple(shard2, 200L)), 0);
+        long sharedRam = IndicesQueryCache.getSharedRamSizeForShard(indicesService, shardId1);
+        assertEquals(0L, sharedRam);
+        // Case 2: sharedRamBytesUsed > 0, totalSize > 0, proportional
+        indicesService = getTestIndicesService(List.of(Tuple.tuple(shard1, 100L), Tuple.tuple(shard2, 200L)), 600);
+        long sharedRam1 = IndicesQueryCache.getSharedRamSizeForShard(indicesService, shardId1);
+        assertEquals(200L, sharedRam1);
+        long sharedRam2 = IndicesQueryCache.getSharedRamSizeForShard(indicesService, shardId2);
+        assertEquals(400L, sharedRam2);
+        // Case 3: totalSize == 0, shared equally
+        indicesService = getTestIndicesService(List.of(Tuple.tuple(shard1, 0L), Tuple.tuple(shard2, 0L)), 600);
+        long sharedRamEq = IndicesQueryCache.getSharedRamSizeForShard(indicesService, shardId1);
+        assertEquals(300L, sharedRamEq);
+    }
+
+    private IndicesService getTestIndicesService(List<Tuple<IndexShard, Long>> shardsAndSizes, long sharedRamBytesUsed) {
+        IndicesQueryCache queryCache = mock(IndicesQueryCache.class);
+        for (Tuple<IndexShard, Long> shardAndSize : shardsAndSizes) {
+            when(queryCache.getCacheSizeForShard(shardAndSize.v1().shardId())).thenReturn(shardAndSize.v2());
+        }
+        when(queryCache.getSharedRamBytesUsed()).thenReturn(sharedRamBytesUsed);
+        return getTestIndicesService(shardsAndSizes.stream().map(Tuple::v1).toList(), queryCache);
+    }
+
+    private IndicesService getTestIndicesService(List<IndexShard> shards, IndicesQueryCache queryCache) {
+        IndexService indexService = mock(IndexService.class);
+        when(indexService.iterator()).thenAnswer(ignored -> shards.iterator());
+        IndicesService indicesService = mock(IndicesService.class);
+        when(indicesService.iterator()).thenAnswer(ignored -> List.of(indexService).iterator());
+        when(indicesService.getIndicesQueryCache()).thenReturn(queryCache);
+        return indicesService;
+    }
+
     public void testGetStatsMemory() throws Exception {
         /*
          * This test creates 2 shards, one with two segments and one with one. It makes unique queries against all 3 segments (so that each
@@ -458,7 +502,7 @@
         shard1Segment2Searcher.setQueryCache(cache);
         shard2Searcher.setQueryCache(cache);
 
-        assertEquals(0L, cache.getStats(shard1).getMemorySizeInBytes());
+        assertEquals(0L, cache.getStats(shard1, () -> 0L).getMemorySizeInBytes());
 
         final long largeQuerySize = randomIntBetween(100, 1000);
         final long smallQuerySize = randomIntBetween(10, 50);
@@ -469,65 +513,88 @@
         for (int i = 0; i < shard1Queries; ++i) {
             shard1Segment1Searcher.count(new DummyQuery("ingest1-" + i, largeQuerySize));
         }
+        IndexShard indexShard1 = mock(IndexShard.class);
+        when(indexShard1.shardId()).thenReturn(shard1);
+        IndexShard indexShard2 = mock(IndexShard.class);
+        when(indexShard2.shardId()).thenReturn(shard2);
         long shard1Segment1CacheMemory = calculateActualCacheMemoryForSegment(shard1Queries, largeQuerySize, shard1Segment1Docs);
-        assertThat(cache.getStats(shard1).getMemorySizeInBytes(), equalTo(shard1Segment1CacheMemory));
-        assertThat(cache.getStats(shard2).getMemorySizeInBytes(), equalTo(0L));
-        for (int i = 0; i < shard2Queries; ++i) {
-            shard2Searcher.count(new DummyQuery("ingest2-" + i, smallQuerySize));
+        {
+            IndicesService indicesService = getTestIndicesService(List.of(indexShard1), cache);
+            final long sharedRamSizeShard1 = IndicesQueryCache.getSharedRamSizeForShard(indicesService, shard1);
+            final long sharedRamSizeShard2 = IndicesQueryCache.getSharedRamSizeForShard(indicesService, shard2);
+            assertThat(cache.getStats(shard1, () -> sharedRamSizeShard1).getMemorySizeInBytes(), equalTo(shard1Segment1CacheMemory));
+            assertThat(cache.getStats(shard2, () -> sharedRamSizeShard2).getMemorySizeInBytes(), equalTo(0L));
+            for (int i = 0; i < shard2Queries; ++i) {
+                shard2Searcher.count(new DummyQuery("ingest2-" + i, smallQuerySize));
+            }
         }
-        /*
-         * Now that we have cached some smaller things for shard2, the cache memory for shard1 has gone down. This is expected because we
-         * report cache memory proportional to the number of segments for each shard, ignoring the number of documents or the actual
-         * document sizes. Since the shard2 requests were smaller, the average cache memory size per segment has now gone down.
-         */
-        assertThat(cache.getStats(shard1).getMemorySizeInBytes(), lessThan(shard1Segment1CacheMemory));
-        long shard1CacheBytes = cache.getStats(shard1).getMemorySizeInBytes();
-        long shard2CacheBytes = cache.getStats(shard2).getMemorySizeInBytes();
-        long shard2Segment1CacheMemory = calculateActualCacheMemoryForSegment(shard2Queries, smallQuerySize, shard2Segment1Docs);
-
-        long totalMemory = shard1Segment1CacheMemory + shard2Segment1CacheMemory;
-        // Each shard has some fixed overhead that we need to account for:
-        long shard1Overhead = calculateOverheadForSegment(shard1Queries, shard1Segment1Docs);
-        long shard2Overhead = calculateOverheadForSegment(shard2Queries, shard2Segment1Docs);
-        long totalMemoryMinusOverhead = totalMemory - (shard1Overhead + shard2Overhead);
-        /*
-         * Note that the expected amount of memory we're calculating is based on the proportion of the number of queries to each shard
-         * (since each shard currently only has queries to one segment)
-         */
-        double shard1Segment1CacheMemoryShare = ((double) shard1Queries / (shard1Queries + shard2Queries)) * (totalMemoryMinusOverhead)
-            + shard1Overhead;
-        double shard2Segment1CacheMemoryShare = ((double) shard2Queries / (shard1Queries + shard2Queries)) * (totalMemoryMinusOverhead)
-            + shard2Overhead;
-        assertThat((double) shard1CacheBytes, closeTo(shard1Segment1CacheMemoryShare, 1)); // accounting for rounding
-        assertThat((double) shard2CacheBytes, closeTo(shard2Segment1CacheMemoryShare, 1)); // accounting for rounding
-
-        // Now we cache just more "big" searches on shard1, but on a different segment:
-        for (int i = 0; i < shard1Queries; ++i) {
-            shard1Segment2Searcher.count(new DummyQuery("ingest3-" + i, largeQuerySize));
+        double shard1Segment1CacheMemoryShare;
+        {
+            /*
+             * Now that we have cached some smaller things for shard2, the cache memory for shard1 has gone down. This is expected because
+             * we report cache memory proportional to the number of segments for each shard, ignoring the number of documents or the actual
+             * document sizes. Since the shard2 requests were smaller, the average cache memory size per segment has now gone down.
+ */ + IndicesService indicesService = getTestIndicesService(List.of(indexShard1, indexShard2), cache); + final long sharedRamSizeShard1 = IndicesQueryCache.getSharedRamSizeForShard(indicesService, shard1); + final long sharedRamSizeShard2 = IndicesQueryCache.getSharedRamSizeForShard(indicesService, shard2); + assertThat(cache.getStats(shard1, () -> sharedRamSizeShard1).getMemorySizeInBytes(), lessThan(shard1Segment1CacheMemory)); + long shard1CacheBytes = cache.getStats(shard1, () -> sharedRamSizeShard1).getMemorySizeInBytes(); + long shard2CacheBytes = cache.getStats(shard2, () -> sharedRamSizeShard2).getMemorySizeInBytes(); + long shard2Segment1CacheMemory = calculateActualCacheMemoryForSegment(shard2Queries, smallQuerySize, shard2Segment1Docs); + + long totalMemory = shard1Segment1CacheMemory + shard2Segment1CacheMemory; + // Each shard has some fixed overhead that we need to account for: + long shard1Overhead = calculateOverheadForSegment(shard1Queries, shard1Segment1Docs); + long shard2Overhead = calculateOverheadForSegment(shard2Queries, shard2Segment1Docs); + long totalMemoryMinusOverhead = totalMemory - (shard1Overhead + shard2Overhead); + /* + * Note that the expected amount of memory we're calculating is based on the proportion of the number of queries to each shard + * (since each shard currently only has queries to one segment) + */ + shard1Segment1CacheMemoryShare = ((double) shard1Queries / (shard1Queries + shard2Queries)) * (totalMemoryMinusOverhead) + + shard1Overhead; + double shard2Segment1CacheMemoryShare = ((double) shard2Queries / (shard1Queries + shard2Queries)) * (totalMemoryMinusOverhead) + + shard2Overhead; + assertThat((double) shard1CacheBytes, closeTo(shard1Segment1CacheMemoryShare, 1)); // accounting for rounding + assertThat((double) shard2CacheBytes, closeTo(shard2Segment1CacheMemoryShare, 1)); // accounting for rounding + + // Now we cache just more "big" searches on shard1, but on a different segment: + for (int i = 0; i < shard1Queries; ++i) { + shard1Segment2Searcher.count(new DummyQuery("ingest3-" + i, largeQuerySize)); + } + long shard1Segment2CacheMemory = calculateActualCacheMemoryForSegment(shard1Queries, largeQuerySize, shard1Segment2Docs); + totalMemory = shard1Segment1CacheMemory + shard2Segment1CacheMemory + shard1Segment2CacheMemory; + // Each shard has some fixed overhead that we need to account for: + shard1Overhead = shard1Overhead + calculateOverheadForSegment(shard1Queries, shard1Segment2Docs); + totalMemoryMinusOverhead = totalMemory - (shard1Overhead + shard2Overhead); + /* + * Note that the expected amount of memory we're calculating is based on the proportion of the number of queries to each + * segment. The number of documents and the size of documents is irrelevant (aside from computing the fixed overhead). 
+ */ + shard1Segment1CacheMemoryShare = ((double) (2 * shard1Queries) / ((2 * shard1Queries) + shard2Queries)) + * (totalMemoryMinusOverhead) + shard1Overhead; } - long shard1Segment2CacheMemory = calculateActualCacheMemoryForSegment(shard1Queries, largeQuerySize, shard1Segment2Docs); - totalMemory = shard1Segment1CacheMemory + shard2Segment1CacheMemory + shard1Segment2CacheMemory; - // Each shard has some fixed overhead that we need to account for: - shard1Overhead = shard1Overhead + calculateOverheadForSegment(shard1Queries, shard1Segment2Docs); - totalMemoryMinusOverhead = totalMemory - (shard1Overhead + shard2Overhead); - /* - * Note that the expected amount of memory we're calculating is based on the proportion of the number of queries to each segment. - * The number of documents and the size of documents is irrelevant (aside from computing the fixed overhead). - */ - shard1Segment1CacheMemoryShare = ((double) (2 * shard1Queries) / ((2 * shard1Queries) + shard2Queries)) * (totalMemoryMinusOverhead) - + shard1Overhead; - shard1CacheBytes = cache.getStats(shard1).getMemorySizeInBytes(); - assertThat((double) shard1CacheBytes, closeTo(shard1Segment1CacheMemoryShare, 1)); // accounting for rounding - - // Now make sure the cache only has items for shard2: - for (int i = 0; i < (maxCacheSize * 2); ++i) { - shard2Searcher.count(new DummyQuery("ingest4-" + i, smallQuerySize)); + { + IndicesService indicesService = getTestIndicesService(List.of(indexShard1, indexShard2), cache); + final long sharedRamSizeShard1 = IndicesQueryCache.getSharedRamSizeForShard(indicesService, shard1); + final long shard1CacheBytes = cache.getStats(shard1, () -> sharedRamSizeShard1).getMemorySizeInBytes(); + assertThat((double) shard1CacheBytes, closeTo(shard1Segment1CacheMemoryShare, 1)); // accounting for rounding + } + { + // Now make sure the cache only has items for shard2: + for (int i = 0; i < (maxCacheSize * 2); ++i) { + shard2Searcher.count(new DummyQuery("ingest4-" + i, smallQuerySize)); + } + IndicesService indicesService = getTestIndicesService(List.of(indexShard2), cache); + final long sharedRamSizeShard1 = IndicesQueryCache.getSharedRamSizeForShard(indicesService, shard1); + final long sharedRamSizeShard2 = IndicesQueryCache.getSharedRamSizeForShard(indicesService, shard2); + assertThat(cache.getStats(shard1, () -> sharedRamSizeShard1).getMemorySizeInBytes(), equalTo(0L)); + assertThat( + cache.getStats(shard2, () -> sharedRamSizeShard2).getMemorySizeInBytes(), + equalTo(calculateActualCacheMemoryForSegment(maxCacheSize, smallQuerySize, shard2Segment1Docs)) + ); } - assertThat(cache.getStats(shard1).getMemorySizeInBytes(), equalTo(0L)); - assertThat( - cache.getStats(shard2).getMemorySizeInBytes(), - equalTo(calculateActualCacheMemoryForSegment(maxCacheSize, smallQuerySize, shard2Segment1Docs)) - ); IOUtils.close(closeableList); cache.onClose(shard1); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java index 78df6c9e88c65..3ef7b1ada0e01 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java @@ -180,17 +180,17 @@ public void testCloseAfterRequestHasUsedQueryCache() throws Exception { assertEquals(1, searcher.getIndexReader().maxDoc()); Query query = LongPoint.newRangeQuery("foo", 0, 5); - assertEquals(0L, cache.getStats(shard.shardId()).getCacheSize()); + assertEquals(0L, 
         searcher.search(new ConstantScoreQuery(query), 1);
-        assertEquals(1L, cache.getStats(shard.shardId()).getCacheSize());
+        assertEquals(1L, cache.getStats(shard.shardId(), () -> 0L).getCacheSize());
 
         searcher.close();
         assertEquals(2, indicesService.indicesRefCount.refCount());
-        assertEquals(1L, cache.getStats(shard.shardId()).getCacheSize());
+        assertEquals(1L, cache.getStats(shard.shardId(), () -> 0L).getCacheSize());
 
         node.close();
         assertEquals(0, indicesService.indicesRefCount.refCount());
-        assertEquals(0L, cache.getStats(shard.shardId()).getCacheSize());
+        assertEquals(0L, cache.getStats(shard.shardId(), () -> 0L).getCacheSize());
     }
 
     public void testCloseWhileOngoingRequestUsesQueryCache() throws Exception {
@@ -221,13 +221,13 @@ public void testCloseWhileOngoingRequestUsesQueryCache() throws Exception {
         assertEquals(1, indicesService.indicesRefCount.refCount());
 
         Query query = LongPoint.newRangeQuery("foo", 0, 5);
-        assertEquals(0L, cache.getStats(shard.shardId()).getCacheSize());
+        assertEquals(0L, cache.getStats(shard.shardId(), () -> 0L).getCacheSize());
         searcher.search(new ConstantScoreQuery(query), 1);
-        assertEquals(1L, cache.getStats(shard.shardId()).getCacheSize());
+        assertEquals(1L, cache.getStats(shard.shardId(), () -> 0L).getCacheSize());
 
         searcher.close();
         assertEquals(0, indicesService.indicesRefCount.refCount());
-        assertEquals(0L, cache.getStats(shard.shardId()).getCacheSize());
+        assertEquals(0L, cache.getStats(shard.shardId(), () -> 0L).getCacheSize());
     }
 
     public void testCloseWhileOngoingRequestUsesRequestCache() throws Exception {
diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java
index 7f2aa62f7cf83..b5ad8d5b9a706 100644
--- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java
@@ -613,14 +613,18 @@ public void testStatsByShardDoesNotDieFromExpectedExceptions() {
 
                 shardStats.add(successfulShardStats);
 
-                when(mockIndicesService.indexShardStats(mockIndicesService, shard, CommonStatsFlags.ALL)).thenReturn(successfulShardStats);
+                when(mockIndicesService.indexShardStats(eq(mockIndicesService), eq(shard), eq(CommonStatsFlags.ALL), any())).thenReturn(
+                    successfulShardStats
+                );
             } else {
-                when(mockIndicesService.indexShardStats(mockIndicesService, shard, CommonStatsFlags.ALL)).thenThrow(expectedException);
+                when(mockIndicesService.indexShardStats(eq(mockIndicesService), eq(shard), eq(CommonStatsFlags.ALL), any())).thenThrow(
+                    expectedException
+                );
             }
         }
 
-        when(mockIndicesService.iterator()).thenReturn(Collections.singleton(indexService).iterator());
-        when(indexService.iterator()).thenReturn(shards.iterator());
+        when(mockIndicesService.iterator()).thenAnswer(invocation -> Collections.singleton(indexService).iterator());
+        when(indexService.iterator()).thenAnswer(unused -> shards.iterator());
         when(indexService.index()).thenReturn(index);
 
         // real one, which has a logger defined
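The thenReturn-to-thenAnswer switch in the last hunk matters because an Iterator is single-use: thenReturn hands every call the same iterator, which the first consumer exhausts, while thenAnswer builds a fresh one per call. A minimal, self-contained demonstration of the pitfall (plain Mockito, independent of the classes above):

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.util.Iterator;
    import java.util.List;

    public class IteratorStubDemo {
        interface ShardHolder {
            Iterator<String> iterator();
        }

        public static void main(String[] args) {
            List<String> shards = List.of("shard-0", "shard-1");

            ShardHolder stale = mock(ShardHolder.class);
            when(stale.iterator()).thenReturn(shards.iterator()); // one iterator, shared by all calls
            stale.iterator().forEachRemaining(s -> {});           // first caller drains it
            System.out.println(stale.iterator().hasNext());       // false: later callers see nothing

            ShardHolder fresh = mock(ShardHolder.class);
            when(fresh.iterator()).thenAnswer(invocation -> shards.iterator()); // new iterator per call
            fresh.iterator().forEachRemaining(s -> {});
            System.out.println(fresh.iterator().hasNext());       // true: each caller starts over
        }
    }

The same reasoning explains why the mock helpers added to IndicesQueryCacheTests above stub iterator() with thenAnswer: getSharedRamSizeForShard iterates the mocked services more than once.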