diff --git a/docs/changelog/133314.yaml b/docs/changelog/133314.yaml
new file mode 100644
index 0000000000000..8221624d8864d
--- /dev/null
+++ b/docs/changelog/133314.yaml
@@ -0,0 +1,5 @@
+pr: 133314
+summary: Add hits and misses timing stats to DLS cache
+area: Authorization
+type: enhancement
+issues: []
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java
index f5d79b7a76c65..0122499583799 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java
@@ -42,8 +42,8 @@ public String toString() {
         }
     };
 
-    private final LongSupplier relativeNanoTimeProvider;
     private final Cache<CacheKey, Object> cache;
+    private final LongSupplier relativeNanoTimeProvider;
     private final LongAdder hitsTimeInNanos = new LongAdder();
     private final LongAdder missesTimeInNanos = new LongAdder();
 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java
index 1f0d2598d9928..0208c3f34941e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java
@@ -52,7 +52,9 @@
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.LongAdder;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.LongSupplier;
 
 /**
  * This is a cache for {@link BitSet} instances that are used with the {@link DocumentSubsetReader}.
@@ -122,18 +124,27 @@ public final class DocumentSubsetBitsetCache implements IndexReader.ClosedListen
     private final Cache<BitsetCacheKey, BitSet> bitsetCache;
     private final Map<IndexReader.CacheKey, Set<BitsetCacheKey>> keysByIndex;
     private final AtomicLong cacheFullWarningTime;
+    private final LongSupplier relativeNanoTimeProvider;
+    private final LongAdder hitsTimeInNanos = new LongAdder();
+    private final LongAdder missesTimeInNanos = new LongAdder();
 
     public DocumentSubsetBitsetCache(Settings settings, ThreadPool threadPool) {
         this(settings, threadPool.executor(ThreadPool.Names.GENERIC));
     }
 
+    // visible for testing
+    DocumentSubsetBitsetCache(Settings settings, ExecutorService cleanupExecutor) {
+        this(settings, cleanupExecutor, System::nanoTime);
+    }
+
     /**
      * @param settings The global settings object for this node
      * @param cleanupExecutor An executor on which the cache cleanup tasks can be run. Due to the way the cache is structured internally,
      *                        it is sometimes necessary to run an asynchronous task to synchronize the internal state.
+     * @param relativeNanoTimeProvider Provider of nanos for code that needs to measure relative time.
      */
     // visible for testing
-    DocumentSubsetBitsetCache(Settings settings, ExecutorService cleanupExecutor) {
+    DocumentSubsetBitsetCache(Settings settings, ExecutorService cleanupExecutor, LongSupplier relativeNanoTimeProvider) {
         final ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
         this.cacheEvictionLock = new ReleasableLock(readWriteLock.writeLock());
         this.cacheModificationLock = new ReleasableLock(readWriteLock.readLock());
@@ -150,6 +161,7 @@ public DocumentSubsetBitsetCache(Settings settings, ThreadPool threadPool) {
 
         this.keysByIndex = new ConcurrentHashMap<>();
         this.cacheFullWarningTime = new AtomicLong(0);
+        this.relativeNanoTimeProvider = Objects.requireNonNull(relativeNanoTimeProvider);
     }
 
     @Override
@@ -222,6 +234,8 @@ public long ramBytesUsed() {
      */
    @Nullable
     public BitSet getBitSet(final Query query, final LeafReaderContext context) throws ExecutionException {
+        final long cacheStart = relativeNanoTimeProvider.getAsLong();
+
         final IndexReader.CacheHelper coreCacheHelper = context.reader().getCoreCacheHelper();
         if (coreCacheHelper == null) {
             try {
@@ -235,7 +249,9 @@ public BitSet getBitSet(final Query query, final LeafReaderContext context) thro
         final BitsetCacheKey cacheKey = new BitsetCacheKey(indexKey, query);
 
         try (ReleasableLock ignored = cacheModificationLock.acquire()) {
+            final boolean[] cacheKeyWasPresent = new boolean[] { true };
             final BitSet bitSet = bitsetCache.computeIfAbsent(cacheKey, ignore1 -> {
+                cacheKeyWasPresent[0] = false;
                 // This ensures all insertions into the set are guarded by ConcurrentHashMap's atomicity guarantees.
                 keysByIndex.compute(indexKey, (ignore2, set) -> {
                     if (set == null) {
@@ -264,6 +280,11 @@ public BitSet getBitSet(final Query query, final LeafReaderContext context) thro
                 }
                 return result;
             });
+            if (cacheKeyWasPresent[0]) {
+                hitsTimeInNanos.add(relativeNanoTimeProvider.getAsLong() - cacheStart);
+            } else {
+                missesTimeInNanos.add(relativeNanoTimeProvider.getAsLong() - cacheStart);
+            }
             if (bitSet == NULL_MARKER) {
                 return null;
             } else {
@@ -331,6 +352,8 @@ public Map<String, Object> usageStats() {
         stats.put("hits", cacheStats.getHits());
         stats.put("misses", cacheStats.getMisses());
         stats.put("evictions", cacheStats.getEvictions());
+        stats.put("hits_time_in_millis", TimeValue.nsecToMSec(hitsTimeInNanos.sum()));
+        stats.put("misses_time_in_millis", TimeValue.nsecToMSec(missesTimeInNanos.sum()));
         return Collections.unmodifiableMap(stats);
     }
 
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java
index 19c48c182b85b..c0da20407d2fc 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java
@@ -62,7 +62,9 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.LongSupplier;
 import java.util.function.Supplier;
 
 import static org.hamcrest.Matchers.equalTo;
@@ -536,6 +538,8 @@ public void testHitsMissesAndEvictionsStats() throws Exception {
             stats.put("hits", 0L);
             stats.put("misses", 0L);
             stats.put("evictions", 0L);
stats.put("hits_time_in_millis", 0L); + stats.put("misses_time_in_millis", 0L); return stats; }; @@ -551,12 +555,14 @@ public void testHitsMissesAndEvictionsStats() throws Exception { expectedStats.put("misses", 1L); expectedStats.put("memory", EXPECTED_BYTES_PER_BIT_SET + "b"); expectedStats.put("memory_in_bytes", EXPECTED_BYTES_PER_BIT_SET); + expectedStats.put("misses_time_in_millis", 1L); assertThat(cache.usageStats(), equalTo(expectedStats)); // second same lookup - hit final BitSet bitSet1Again = cache.getBitSet(query1, leafContext); assertThat(bitSet1Again, sameInstance(bitSet1)); expectedStats.put("hits", 1L); + expectedStats.put("hits_time_in_millis", 1L); assertThat(cache.usageStats(), equalTo(expectedStats)); // second query - miss, should evict the first one @@ -568,6 +574,9 @@ public void testHitsMissesAndEvictionsStats() throws Exception { // see https://github.com/elastic/elasticsearch/issues/132842 expectedStats.put("misses", 3L); expectedStats.put("evictions", 1L); + // underlying Cache class tracks hits/misses, but timing is in DLS cache, which is why we have `2L` here, + // because DLS cache is only hit once + expectedStats.put("misses_time_in_millis", 2L); assertBusy(() -> { assertThat(cache.usageStats(), equalTo(expectedStats)); }, 200, TimeUnit.MILLISECONDS); }); @@ -575,6 +584,8 @@ public void testHitsMissesAndEvictionsStats() throws Exception { finalStats.put("hits", 1L); finalStats.put("misses", 3L); finalStats.put("evictions", 2L); + finalStats.put("hits_time_in_millis", 1L); + finalStats.put("misses_time_in_millis", 2L); assertThat(cache.usageStats(), equalTo(finalStats)); } @@ -697,6 +708,8 @@ private void runTestOnIndices(int numberIndices, CheckedConsumer TimeUnit.MILLISECONDS.toNanos(increasingMillisTime.getAndIncrement()); + return new DocumentSubsetBitsetCache(settings, singleThreadExecutor, relativeNanoTimeProvider); } } diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java index 882dc67eac53f..0bb5bd2a327e7 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java @@ -46,10 +46,10 @@ public final class EnrichCache { private static final CacheValue EMPTY_CACHE_VALUE = new CacheValue(List.of(), CacheKey.CACHE_KEY_SIZE); private final Cache cache; + private final LongAdder sizeInBytes = new LongAdder(); private final LongSupplier relativeNanoTimeProvider; private final LongAdder hitsTimeInNanos = new LongAdder(); private final LongAdder missesTimeInNanos = new LongAdder(); - private final LongAdder sizeInBytes = new LongAdder(); EnrichCache(long maxSize) { this(maxSize, System::nanoTime);