Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions solr/CHANGES.txt
Original file line number Diff line number Diff line change
Expand Up @@ -226,6 +226,15 @@ Other Changes
---------------------
(No changes)

================== 9.9.1 ==================
Bug Fixes
---------------------
* SOLR-17831: ExitableDirectoryReader always initialized with QueryLimits.NONE (Andrzej Białecki)

Dependency Upgrades
---------------------
(No changes)

================== 9.9.0 ==================
New Features
---------------------
Expand Down
4 changes: 2 additions & 2 deletions solr/core/src/java/org/apache/solr/search/QueryLimits.java
Original file line number Diff line number Diff line change
Expand Up @@ -37,12 +37,12 @@
* or other resource limits. Exceeding any specified limit will cause {@link #shouldExit()} to
* return true the next time it is checked (it may be checked in either Lucene code or Solr code)
*/
public class QueryLimits implements QueryTimeout {
public final class QueryLimits implements QueryTimeout {
public static final String UNLIMITED = "This request is unlimited.";
private final List<QueryLimit> limits =
new ArrayList<>(3); // timeAllowed, cpu, and memory anticipated

public static QueryLimits NONE = new QueryLimits();
public static final QueryLimits NONE = new QueryLimits();

private final SolrQueryResponse rsp;
private final boolean allowPartialResults;
Expand Down
32 changes: 25 additions & 7 deletions solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiPostingsEnum;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.QueryTimeout;
import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
Expand Down Expand Up @@ -137,16 +138,15 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable, SolrI

public static final String STATS_SOURCE = "org.apache.solr.stats_source";
public static final String STATISTICS_KEY = "searcher";

public static final String EXITABLE_READER_PROPERTY = "solr.useExitableDirectoryReader";

// These should *only* be used for debugging or monitoring purposes
public static final AtomicLong numOpens = new AtomicLong();
public static final AtomicLong numCloses = new AtomicLong();
private static final Map<String, SolrCache<?, ?>> NO_GENERIC_CACHES = Collections.emptyMap();
private static final SolrCache<?, ?>[] NO_CACHES = new SolrCache<?, ?>[0];

// If you find this useful, let us know in dev@solr.apache.org. Likely to be removed eventually.
private static final boolean useExitableDirectoryReader =
Boolean.getBoolean("solr.useExitableDirectoryReader");

public static final int EXECUTOR_MAX_CPU_THREADS = Runtime.getRuntime().availableProcessors();

private final SolrCore core;
Expand Down Expand Up @@ -207,14 +207,31 @@ private static DirectoryReader getReader(
}
}

// Stateless singleton adapter exposing the *current* request's QueryLimits as a
// Lucene QueryTimeout. Per the CHANGES entry for SOLR-17831, wrapping the reader
// with a QueryLimits instance captured at wrap time left it pinned to
// QueryLimits.NONE; this adapter instead re-resolves
// QueryLimits.getCurrentLimits() on every check, so the limits of the request
// actually being executed are consulted.
private static final class QueryLimitsTimeout implements QueryTimeout {
private static final QueryLimitsTimeout INSTANCE = new QueryLimitsTimeout();

// Singleton: not instantiable outside this class.
private QueryLimitsTimeout() {}

@Override
public boolean shouldExit() {
// Delegates to whatever limits are bound to the current request/thread.
return QueryLimits.getCurrentLimits().shouldExit();
}

@Override
public String toString() {
// Human-readable status of the current limits (useful in timeout messages/logs).
return QueryLimits.getCurrentLimits().limitStatusMessage();
}
}

// TODO: wrap elsewhere and return a "map" from the schema that overrides get() ?
// this reader supports reopen
private static DirectoryReader wrapReader(SolrCore core, DirectoryReader reader)
throws IOException {
assert reader != null;
reader = UninvertingReader.wrap(reader, core.getLatestSchema().getUninversionMapper());
if (useExitableDirectoryReader) { // SOLR-16693 legacy; may be removed. Probably inefficient.
reader = ExitableDirectoryReader.wrap(reader, QueryLimits.getCurrentLimits());
// see SOLR-16693 and SOLR-17831 for more details
if (EnvUtils.getPropertyAsBool(EXITABLE_READER_PROPERTY, Boolean.FALSE)) {
reader = ExitableDirectoryReader.wrap(reader, QueryLimitsTimeout.INSTANCE);
}
return reader;
}
Expand Down Expand Up @@ -775,7 +792,8 @@ public QueryResult search(QueryResult qr, QueryCommand cmd) throws IOException {
protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector)
throws IOException {
QueryLimits queryLimits = QueryLimits.getCurrentLimits();
if (useExitableDirectoryReader || !queryLimits.isLimitsEnabled()) {
if (EnvUtils.getPropertyAsBool(EXITABLE_READER_PROPERTY, Boolean.FALSE)
|| !queryLimits.isLimitsEnabled()) {
// no timeout. Pass through to super class
super.search(leaves, weight, collector);
} else {
Expand Down
225 changes: 54 additions & 171 deletions solr/core/src/test/org/apache/solr/core/ExitableDirectoryReaderTest.java
Original file line number Diff line number Diff line change
Expand Up @@ -16,33 +16,18 @@
*/
package org.apache.solr.core;

import static org.apache.solr.common.util.Utils.fromJSONString;

import java.util.Map;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.metrics.MetricsMap;
import org.apache.solr.metrics.SolrMetricManager;
import org.apache.solr.response.SolrQueryResponse;
import org.junit.BeforeClass;
import org.apache.solr.search.CallerSpecificQueryLimit;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.TestInjection;
import org.junit.After;
import org.junit.Test;

/**
* Test that checks that long-running queries are exited by Solr using the SolrQueryTimeoutImpl
* implementation.
*/
public class ExitableDirectoryReaderTest extends SolrTestCaseJ4 {

static final int NUM_DOCS = 100;
static final String assertionString = "/response/numFound==" + NUM_DOCS;
static final String failureAssertionString = "/responseHeader/partialResults==true]";
static final String longTimeout = "10000";
static final String sleep = "2";

@BeforeClass
public static void beforeClass() throws Exception {
initCore("solrconfig-delaying-component.xml", "schema_latest.xml");
createIndex();
}

public static void createIndex() {
for (int i = 0; i < NUM_DOCS; i++) {
Expand All @@ -59,167 +44,65 @@ public static void createIndex() {
assertU(commit());
}

@Test
public void testPrefixQuery() throws Exception {
String q = "name:a*";
assertJQ(req("q", q, "timeAllowed", "1", "sleep", sleep), failureAssertionString);

// do the same query and test for both success, and that the number of documents matched (i.e.
// make sure no partial results were cached)
assertJQ(req("q", q, "timeAllowed", longTimeout), assertionString);

// this time we should get a query cache hit and hopefully no exception? this may change in the
// future if time checks are put into other places.

// 2024-4-15: it did change..., and now this fails with 1 or 2 ms and passes with 3ms... I see
// no way this won't be terribly brittle. Maybe TestInjection of some sort to bring this back?

// assertJQ(req("q", q, "timeAllowed", "2", "sleep", sleep), assertionString);

// The idea that the request won't time out due to caching is a flawed test methodology,
// It relies on the test running quickly and not stalling. The above test should possibly
// be doing something along the lines of this (but we lack api for it)
//
// SolrCores solrCores = ExitableDirectoryReaderTest.h.getCoreContainer().solrCores;
// List<SolrCore> cores = solrCores.getCores();
// for (SolrCore core : cores) {
// if (<<< find the right core >>> ) {
// ((SolrCache)core.getSearcher().get().<<<check cache for a key like name:a* >>>
// }
// }

// now do the same for the filter cache
// 2024-4-15: this still passes probably because *:* is so fast, but it still worries me
assertJQ(req("q", "*:*", "fq", q, "timeAllowed", "1", "sleep", sleep), failureAssertionString);

// make sure that the result succeeds this time, and that a bad filter wasn't cached
assertJQ(req("q", "*:*", "fq", q, "timeAllowed", longTimeout), assertionString);

// test that Long.MAX_VALUE works
assertJQ(req("q", "name:b*", "timeAllowed", Long.toString(Long.MAX_VALUE)), assertionString);

// negative timeAllowed should disable timeouts.
assertJQ(req("q", "name:c*", "timeAllowed", "-7"), assertionString);
// Tear down the core after EACH test: the two test methods initialize the core
// themselves with different values of the exitable-reader system property, so a
// per-class core cannot be shared.
@After
public void tearDownCore() {
deleteCore();
}

// There are lots of assumptions about how/when cache entries should be changed in this method.
// The simple case above shows the root problem without the confusion. testFilterSimpleCase should
// be removed once it is running and this test should be un-ignored and the assumptions verified.
// With all the weirdness, I'm not going to vouch for this test. Feel free to change it.
@Test
public void testCacheAssumptions() throws Exception {
String fq = "name:d*";
SolrCore core = h.getCore();
MetricsMap filterCacheStats =
(MetricsMap)
((SolrMetricManager.GaugeWrapper<?>)
core.getCoreMetricManager()
.getRegistry()
.getMetrics()
.get("CACHE.searcher.filterCache"))
.getGauge();
long fqInserts = (long) filterCacheStats.getValue().get("inserts");

MetricsMap queryCacheStats =
(MetricsMap)
((SolrMetricManager.GaugeWrapper<?>)
core.getCoreMetricManager()
.getRegistry()
.getMetrics()
.get("CACHE.searcher.queryResultCache"))
.getGauge();
long qrInserts = (long) queryCacheStats.getValue().get("inserts");

// This gets 0 docs back. Use 10000 instead of 1 for timeAllowed, and it gets 100 back and the
// for loop below succeeds.
String response =
JQ(req("q", "*:*", "fq", fq, "indent", "true", "timeAllowed", "1", "sleep", sleep));
Map<?, ?> res = (Map<?, ?>) fromJSONString(response);
Map<?, ?> body = (Map<?, ?>) (res.get("response"));
assertTrue("Should have fewer docs than " + NUM_DOCS, (long) (body.get("numFound")) < NUM_DOCS);

Map<?, ?> header = (Map<?, ?>) (res.get("responseHeader"));
assertTrue(
"Should have partial results",
(Boolean) (header.get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY)));

assertEquals(
"Should NOT have inserted partial results in the cache!",
(long) queryCacheStats.getValue().get("inserts"),
qrInserts);

assertEquals(
"Should NOT have another insert",
fqInserts,
(long) filterCacheStats.getValue().get("inserts"));

// At the end of all this, we should have no hits in the queryResultCache.
response = JQ(req("q", "*:*", "fq", fq, "indent", "true", "timeAllowed", longTimeout));

// Check that we did insert this one.
assertEquals("Hits should still be 0", (long) filterCacheStats.getValue().get("hits"), 0L);
assertEquals(
"Inserts should be bumped",
(long) filterCacheStats.getValue().get("inserts"),
fqInserts + 1);

res = (Map<?, ?>) fromJSONString(response);
body = (Map<?, ?>) (res.get("response"));

assertEquals("Should have exactly " + NUM_DOCS, (long) (body.get("numFound")), NUM_DOCS);
header = (Map<?, ?>) (res.get("responseHeader"));
assertNull(
"Should NOT have partial results",
header.get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY));
// Exercises the shared scenario with solr.useExitableDirectoryReader enabled:
// ExitableTermsEnum limit checks are expected to occur.
public void testWithExitableDirectoryReader() throws Exception {
doTestWithExitableDirectoryReader(true);
}

// When looking at a problem raised on the user's list I ran across this anomaly with timeAllowed
// This tests for the second query NOT returning partial results, along with some other
@Test
public void testQueryResults() throws Exception {
String q = "name:e*";
SolrCore core = h.getCore();
MetricsMap queryCacheStats =
(MetricsMap)
((SolrMetricManager.GaugeWrapper<?>)
core.getCoreMetricManager()
.getRegistry()
.getMetrics()
.get("CACHE.searcher.queryResultCache"))
.getGauge();
Map<String, Object> nl = queryCacheStats.getValue();
long inserts = (long) nl.get("inserts");

String response = JQ(req("q", q, "indent", "true", "timeAllowed", "1", "sleep", sleep));

// The queryResultCache should NOT get an entry here.
nl = queryCacheStats.getValue();
assertEquals("Should NOT have inserted partial results!", inserts, (long) nl.get("inserts"));

Map<?, ?> res = (Map<?, ?>) fromJSONString(response);
Map<?, ?> body = (Map<?, ?>) (res.get("response"));
Map<?, ?> header = (Map<?, ?>) (res.get("responseHeader"));

assertTrue("Should have fewer docs than " + NUM_DOCS, (long) (body.get("numFound")) < NUM_DOCS);
assertTrue(
"Should have partial results",
(Boolean) (header.get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY)));

response = JQ(req("q", q, "indent", "true", "timeAllowed", longTimeout));
// Exercises the shared scenario with solr.useExitableDirectoryReader disabled:
// no ExitableTermsEnum limit checks are expected.
public void testWithoutExitableDirectoryReader() throws Exception {
doTestWithExitableDirectoryReader(false);
}

// Check that we did insert this one.
Map<String, Object> nl2 = queryCacheStats.getValue();
assertEquals("Hits should still be 0", (long) nl.get("hits"), (long) nl2.get("hits"));
assertTrue("Inserts should be bumped", inserts < (long) nl2.get("inserts"));
private void doTestWithExitableDirectoryReader(boolean withExitableDirectoryReader)
throws Exception {
System.setProperty(
SolrIndexSearcher.EXITABLE_READER_PROPERTY, String.valueOf(withExitableDirectoryReader));
initCore("solrconfig-delaying-component.xml", "schema_latest.xml");
createIndex();

res = (Map<?, ?>) fromJSONString(response);
body = (Map<?, ?>) (res.get("response"));
header = (Map<?, ?>) (res.get("responseHeader"));
// create a limit that will not trip but will report calls to shouldExit()
// NOTE: we need to use the inner class name to capture the calls
String callerExpr = "ExitableTermsEnum:-1";
CallerSpecificQueryLimit queryLimit = new CallerSpecificQueryLimit(callerExpr);
TestInjection.queryTimeout = queryLimit;
String q = "name:a*";
assertJQ(req("q", q), assertionString);
Map<String, Integer> callCounts = queryLimit.getCallCounts();
if (withExitableDirectoryReader) {
assertTrue(
"there should be some calls from ExitableTermsEnum: " + callCounts,
callCounts.get(callerExpr) > 0);
} else {
assertEquals(
"there should be no calls from ExitableTermsEnum", 0, (int) callCounts.get(callerExpr));
}

assertEquals("Should have exactly " + NUM_DOCS, NUM_DOCS, (long) (body.get("numFound")));
Boolean test = (Boolean) (header.get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY));
if (test != null) {
assertFalse("Should NOT have partial results", test);
// check that the limits are tripped in ExitableDirectoryReader if it's in use
int maxCount = random().nextInt(10) + 1;
callerExpr = "ExitableTermsEnum:" + maxCount;
queryLimit = new CallerSpecificQueryLimit(callerExpr);
TestInjection.queryTimeout = queryLimit;
// avoid using the cache
q = "name:b*";
if (withExitableDirectoryReader) {
assertJQ(req("q", q), failureAssertionString);
} else {
assertJQ(req("q", q), assertionString);
}
callCounts = queryLimit.getCallCounts();
if (withExitableDirectoryReader) {
assertTrue(
"there should be at least " + maxCount + " calls from ExitableTermsEnum: " + callCounts,
callCounts.get(callerExpr) >= maxCount);
} else {
assertEquals(
"there should be no calls from ExitableTermsEnum", 0, (int) callCounts.get(callerExpr));
}
}
}
Loading