Skip to content

Commit

Permalink
Make field_range optimization work with FrozenEngine (#69357)
Browse files Browse the repository at this point in the history
Searchable snapshot indices that are mounted using the shared_cache option are now using the FrozenEngine. This
engine does not properly implement getRawFieldRange, however, resulting in the index being loaded on the cluster state
applier thread.
  • Loading branch information
ywelsch committed Feb 23, 2021
1 parent f4fa74b commit e243e4d
Show file tree
Hide file tree
Showing 4 changed files with 8 additions and 16 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,8 @@
*/
public class ReadOnlyEngine extends Engine {

public static final String FIELD_RANGE_SEARCH_SOURCE = "field_range";

/**
* Reader attributes used for read only engines. These attributes prevent loading term dictionaries on-heap even if the field is an
* ID field.
Expand Down Expand Up @@ -578,7 +580,7 @@ public CompletionStats completionStats(String... fieldNamePatterns) {
*/
@Override
public ShardLongFieldRange getRawFieldRange(String field) throws IOException {
try (Searcher searcher = acquireSearcher("field_range")) {
try (Searcher searcher = acquireSearcher(FIELD_RANGE_SEARCH_SOURCE)) {
final DirectoryReader directoryReader = searcher.getDirectoryReader();

final byte[] minPackedValue = PointValues.getMinPackedValue(directoryReader, field);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -222,6 +222,7 @@ private Engine.Searcher openSearcher(String source, SearcherScope scope) throws
case "segments":
case "segments_stats":
case "completion_stats":
case FIELD_RANGE_SEARCH_SOURCE: // special case for field_range - we use the cached point values reader
case CAN_MATCH_SEARCH_SOURCE: // special case for can_match phase - we use the cached point values reader
maybeOpenReader = false;
break;
Expand All @@ -230,7 +231,7 @@ private Engine.Searcher openSearcher(String source, SearcherScope scope) throws
}
ElasticsearchDirectoryReader reader = maybeOpenReader ? getOrOpenReader() : getReader();
if (reader == null) {
if (CAN_MATCH_SEARCH_SOURCE.equals(source)) {
if (CAN_MATCH_SEARCH_SOURCE.equals(source) || FIELD_RANGE_SEARCH_SOURCE.equals(source)) {
canMatchReader.incRef();
return new Searcher(source, canMatchReader, engineConfig.getSimilarity(), engineConfig.getQueryCache(),
engineConfig.getQueryCachingPolicy(), canMatchReader::decRef);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequest;
Expand Down Expand Up @@ -539,11 +538,6 @@ public void testComputesTimestampRangeFromMilliseconds() {
assertTrue(timestampFieldRange.isComplete());
assertThat(timestampFieldRange.getMin(), equalTo(Instant.parse("2010-01-05T01:02:03.456Z").toEpochMilli()));
assertThat(timestampFieldRange.getMax(), equalTo(Instant.parse("2010-01-06T02:03:04.567Z").toEpochMilli()));

for (ShardStats shardStats : client().admin().indices().prepareStats("index").clear().setRefresh(true).get().getShards()) {
assertThat("shard " + shardStats.getShardRouting() + " refreshed to get the timestamp range",
shardStats.getStats().refresh.getTotal(), greaterThanOrEqualTo(1L));
}
}

public void testComputesTimestampRangeFromNanoseconds() throws IOException {
Expand Down Expand Up @@ -575,11 +569,6 @@ public void testComputesTimestampRangeFromNanoseconds() throws IOException {
equalTo(resolution.convert(Instant.parse("2010-01-05T01:02:03.456789012Z"))));
assertThat(timestampFieldRange.getMax(),
equalTo(resolution.convert(Instant.parse("2010-01-06T02:03:04.567890123Z"))));

for (ShardStats shardStats : client().admin().indices().prepareStats("index").clear().setRefresh(true).get().getShards()) {
assertThat("shard " + shardStats.getShardRouting() + " refreshed to get the timestamp range",
shardStats.getStats().refresh.getTotal(), greaterThanOrEqualTo(1L));
}
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ public void testSearchableSnapshotShardsAreSkippedWithoutQueryingAnyNodeWhenThey
restoredIndexSettings,
Strings.EMPTY_ARRAY,
false,
MountSearchableSnapshotRequest.Storage.FULL_COPY
randomFrom(MountSearchableSnapshotRequest.Storage.values())
);
client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet();

Expand Down Expand Up @@ -267,7 +267,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped()
restoredIndexSettings,
Strings.EMPTY_ARRAY,
false,
MountSearchableSnapshotRequest.Storage.FULL_COPY
randomFrom(MountSearchableSnapshotRequest.Storage.values())
);
client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet();
final int searchableSnapshotShardCount = indexOutsideSearchRangeShardCount;
Expand Down Expand Up @@ -380,7 +380,7 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo
restoredIndexSettings,
Strings.EMPTY_ARRAY,
false,
MountSearchableSnapshotRequest.Storage.FULL_COPY
randomFrom(MountSearchableSnapshotRequest.Storage.values())
);
client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet();

Expand Down

0 comments on commit e243e4d

Please sign in to comment.