Merge remote-tracking branch 'origin/main' into advance_tv_for_stateless
Tim-Brooks committed Jul 14, 2023
2 parents 67fdfe0 + 6b6a855 commit 34413a7
Showing 45 changed files with 2,413 additions and 587 deletions.
@@ -30,6 +30,7 @@ public TaskProvider<? extends Task> createTask(Project project) {
t.setProjectBuildDirs(getProjectBuildDirs(project));
t.setClasspath(project.getConfigurations().getByName(JavaPlugin.COMPILE_CLASSPATH_CONFIGURATION_NAME));
SourceSet mainSourceSet = GradleUtils.getJavaSourceSets(project).findByName(SourceSet.MAIN_SOURCE_SET_NAME);
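// Depending on the main source directory collection also triggers any tasks that generate those sources, since Gradle tracks them as the collection's build dependencies.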
t.dependsOn(mainSourceSet.getJava().getSourceDirectories());
t.getSrcDirs().set(project.provider(() -> mainSourceSet.getAllSource().getSrcDirs()));
});
return task;
5 changes: 5 additions & 0 deletions docs/changelog/97410.yaml
@@ -0,0 +1,5 @@
pr: 97410
summary: Introduce a collector manager for `QueryPhaseCollector`
area: Search
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/97463.yaml
@@ -0,0 +1,5 @@
pr: 97463
summary: "[Enterprise Search] Add connectors indices and ent-search pipeline"
area: Application
type: feature
issues: []
5 changes: 5 additions & 0 deletions docs/reference/ilm/actions/ilm-rollover.asciidoc
@@ -129,6 +129,11 @@ opt in to rolling over empty indices, by adding a `"min_docs": 0` condition. Thi
disabled on a cluster-wide basis by setting `indices.lifecycle.rollover.only_if_has_documents` to
`false`.
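
For example, a policy along the following lines (the policy name and `max_age` threshold are illustrative) opts in to rolling over empty indices by adding a `"min_docs": 0` condition next to a time-based one:

[source,console]
----
PUT _ilm/policy/my_policy
{
  "policy": {
    "phases": {
      "hot": {
        "actions": {
          "rollover": {
            "max_age": "7d",
            "min_docs": 0
          }
        }
      }
    }
  }
}
----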

NOTE: The rollover action always implicitly rolls over a data stream or alias if one or more shards contain
200000000 or more documents. Normally a shard will reach 50GB long before it reaches 200M documents,
but this isn't the case for space-efficient data sets. Search performance will very likely suffer
if a shard contains more than 200M documents; this is the reason for the built-in limit.

[[ilm-rollover-ex]]
==== Example

@@ -43,7 +43,7 @@ public String name() {
/**
* The type defined for the field in the pattern.
*/
GrokCaptureType type() {
public GrokCaptureType type() {
return type;
}

@@ -16,7 +16,7 @@
/**
* The type defined for the field in the pattern.
*/
enum GrokCaptureType {
public enum GrokCaptureType {
STRING {
@Override
<T> T nativeExtracter(int[] backRefs, NativeExtracterMap<T> map) {

@@ -55,6 +55,14 @@ protected boolean ignoreExternalCluster() {
}

public void testSynonymsUpdateable() throws FileNotFoundException, IOException, InterruptedException {
testSynonymsUpdate(false);
}

public void testSynonymsWithPreview() throws FileNotFoundException, IOException, InterruptedException {
testSynonymsUpdate(true);
}

private void testSynonymsUpdate(boolean preview) throws FileNotFoundException, IOException, InterruptedException {
Path config = internalCluster().getInstance(Environment.class).configFile();
String synonymsFileName = "synonyms.txt";
Path synonymsFile = config.resolve(synonymsFileName);
@@ -97,7 +105,7 @@ public void testSynonymsUpdateable() throws FileNotFoundException, IOException,
}
ReloadAnalyzersResponse reloadResponse = client().execute(
ReloadAnalyzerAction.INSTANCE,
new ReloadAnalyzersRequest(null, "test")
new ReloadAnalyzersRequest(null, preview, "test")
).actionGet();
assertNoFailures(reloadResponse);
assertEquals(cluster().numDataNodes(), reloadResponse.getSuccessfulShards());
@@ -109,17 +117,21 @@ public void testSynonymsUpdateable() throws FileNotFoundException, IOException,
);

analyzeResponse = indicesAdmin().prepareAnalyze("test", "foo").setAnalyzer("my_synonym_analyzer").get();
assertEquals(3, analyzeResponse.getTokens().size());
int expectedTokens = preview ? 2 : 3;
assertEquals(expectedTokens, analyzeResponse.getTokens().size());
Set<String> tokens = new HashSet<>();
analyzeResponse.getTokens().stream().map(AnalyzeToken::getTerm).forEach(t -> tokens.add(t));
assertTrue(tokens.contains("foo"));
assertTrue(tokens.contains("baz"));
assertTrue(tokens.contains(testTerm));
if (preview == false) {
assertTrue(tokens.contains(testTerm));
}

response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "baz")).get();
assertHitCount(response, 1L);
long expectedHitCount = preview ? 0L : 1L;
response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", testTerm)).get();
assertHitCount(response, 1L);
assertHitCount(response, expectedHitCount);
}
}
}
@@ -20,8 +20,6 @@ setup:

- match:
acknowledged: true
- match: { reload_analyzers_details._shards.total: 0 }
- length: { reload_analyzers_details.reload_details: 0 }

- do:
catch: missing
@@ -62,3 +60,49 @@ setup:
id: "test-other-1"
- synonyms: "test => check"
id: "test-other-2"

---
"Delete synonym set - index uses the synonym set, so it can't be deleted":

- do:
indices.create:
index: my_index1
body:
settings:
index:
number_of_shards: 1
number_of_replicas: 0
analysis:
filter:
my_synonym_filter:
type: synonym_graph
synonyms_set: test-get-synonyms
updateable: true
analyzer:
my_analyzer1:
type: custom
tokenizer: standard
filter: [ lowercase, my_synonym_filter ]
mappings:
properties:
my_field:
type: text
search_analyzer: my_analyzer1

- do:
catch: /Synonym set \[test-get-synonyms\] cannot be deleted as it is used in the following indices:\ my_index1/
synonyms.delete:
synonyms_set: test-get-synonyms

- do:
synonyms.get:
synonyms_set: test-get-synonyms

- match:
count: 2
- match:
synonyms_set:
- synonyms: "hello, hi"
id: "test-id-1"
- synonyms: "bye => goodbye"
id: "test-id-2"
@@ -160,39 +160,6 @@ setup:
query: hola
- match: { hits.total.value: 1 }

---
"Delete the synonyms set and confirm failed reload analyzers details":
- do:
synonyms.delete:
synonyms_set: set1

- match:
acknowledged: true
- gte: { reload_analyzers_details._shards.failed: 1 }
- match: { reload_analyzers_details._shards.failures.0.index: "my_index" }
- match: { reload_analyzers_details._shards.failures.0.reason.reason: "Synonym set [set1] not found" }

# Confirm that the index analyzers are not reloaded and still using old synonyms
- do:
search:
index: my_index
body:
query:
match:
my_field:
query: hi
- match: { hits.total.value: 1 }

- do:
search:
index: my_index
body:
query:
match:
my_field:
query: bye
- match: { hits.total.value: 1 }

---
"Fail loading synonyms from index if synonyms_set doesn't exist":
- do:
@@ -131,7 +131,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
private static MedianAbsoluteDeviationAggregationBuilder randomBuilder() {
final MedianAbsoluteDeviationAggregationBuilder builder = new MedianAbsoluteDeviationAggregationBuilder("mad");
if (randomBoolean()) {
builder.compression(randomDoubleBetween(20, 1000, false));
builder.compression(randomDoubleBetween(25, 1000, false));
}
return builder;
}
6 changes: 4 additions & 2 deletions server/src/main/java/org/elasticsearch/TransportVersion.java
@@ -154,11 +154,13 @@ private static TransportVersion registerTransportVersion(int id, String uniqueId
public static final TransportVersion V_8_500_031 = registerTransportVersion(8_500_031, "e7aa7e95-37e7-46a3-aad1-90a21c0769e7");
public static final TransportVersion V_8_500_032 = registerTransportVersion(8_500_032, "a9a14bc6-c3f2-41d9-a3d8-c686bf2c901d");
public static final TransportVersion V_8_500_033 = registerTransportVersion(8_500_033, "193ab7c4-a751-4cbd-a66a-2d7d56ccbc10");
public static final TransportVersion V_8_500_034 = registerTransportVersion(8_500_034, "16871c8b-88ba-4432-980a-10fd9ecad2dc");

// Introduced for stateless plugin
public static final TransportVersion V_8_500_034 = registerTransportVersion(8_500_034, "3343c64f-d7ac-4f02-9262-3e1acfc56f89");
public static final TransportVersion V_8_500_035 = registerTransportVersion(8_500_035, "3343c64f-d7ac-4f02-9262-3e1acfc56f89");

private static class CurrentHolder {
private static final TransportVersion CURRENT = findCurrent(V_8_500_034);
private static final TransportVersion CURRENT = findCurrent(V_8_500_035);

// finds the pluggable current version, or uses the given fallback
private static TransportVersion findCurrent(TransportVersion fallback) {
@@ -8,6 +8,7 @@

package org.elasticsearch.action.admin.indices.analyze;

import org.elasticsearch.TransportVersion;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -16,54 +17,64 @@
import java.util.Arrays;
import java.util.Objects;

import static org.elasticsearch.TransportVersion.V_8_500_034;

/**
* Request for reloading index search analyzers
*/
public class ReloadAnalyzersRequest extends BroadcastRequest<ReloadAnalyzersRequest> {
private final String resource;
private final boolean preview;

private static final TransportVersion PREVIEW_OPTION_TRANSPORT_VERSION = V_8_500_034;

/**
* Constructs a request for reloading index search analyzers
* @param resource changed resource to reload analyzers from, @null if not applicable
* @param preview {@code false} applies analyzer reloading. {@code true} previews the reload operation: analyzers are not reloaded,
* but the results are still returned. This is useful for understanding analyzer usage across the different indices.
* @param indices the indices to reload analyzers for
*/
public ReloadAnalyzersRequest(String resource, String... indices) {
public ReloadAnalyzersRequest(String resource, boolean preview, String... indices) {
super(indices);
this.resource = resource;
this.preview = preview;
}

public ReloadAnalyzersRequest(StreamInput in) throws IOException {
super(in);
this.resource = in.readOptionalString();
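// Senders on older transport versions never write the preview flag; the short-circuit leaves it false without reading from the stream.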
this.preview = in.getTransportVersion().onOrAfter(PREVIEW_OPTION_TRANSPORT_VERSION) && in.readBoolean();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalString(resource);
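// Only send the preview flag to nodes on a transport version that knows how to read it.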
if (out.getTransportVersion().onOrAfter(PREVIEW_OPTION_TRANSPORT_VERSION)) {
out.writeBoolean(preview);
}
}

public String resource() {
return resource;
}

public boolean preview() {
return preview;
}

@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ReloadAnalyzersRequest that = (ReloadAnalyzersRequest) o;
return Objects.equals(indicesOptions(), that.indicesOptions())
&& Arrays.equals(indices, that.indices)
&& Objects.equals(resource, that.resource);
return preview == that.preview && Objects.equals(resource, that.resource);
}

@Override
public int hashCode() {
return Objects.hash(indicesOptions(), Arrays.hashCode(indices), resource);
return Objects.hash(indicesOptions(), Arrays.hashCode(indices), resource, preview);
}

}
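
As a usage sketch mirroring the updated integration test (the index name and resource below are hypothetical), a preview-only reload is requested by passing `true` for the new flag:

ReloadAnalyzersResponse response = client().execute(
    ReloadAnalyzerAction.INSTANCE,
    new ReloadAnalyzersRequest("synonyms.txt", true, "my-index") // preview = true: nothing is actually reloaded
).actionGet();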
@@ -125,7 +125,7 @@ protected void shardOperation(
logger.info("reloading analyzers for index shard " + shardRouting);
IndexService indexService = indicesService.indexService(shardRouting.index());
List<String> reloadedSearchAnalyzers = indexService.mapperService()
.reloadSearchAnalyzers(indicesService.getAnalysis(), request.resource());
.reloadSearchAnalyzers(indicesService.getAnalysis(), request.resource(), request.preview());
return new ReloadResult(shardRouting.index().getName(), shardRouting.currentNodeId(), reloadedSearchAnalyzers);
});
}