From eee0b34fc84e1d5c7c2cfcf456daa2e77622b54d Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 18 Apr 2018 17:36:09 -0700 Subject: [PATCH 01/28] Networking: Deprecate http.enabled setting (#29591) This commit deprecates the http.enabled, in preparation for removing the feature in 7.0. relates #12792 --- .../java/org/elasticsearch/common/network/NetworkModule.java | 3 ++- .../org/elasticsearch/common/network/NetworkModuleTests.java | 5 ++++- server/src/test/java/org/elasticsearch/node/NodeTests.java | 4 ++++ .../src/test/java/org/elasticsearch/node/MockNodeTests.java | 3 +++ .../elasticsearch/test/test/InternalTestClusterTests.java | 5 +++++ 5 files changed, 18 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java index 2b9cde65d64cc..15332d4317f34 100644 --- a/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -71,7 +71,8 @@ public final class NetworkModule { Property.NodeScope); public static final Setting HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope); public static final Setting HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope); - public static final Setting HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope); + public static final Setting HTTP_ENABLED = Setting.boolSetting("http.enabled", true, + Property.NodeScope, Property.Deprecated); public static final Setting TRANSPORT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_KEY, Property.NodeScope); private final Settings settings; diff --git a/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java index 70deb8a4ba88e..09dc8607bc484 100644 --- a/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.ModuleTestCase; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.util.BigArrays; @@ -151,6 +152,7 @@ public Map> getTransports(Settings settings, ThreadP assertSame(custom, module.getTransportSupplier()); assertTrue(module.isTransportClient()); assertFalse(module.isHttpEnabled()); + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } public void testRegisterHttpTransport() { @@ -181,6 +183,7 @@ public Map> getHttpTransports(Settings set assertFalse(newModule.isTransportClient()); assertFalse(newModule.isHttpEnabled()); expectThrows(IllegalStateException.class, () -> newModule.getHttpServerTransportSupplier()); + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } public void testOverrideDefault() { @@ -306,7 +309,7 @@ public List getTransportInterceptors(NamedWriteableRegistr }); }); assertEquals("interceptor must not be null", nullPointerException.getMessage()); - + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } private NetworkModule 
newNetworkModule(Settings settings, boolean transportClient, NetworkPlugin... plugins) { diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index f1c8177b5a61c..254823791d5cd 100644 --- a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.env.Environment; @@ -62,6 +63,7 @@ public void testNodeName() throws IOException { assertThat(Node.NODE_NAME_SETTING.get(nodeSettings), equalTo(name)); } } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } public static class CheckPlugin extends Plugin { @@ -93,6 +95,7 @@ protected void validateNodeBeforeAcceptingRequests(BootstrapContext context, Bou expectThrows(NodeValidationException.class, () -> node.start()); assertTrue(executed.get()); } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } public void testWarnIfPreRelease() { @@ -144,6 +147,7 @@ public void testNodeAttributes() throws IOException { } catch (IllegalArgumentException e) { assertEquals("node.attr.test_attr cannot have leading or trailing whitespace [trailing ]", e.getMessage()); } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } private static Settings.Builder baseSettings() { diff --git a/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java b/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java index a217540b89fca..a0cdb8c3168f4 100644 --- a/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java +++ b/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.node; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; @@ -67,5 +69,6 @@ public void testComponentsMockedByMarkerPlugins() throws IOException { assertSame(searchService.getClass(), SearchService.class); } } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index 05fdfac541a2e..77a66d462795f 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -19,6 +19,7 @@ */ package org.elasticsearch.test.test; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Client; @@ -242,6 +243,7 @@ public Settings transportClientSettings() { } finally { IOUtils.close(cluster0, cluster1); } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } public void testDataFolderAssignmentAndCleaning() throws 
IOException, InterruptedException { @@ -346,6 +348,7 @@ public Settings transportClientSettings() { } finally { cluster.close(); } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } private Path[] getNodePaths(InternalTestCluster cluster, String name) { @@ -446,6 +449,7 @@ public Settings transportClientSettings() { } finally { cluster.close(); } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } public void testTwoNodeCluster() throws Exception { @@ -505,5 +509,6 @@ public Settings onNodeStopped(String nodeName) throws Exception { } finally { cluster.close(); } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } } From 78041353be339bb716e3c68a3fe1394804ff38eb Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 18 Apr 2018 21:18:56 -0700 Subject: [PATCH 02/28] Test: Guard deprecation check when 0 nodes created The internal test cluster can sometimes have 0 nodes. In this situation, the http.enabled flag will never be read, and thus no deprecation warning will be emitted. This commit guards the deprecation warning check in this case. --- .../elasticsearch/test/test/InternalTestClusterTests.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index 77a66d462795f..a473845a31344 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -61,6 +61,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.not; @@ -229,13 +230,15 @@ public Settings transportClientSettings() { cluster1.beforeTest(random, random.nextDouble()); } assertArrayEquals(cluster0.getNodeNames(), cluster1.getNodeNames()); + if (cluster0.getNodeNames().length > 0) { + assertSettingDeprecationsAndWarnings(new Setting[]{NetworkModule.HTTP_ENABLED}); + } Iterator iterator1 = cluster1.getClients().iterator(); for (Client client : cluster0.getClients()) { assertTrue(iterator1.hasNext()); Client other = iterator1.next(); assertSettings(client.settings(), other.settings(), false); } - assertArrayEquals(cluster0.getNodeNames(), cluster1.getNodeNames()); assertMMNinNodeSetting(cluster0, cluster0.numMasterNodes()); assertMMNinNodeSetting(cluster1, cluster0.numMasterNodes()); cluster0.afterTest(); @@ -243,7 +246,6 @@ public Settings transportClientSettings() { } finally { IOUtils.close(cluster0, cluster1); } - assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } public void testDataFolderAssignmentAndCleaning() throws IOException, InterruptedException { From 827d5ad6d673920a7a46ce768d162e09c159326e Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 19 Apr 2018 09:33:34 +0200 Subject: [PATCH 03/28] Added painless execute api. (#29164) Added an api that allows to execute an arbitrary script and a result to be returned. 
```
POST /_scripts/painless/_execute
{
  "script": {
    "source": "params.var1 / params.var2",
    "params": {
      "var1": 1,
      "var2": 1
    }
  }
}
```

Relates to #27875
---
 .../painless/painless-execute-script.asciidoc | 53 +++
 .../painless-getting-started.asciidoc | 2 +
 .../painless/PainlessExecuteAction.java | 338 ++++++++++++++++++
 .../painless/PainlessPlugin.java | 34 +-
 .../painless/PainlessExecuteRequestTests.java | 61 ++++
 .../PainlessExecuteResponseTests.java | 34 ++
 .../painless/70_execute_painless_scripts.yml | 25 ++
 .../api/scripts_painless_execute.json | 17 +
 8 files changed, 563 insertions(+), 1 deletion(-)
 create mode 100644 docs/painless/painless-execute-script.asciidoc
 create mode 100644 modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java
 create mode 100644 modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java
 create mode 100644 modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteResponseTests.java
 create mode 100644 modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json

diff --git a/docs/painless/painless-execute-script.asciidoc b/docs/painless/painless-execute-script.asciidoc
new file mode 100644
index 0000000000000..7997c87e3e45f
--- /dev/null
+++ b/docs/painless/painless-execute-script.asciidoc
@@ -0,0 +1,53 @@
+[[painless-execute-api]]
+=== Painless execute API
+
+The Painless execute API allows an arbitrary script to be executed and a result to be returned.
+
+[[painless-execute-api-parameters]]
+.Parameters
+[options="header"]
+|======
+| Name | Required | Default | Description
+| `script` | yes | - | The script to execute
+| `context` | no | `painless_test` | The context the script should be executed in.
+|======
+
+==== Contexts
+
+Contexts control how scripts are executed, what variables are available at runtime and what the return type is.
+
+===== Painless test script context
+
+The `painless_test` context executes scripts as is and does not add any special parameters.
+The only variable that is available is `params`, which can be used to access user defined values.
+The result of the script is always converted to a string.
+If no context is specified then this context is used by default.
+
+==== Example
+
+Request:
+
+[source,js]
+----------------------------------------------------------------
+POST /_scripts/painless/_execute
+{
+  "script": {
+    "source": "params.count / params.total",
+    "params": {
+      "count": 100.0,
+      "total": 1000.0
+    }
+  }
+}
+----------------------------------------------------------------
+// CONSOLE
+
+Response:
+
+[source,js]
+--------------------------------------------------
+{
+  "result": "0.1"
+}
+--------------------------------------------------
+// TESTRESPONSE
\ No newline at end of file
diff --git a/docs/painless/painless-getting-started.asciidoc b/docs/painless/painless-getting-started.asciidoc
index e82e14b043840..b47b417c793e5 100644
--- a/docs/painless/painless-getting-started.asciidoc
+++ b/docs/painless/painless-getting-started.asciidoc
@@ -389,3 +389,5 @@ dispatch *feels* like it'd add a ton of complexity which'd make
 maintenance and other improvements much more difficult.
include::painless-debugging.asciidoc[] + +include::painless-execute-script.asciidoc[] diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java new file mode 100644 index 0000000000000..aa650a37c4fa2 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java @@ -0,0 +1,338 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.painless; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestStatus.OK; + +public class PainlessExecuteAction extends Action { + + static final PainlessExecuteAction INSTANCE = new PainlessExecuteAction(); + private 
static final String NAME = "cluster:admin/scripts/painless/execute"; + + private PainlessExecuteAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContent { + + private static final ParseField SCRIPT_FIELD = new ParseField("script"); + private static final ParseField CONTEXT_FIELD = new ParseField("context"); + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "painless_execute_request", args -> new Request((Script) args[0], (SupportedContext) args[1])); + + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> Script.parse(p), SCRIPT_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { + // For now only accept an empty json object: + XContentParser.Token token = p.nextToken(); + assert token == XContentParser.Token.FIELD_NAME; + String contextType = p.currentName(); + token = p.nextToken(); + assert token == XContentParser.Token.START_OBJECT; + token = p.nextToken(); + assert token == XContentParser.Token.END_OBJECT; + token = p.nextToken(); + assert token == XContentParser.Token.END_OBJECT; + return SupportedContext.valueOf(contextType.toUpperCase(Locale.ROOT)); + }, CONTEXT_FIELD); + } + + private Script script; + private SupportedContext context; + + static Request parse(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + Request(Script script, SupportedContext context) { + this.script = Objects.requireNonNull(script); + this.context = context != null ? 
context : SupportedContext.PAINLESS_TEST; + } + + Request() { + } + + public Script getScript() { + return script; + } + + public SupportedContext getContext() { + return context; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (script.getType() != ScriptType.INLINE) { + validationException = addValidationError("only inline scripts are supported", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + script = new Script(in); + context = SupportedContext.fromId(in.readByte()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + script.writeTo(out); + out.writeByte(context.id); + } + + // For testing only: + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(SCRIPT_FIELD.getPreferredName(), script); + builder.startObject(CONTEXT_FIELD.getPreferredName()); + { + builder.startObject(context.name()); + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(script, request.script) && + context == request.context; + } + + @Override + public int hashCode() { + return Objects.hash(script, context); + } + + public enum SupportedContext { + + PAINLESS_TEST((byte) 0); + + private final byte id; + + SupportedContext(byte id) { + this.id = id; + } + + public static SupportedContext fromId(byte id) { + switch (id) { + case 0: + return PAINLESS_TEST; + default: + throw new IllegalArgumentException("unknown context [" + id + "]"); + } + } + } + + } + + public static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private Object result; + + Response() {} + + Response(Object result) { + this.result = result; + } + + public Object getResult() { + return result; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + result = in.readGenericValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeGenericValue(result); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("result", result); + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(result, response.result); + } + + @Override + public int hashCode() { + return Objects.hash(result); + } + } + + public abstract static class PainlessTestScript { + + private final Map params; + + public PainlessTestScript(Map params) { + this.params = params; + } + + /** Return the parameters for this script. 
*/ + public Map getParams() { + return params; + } + + public abstract Object execute(); + + public interface Factory { + + PainlessTestScript newInstance(Map params); + + } + + public static final String[] PARAMETERS = {}; + public static final ScriptContext CONTEXT = new ScriptContext<>("painless_test", Factory.class); + + } + + public static class TransportAction extends HandledTransportAction { + + + private final ScriptService scriptService; + + @Inject + public TransportAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + ScriptService scriptService) { + super(settings, NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, Request::new); + this.scriptService = scriptService; + } + @Override + protected void doExecute(Request request, ActionListener listener) { + switch (request.context) { + case PAINLESS_TEST: + PainlessTestScript.Factory factory = scriptService.compile(request.script, PainlessTestScript.CONTEXT); + PainlessTestScript painlessTestScript = factory.newInstance(request.script.getParams()); + String result = Objects.toString(painlessTestScript.execute()); + listener.onResponse(new Response(result)); + break; + default: + throw new UnsupportedOperationException("unsupported context [" + request.context + "]"); + } + } + + } + + static class RestAction extends BaseRestHandler { + + RestAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, "/_scripts/painless/_execute", this); + controller.registerHandler(POST, "/_scripts/painless/_execute", this); + } + + @Override + public String getName() { + return "_scripts_painless_execute"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + final Request request = Request.parse(restRequest.contentOrSourceParamParser()); + return channel -> client.executeLocally(INSTANCE, request, new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(Response response, XContentBuilder builder) throws Exception { + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + return new BytesRestResponse(OK, builder); + } + }); + } + } + +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index 795d81bb6e058..0364ad667efc7 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -20,28 +20,40 @@ package org.elasticsearch.painless; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.painless.spi.PainlessExtension; import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.ExtensiblePlugin; import org.elasticsearch.plugins.Plugin; import 
org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.ServiceLoader; +import java.util.function.Supplier; /** * Registers Painless as a plugin. */ -public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin { +public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin, ActionPlugin { private final Map, List> extendedWhitelists = new HashMap<>(); @@ -74,4 +86,24 @@ public void reloadSPI(ClassLoader loader) { } } } + + @SuppressWarnings("rawtypes") + public List getContexts() { + return Collections.singletonList(PainlessExecuteAction.PainlessTestScript.CONTEXT); + } + + @Override + public List> getActions() { + return Collections.singletonList( + new ActionHandler<>(PainlessExecuteAction.INSTANCE, PainlessExecuteAction.TransportAction.class) + ); + } + + @Override + public List getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster) { + return Collections.singletonList(new PainlessExecuteAction.RestAction(settings, restController)); + } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java new file mode 100644 index 0000000000000..488ae0e1643bc --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.painless; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.io.IOException; +import java.util.Collections; + +public class PainlessExecuteRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected PainlessExecuteAction.Request createTestInstance() { + Script script = new Script(randomAlphaOfLength(10)); + PainlessExecuteAction.Request.SupportedContext context = randomBoolean() ? 
+ PainlessExecuteAction.Request.SupportedContext.PAINLESS_TEST : null; + return new PainlessExecuteAction.Request(script, context); + } + + @Override + protected PainlessExecuteAction.Request createBlankInstance() { + return new PainlessExecuteAction.Request(); + } + + @Override + protected PainlessExecuteAction.Request doParseInstance(XContentParser parser) throws IOException { + return PainlessExecuteAction.Request.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testValidate() { + Script script = new Script(ScriptType.STORED, null, randomAlphaOfLength(10), Collections.emptyMap()); + PainlessExecuteAction.Request request = new PainlessExecuteAction.Request(script, null); + Exception e = request.validate(); + assertNotNull(e); + assertEquals("Validation Failed: 1: only inline scripts are supported;", e.getMessage()); + } +} diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteResponseTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteResponseTests.java new file mode 100644 index 0000000000000..20f3cf08e04c8 --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteResponseTests.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.painless; + +import org.elasticsearch.test.AbstractStreamableTestCase; + +public class PainlessExecuteResponseTests extends AbstractStreamableTestCase { + + @Override + protected PainlessExecuteAction.Response createBlankInstance() { + return new PainlessExecuteAction.Response(); + } + + @Override + protected PainlessExecuteAction.Response createTestInstance() { + return new PainlessExecuteAction.Response(randomAlphaOfLength(10)); + } +} diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml new file mode 100644 index 0000000000000..7b915cc38dbc0 --- /dev/null +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml @@ -0,0 +1,25 @@ +--- +"Execute with defaults": + - do: + scripts_painless_execute: + body: + script: + source: "params.count / params.total" + params: + count: 100.0 + total: 1000.0 + - match: { result: "0.1" } + +--- +"Execute with execute_api_script context": + - do: + scripts_painless_execute: + body: + script: + source: "params.var1 - params.var2" + params: + var1: 10 + var2: 100 + context: + painless_test: {} + - match: { result: "-90" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json b/rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json new file mode 100644 index 0000000000000..c02627cfd874c --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json @@ -0,0 +1,17 @@ +{ + "scripts_painless_execute": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html", + "methods": ["GET", "POST"], + "url": { + "path": "/_scripts/painless/_execute", + "paths": ["/_scripts/painless/_execute"], + "parts": { + }, + "params": { + } + }, + "body": { + "description": "The script to execute" + } + } +} From 31a2db4908cb44d6f0ff23f3d2cc8fe0fbfa2eb5 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 19 Apr 2018 09:40:25 +0200 Subject: [PATCH 04/28] Fix missing node id prefix in startup logs (#29534) When `node.name` is not set, some log traces at startup time does not show the node id. --- server/src/main/java/org/elasticsearch/node/Node.java | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index b2eb3a21b7fcd..805d659ced84f 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -258,7 +258,6 @@ protected Node(final Environment environment, Collection // use temp logger just to say we are starting. 
we can't use it later on because the node name might not be set Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(environment.settings())); logger.info("initializing ..."); - } try { Settings tmpSettings = Settings.builder().put(environment.settings()) @@ -272,13 +271,13 @@ protected Node(final Environment environment, Collection throw new IllegalStateException("Failed to create node environment", ex); } final boolean hadPredefinedNodeName = NODE_NAME_SETTING.exists(tmpSettings); - Logger logger = Loggers.getLogger(Node.class, tmpSettings); final String nodeId = nodeEnvironment.nodeId(); tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId); + final Logger logger = Loggers.getLogger(Node.class, tmpSettings); // this must be captured after the node name is possibly added to the settings final String nodeName = NODE_NAME_SETTING.get(tmpSettings); if (hadPredefinedNodeName == false) { - logger.info("node name [{}] derived from node ID [{}]; set [{}] to override", nodeName, nodeId, NODE_NAME_SETTING.getKey()); + logger.info("node name derived from node ID [{}]; set [{}] to override", nodeId, NODE_NAME_SETTING.getKey()); } else { logger.info("node name [{}], node ID [{}]", nodeName, nodeId); } From 0627cfac23e9ed1ad47905026999161158edc492 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 19 Apr 2018 09:20:04 +0200 Subject: [PATCH 05/28] test: also assert deprecation warning after clusters have been closed. --- .../elasticsearch/test/test/InternalTestClusterTests.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index a473845a31344..23c665af30a30 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -220,6 +220,7 @@ public Settings transportClientSettings() { assertClusters(cluster0, cluster1, false); long seed = randomLong(); + boolean shouldAssertSettingsDeprecationsAndWarnings = false; try { { Random random = new Random(seed); @@ -231,6 +232,7 @@ public Settings transportClientSettings() { } assertArrayEquals(cluster0.getNodeNames(), cluster1.getNodeNames()); if (cluster0.getNodeNames().length > 0) { + shouldAssertSettingsDeprecationsAndWarnings = true; assertSettingDeprecationsAndWarnings(new Setting[]{NetworkModule.HTTP_ENABLED}); } Iterator iterator1 = cluster1.getClients().iterator(); @@ -245,6 +247,9 @@ public Settings transportClientSettings() { cluster1.afterTest(); } finally { IOUtils.close(cluster0, cluster1); + if (shouldAssertSettingsDeprecationsAndWarnings) { + assertSettingDeprecationsAndWarnings(new Setting[]{NetworkModule.HTTP_ENABLED}); + } } } From f42cdee5e029268a3f114093f86467143be08330 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 19 Apr 2018 10:48:30 +0200 Subject: [PATCH 06/28] Fixed test by asserting deprecation warnings. 
Closes #29603
---
 .../test/java/org/elasticsearch/tribe/TribeServiceTests.java | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java b/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java
index 8890b20ec00e9..d7ca61d15525b 100644
--- a/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java
+++ b/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java
@@ -25,6 +25,7 @@
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.env.Environment;
@@ -57,6 +58,7 @@ public void testMinimalSettings() {
         assertEquals("nodename/tribe1", clientSettings.get("node.name"));
         assertEquals("tribe1", clientSettings.get("tribe.name"));
         assertFalse(NetworkModule.HTTP_ENABLED.get(clientSettings));
+        assertSettingDeprecationsAndWarnings(new Setting[]{NetworkModule.HTTP_ENABLED});
         assertEquals("false", clientSettings.get("node.master"));
         assertEquals("false", clientSettings.get("node.data"));
         assertEquals("false", clientSettings.get("node.ingest"));

From ff45b819ea7b71f71972f30903c364dd3672da20 Mon Sep 17 00:00:00 2001
From: Mayya Sharipova
Date: Wed, 18 Apr 2018 17:41:19 -0400
Subject: [PATCH 07/28] [DOCS] Update highlighting docs (#28802)

- add more explanation to some highlighting parameters
- add a document describing how highlighters work internally
---
 .../request/highlighters-internal.asciidoc | 194 ++++++++++++++++++
 .../search/request/highlighting.asciidoc | 27 ++-
 2 files changed, 217 insertions(+), 4 deletions(-)
 create mode 100644 docs/reference/search/request/highlighters-internal.asciidoc

diff --git a/docs/reference/search/request/highlighters-internal.asciidoc b/docs/reference/search/request/highlighters-internal.asciidoc
new file mode 100644
index 0000000000000..651cdf917ced0
--- /dev/null
+++ b/docs/reference/search/request/highlighters-internal.asciidoc
@@ -0,0 +1,194 @@
+[[highlighter-internal-work]]
+==== How highlighters work internally
+
+Given a query and a text (the content of a document field), the goal of a
+highlighter is to find the best text fragments for the query, and highlight
+the query terms in the found fragments. For this, a highlighter needs to
+address several questions:
+
+- How to break a text into fragments?
+- How to find the best fragments among all fragments?
+- How to highlight the query terms in a fragment?
+
+===== How to break a text into fragments?
+Relevant settings: `fragment_size`, `fragmenter`, `type` of highlighter,
+`boundary_chars`, `boundary_max_scan`, `boundary_scanner`, `boundary_scanner_locale`.
+
+The plain highlighter begins by analyzing the text with the given analyzer,
+creating a token stream from it. It then uses a very simple
+algorithm to break the token stream into fragments: it loops through the terms in the token stream,
+and every time the current term's end_offset exceeds `fragment_size` multiplied by the number of
+created fragments, a new fragment is created. A little more computation is done when using the `span`
+fragmenter, to avoid breaking up text between highlighted terms. But overall, since the breaking is
+done only by `fragment_size`, some fragments can be quite odd, e.g. beginning
+with a punctuation mark.
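+
+To make this rule concrete, here is a toy sketch of that loop. This is a
+deliberately simplified model, not the actual implementation (the real logic
+lives in Lucene's `SimpleFragmenter`); the class and parameter names below are
+made up for illustration:
+
+[source,java]
+----
+import java.util.ArrayList;
+import java.util.List;
+
+final class NaiveFragmenter {
+    // Cut a new fragment whenever a term's end offset reaches
+    // fragmentSize * (fragments created so far + 1).
+    static List<String> fragment(String text, int[] termEndOffsets, int fragmentSize) {
+        List<String> fragments = new ArrayList<>();
+        int start = 0;
+        for (int endOffset : termEndOffsets) {
+            if (endOffset >= fragmentSize * (fragments.size() + 1)) {
+                fragments.add(text.substring(start, endOffset));
+                start = endOffset; // the next fragment begins where this term ended
+            }
+        }
+        if (start < text.length()) {
+            fragments.add(text.substring(start)); // trailing remainder
+        }
+        return fragments;
+    }
+}
+----
+
+Nothing in this rule looks at sentence or word boundaries, which is exactly why
+the resulting fragments can start or end awkwardly.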
+
+Unified or FVH highlighters do a better job of breaking up a text into
+fragments by utilizing Java's `BreakIterator`. This ensures that a fragment
+is a valid sentence as long as `fragment_size` allows for this.
+
+
+===== How to find the best fragments?
+Relevant settings: `number_of_fragments`.
+
+To find the best, most relevant, fragments, a highlighter needs to score
+each fragment with respect to the given query. The goal is to score only those
+terms that participated in generating the 'hit' on the document.
+For some complex queries, this is still work in progress.
+
+The plain highlighter creates an in-memory index from the current token stream,
+and re-runs the original query criteria through Lucene's query execution planner
+to get access to low-level match information for the current text.
+For more complex queries the original query could be converted to a span query,
+as span queries can handle phrases more accurately. The obtained low-level match
+information is then used to score each individual fragment. The scoring method of the plain
+highlighter is quite simple: each fragment is scored by the number of unique
+query terms found in it. The score of an individual term is equal to its boost,
+which is 1 by default. Thus, by default, a fragment that contains one unique query term
+will get a score of 1, a fragment that contains two unique query terms
+will get a score of 2, and so on. The fragments are then sorted by their scores,
+so the highest scored fragments will be output first.
+
+FVH doesn't need to analyze the text and build an in-memory index, as it uses
+pre-indexed document term vectors, and finds among them the terms that correspond to the query.
+FVH scores each fragment by the number of query terms found in it.
+Similarly to the plain highlighter, the score of an individual term is equal to its boost value.
+In contrast to the plain highlighter, all query terms are counted, not only unique terms.
+
+The unified highlighter can use pre-indexed term vectors or pre-indexed term offsets,
+if they are available. Otherwise, similar to the plain highlighter, it has to create
+an in-memory index from the text. The unified highlighter uses the BM25 scoring model
+to score fragments.
+
+
+===== How to highlight the query terms in a fragment?
+Relevant settings: `pre-tags`, `post-tags`.
+
+The goal is to highlight only those terms that participated in generating the 'hit' on the document.
+For some complex boolean queries, this is still work in progress, as highlighters don't reflect
+the boolean logic of a query and only extract leaf (terms, phrases, prefix, etc.) queries.
+
+Given the token stream and the original text, the plain highlighter recomposes the original text,
+highlighting only those terms from the token stream that are contained in the low-level match information
+structure from the previous step.
+
+The FVH and unified highlighters use intermediate data structures to represent
+fragments in some raw form, and then populate them with actual text.
+
+A highlighter uses `pre-tags` and `post-tags` to encode highlighted terms.
+
+
+===== An example of the work of the unified highlighter
+
+Let's look in more detail at how the unified highlighter works.
+
+First, we create an index with a text field `content`, which will be indexed
+with the `english` analyzer, and without offsets or term vectors.
+
+[source,js]
+--------------------------------------------------
+PUT test_index
+{
+  "mappings": {
+    "_doc": {
+      "properties": {
+        "content" : {
+          "type" : "text",
+          "analyzer" : "english"
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+We put the following document into the index:
+
+[source,js]
+--------------------------------------------------
+PUT test_index/_doc/doc1
+{
+  "content" : "For you I'm only a fox like a hundred thousand other foxes. But if you tame me, we'll need each other. You'll be the only boy in the world for me. I'll be the only fox in the world for you."
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+Then we run the following query with a highlight request:
+
+[source,js]
+--------------------------------------------------
+GET test_index/_search
+{
+  "query": {
+    "match_phrase" : {"content" : "only fox"}
+  },
+  "highlight": {
+    "type" : "unified",
+    "number_of_fragments" : 3,
+    "fields": {
+      "content": {}
+    }
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+After `doc1` is found as a hit for this query, this hit will be passed to the
+unified highlighter for highlighting the field `content` of the document.
+Since the field `content` was not indexed with either offsets or term vectors,
+its raw field value will be analyzed, and an in-memory index will be built from
+the terms that match the query:
+
+    {"token":"onli","start_offset":12,"end_offset":16,"position":3},
+    {"token":"fox","start_offset":19,"end_offset":22,"position":5},
+    {"token":"fox","start_offset":53,"end_offset":58,"position":11},
+    {"token":"onli","start_offset":117,"end_offset":121,"position":24},
+    {"token":"onli","start_offset":159,"end_offset":163,"position":34},
+    {"token":"fox","start_offset":164,"end_offset":167,"position":35}
+
+Our complex phrase query will be converted to the span query:
+`spanNear([text:onli, text:fox], 0, true)`, meaning that we are looking for the
+terms "onli" and "fox" within 0 distance from each other, and in the given
+order. The span query will be run against the in-memory index created before,
+to find the following match:
+
+    {"term":"onli", "start_offset":159, "end_offset":163},
+    {"term":"fox", "start_offset":164, "end_offset":167}
+
+In our example, we have got a single match, but there could be several matches.
+Given the matches, the unified highlighter breaks the text of the field into
+so-called "passages". Each passage must contain at least one match.
+The unified highlighter, with the use of Java's `BreakIterator`, ensures that each
+passage represents a full sentence as long as it doesn't exceed `fragment_size`.
+For our example, we have got a single passage with the following properties
+(showing only a subset of the properties here):
+
+    Passage:
+        startOffset: 147
+        endOffset: 189
+        score: 3.7158387
+        matchStarts: [159, 164]
+        matchEnds: [163, 167]
+        numMatches: 2
+
+Notice how a passage has a score, calculated using the BM25 scoring formula
+adapted for passages. Scores allow us to choose the best scoring
+passages if there are more passages available than the `number_of_fragments`
+requested by the user. Scores also let us sort passages by
+`order: "score"` if requested by the user.
+
+As the final step, the unified highlighter will extract from the field's text
+a string corresponding to each passage:
+
+    "I'll be the only fox in the world for you."
+
+and will format this string with the tags, wrapping all matches in it,
+using the passage's `matchStarts` and `matchEnds` information:
+
+    I'll be the <em>only</em> <em>fox</em> in the world for you.
+
+Such formatted strings are the final result of the highlighter, returned
+to the user.
\ No newline at end of file
diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc
index e8e54dcf4b36c..6d6d7a8ab7a1d 100644
--- a/docs/reference/search/request/highlighting.asciidoc
+++ b/docs/reference/search/request/highlighting.asciidoc
@@ -7,6 +7,11 @@ When you request highlights, the response contains an additional `highlight`
 element for each search hit that includes the highlighted fields and the
 highlighted fragments.
 
+NOTE: Highlighters don't reflect the boolean logic of a query when extracting
+ terms to highlight. Thus, for some complex boolean queries (e.g. nested boolean
+ queries, queries using `minimum_should_match` etc.), parts of documents may be
+ highlighted that don't correspond to query matches.
+
 Highlighting requires the actual content of a field. If the field is not
 stored (the mapping does not set `store` to `true`), the actual `_source` is
 loaded and the relevant field is extracted from `_source`.
@@ -143,7 +148,10 @@ by Java's https://docs.oracle.com/javase/8/docs/api/java/text/BreakIterator.html
 You can specify the locale to use with `boundary_scanner_locale`.
 
 boundary_scanner_locale:: Controls which locale is used to search for sentence
-and word boundaries.
+and word boundaries. This parameter takes the form of a language tag,
+e.g. `"en-US"`, `"fr-FR"`, `"ja-JP"`. More info can be found in the
+https://docs.oracle.com/javase/8/docs/api/java/util/Locale.html#forLanguageTag-java.lang.String-[Locale Language Tag]
+documentation. The default value is https://docs.oracle.com/javase/8/docs/api/java/util/Locale.html#ROOT[Locale.ROOT].
 
 encoder:: Indicates if the snippet should be HTML encoded:
 `default` (no encoding) or `html` (HTML-escape the snippet text and then
@@ -203,12 +211,16 @@ handy when you need to highlight short texts such as a title or address, but
 fragmentation is not required. If `number_of_fragments` is 0,
 `fragment_size` is ignored. Defaults to 5.
 
-order:: Sorts highlighted fragments by score when set to `score`. Only valid for
-the `unified` highlighter.
+order:: Sorts highlighted fragments by score when set to `score`. By default,
+fragments will be output in the order they appear in the field (order: `none`).
+Setting this option to `score` will output the most relevant fragments first.
+Each highlighter applies its own logic to compute relevancy scores. See
+the document <<highlighter-internal-work>>
+for more details on how different highlighters find the best fragments.
 
 phrase_limit:: Controls the number of matching phrases in a document that are
 considered. Prevents the `fvh` highlighter from analyzing too many phrases
-and consuming too much memory. When using `matched_fields, `phrase_limit`
+and consuming too much memory. When using `matched_fields`, `phrase_limit`
 phrases per matched field are considered. Raising the limit increases query
 time and consumes more memory. Only supported by the `fvh` highlighter.
 Defaults to 256.
@@ -932,3 +944,10 @@ Response:
 
 If the `number_of_fragments` option is set to `0`,
 `NullFragmenter` is used which does not fragment the text at all.
 This is useful for highlighting the entire contents of a document or field.
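+
+As an aside, the same whole-field highlighting can be requested from Java
+through `HighlightBuilder`. A minimal, untested sketch (the field name
+`content` and the query are made up for illustration):
+
+[source,java]
+----
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
+
+public class WholeFieldHighlightExample {
+    public static SearchSourceBuilder wholeFieldHighlight() {
+        // number_of_fragments = 0 selects the NullFragmenter, so the whole
+        // field content comes back as a single highlighted "fragment".
+        return new SearchSourceBuilder()
+            .query(QueryBuilders.matchQuery("content", "fox"))
+            .highlighter(new HighlightBuilder()
+                .field(new HighlightBuilder.Field("content").numOfFragments(0)));
+    }
+}
+----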
+ + +[[highlighter-internal]] +==== How highlighters work internally + +To learn how highlighters work internally check the document +<>. \ No newline at end of file From 2a1148bd2313bb8441f6a33f233e212de07e1357 Mon Sep 17 00:00:00 2001 From: debadair Date: Wed, 18 Apr 2018 16:56:09 -0700 Subject: [PATCH 08/28] [DOCS] Added include for internal highlighters section. (#29597) --- docs/reference/search/request/highlighting.asciidoc | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index 6d6d7a8ab7a1d..97f31c0db7a08 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -96,7 +96,7 @@ the highlighted documents. This is important if you have large fields because it doesn't require reanalyzing the text to be highlighted. It also requires less disk space than using `term_vectors`. -* Term vectors. If `term_vector` information is provided by setting +* Term vectors. If `term_vector` information is provided by setting `term_vector` to `with_positions_offsets` in the mapping, the `unified` highlighter automatically uses the `term_vector` to highlight the field. It's fast especially for large fields (> `1MB`) and for highlighting multi-term queries like @@ -135,7 +135,7 @@ the `fvh` highlighter. boundaries. The `boundary_max_scan` setting controls how far to scan for boundary characters. Only valid for the `fvh` highlighter. `sentence`::: Break highlighted fragments at the next sentence boundary, as -determined by Java's +determined by Java's https://docs.oracle.com/javase/8/docs/api/java/text/BreakIterator.html[BreakIterator]. You can specify the locale to use with `boundary_scanner_locale`. + @@ -946,8 +946,4 @@ If the `number_of_fragments` option is set to `0`, This is useful for highlighting the entire contents of a document or field. -[[highlighter-internal]] -==== How highlighters work internally - -To learn how highlighters work internally check the document -<>. 
\ No newline at end of file
+include::highlighters-internal.asciidoc[]

From 3fccf062a32635ad0cc3f0ec402a483ed44eccfa Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Thu, 19 Apr 2018 13:47:35 +0200
Subject: [PATCH 09/28] test: Assert deprecated http.enabled setting warning

Closes #29603
---
 .../java/org/elasticsearch/tribe/TribeServiceTests.java | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java b/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java
index d7ca61d15525b..c0f6deff787ac 100644
--- a/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java
+++ b/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java
@@ -216,9 +216,13 @@ public void testTribeNodeDeprecation() throws IOException {
         }
         try (Node node = new MockNode(settings.build(), MockTribePlugin.classpathPlugins)) {
             if (tribeServiceEnable) {
-                assertWarnings("tribe nodes are deprecated in favor of cross-cluster search and will be removed in Elasticsearch 7.0.0");
+                assertSettingDeprecationsAndWarnings(
+                    new Setting[]{NetworkModule.HTTP_ENABLED},
+                    "tribe nodes are deprecated in favor of cross-cluster search and will be removed in Elasticsearch 7.0.0"
+                );
             }
         }
+        assertSettingDeprecationsAndWarnings(new Setting[]{NetworkModule.HTTP_ENABLED});
     }
 
     static class MergableCustomMetaData1 extends TestCustomMetaData

From 4a7617c6ddde60bffbee7177fbb3c9f3d8cdbb68 Mon Sep 17 00:00:00 2001
From: Christoph Büscher
Date: Thu, 19 Apr 2018 13:50:18 +0200
Subject: [PATCH 10/28] [Test] Minor changes to rank_eval tests (#29577)

Removing an enum in favour of local constants to simplify tests and
removing a few deprecated method calls and warnings.
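A sketch of the motivation (an illustrative toy, not code from this commit,
although the constant names below do appear verbatim in the diffs that follow):
rating values derived from `Enum#ordinal()` silently depend on declaration
order, while explicit integer constants pin the expected values down.

```java
// The ordinals happen to be 0 and 1, but only because of declaration order.
enum TestRatingEnum { IRRELEVANT, RELEVANT }

class RatingConstantsDemo {
    // The commit replaces ordinal() lookups with explicit constants like these:
    static final int IRRELEVANT_RATING_0 = 0;
    static final int RELEVANT_RATING_1 = 1;

    public static void main(String[] args) {
        // Both print 1 today; only the constant is guaranteed to stay 1
        // if someone later reorders or extends the enum.
        System.out.println(TestRatingEnum.RELEVANT.ordinal());
        System.out.println(RELEVANT_RATING_1);
    }
}
```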
--- .../DiscountedCumulativeGainTests.java | 2 +- .../rankeval/MeanReciprocalRankTests.java | 11 ++++--- .../index/rankeval/PrecisionAtKTests.java | 29 ++++++++++--------- .../index/rankeval/RankEvalRequestIT.java | 11 ++++--- .../index/rankeval/RankEvalSpecTests.java | 1 - .../index/rankeval/RatedRequestsTests.java | 7 ++--- .../index/rankeval/TestRatingEnum.java | 24 --------------- 7 files changed, 33 insertions(+), 52 deletions(-) delete mode 100644 modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TestRatingEnum.java diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java index 22c3542c0fab4..ba03a734ec760 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -253,7 +253,7 @@ private void assertParsedCorrect(String xContent, Integer expectedUnknownDocRati public static DiscountedCumulativeGain createTestItem() { boolean normalize = randomBoolean(); - Integer unknownDocRating = new Integer(randomIntBetween(0, 1000)); + Integer unknownDocRating = Integer.valueOf(randomIntBetween(0, 1000)); return new DiscountedCumulativeGain(normalize, unknownDocRating, 10); } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java index 6604dbc74a065..0ac9cb0d901e6 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java @@ -46,6 +46,9 @@ public class MeanReciprocalRankTests extends ESTestCase { + private static final int IRRELEVANT_RATING_0 = 0; + private static final int RELEVANT_RATING_1 = 1; + public void testParseFromXContent() throws IOException { String xContent = "{ }"; try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { @@ -84,9 +87,9 @@ public void testMaxAcceptableRank() { int relevantAt = randomIntBetween(0, searchHits); for (int i = 0; i <= searchHits; i++) { if (i == relevantAt) { - ratedDocs.add(new RatedDocument("test", Integer.toString(i), TestRatingEnum.RELEVANT.ordinal())); + ratedDocs.add(new RatedDocument("test", Integer.toString(i), RELEVANT_RATING_1)); } else { - ratedDocs.add(new RatedDocument("test", Integer.toString(i), TestRatingEnum.IRRELEVANT.ordinal())); + ratedDocs.add(new RatedDocument("test", Integer.toString(i), IRRELEVANT_RATING_0)); } } @@ -110,9 +113,9 @@ public void testEvaluationOneRelevantInResults() { int relevantAt = randomIntBetween(0, 9); for (int i = 0; i <= 20; i++) { if (i == relevantAt) { - ratedDocs.add(new RatedDocument("test", Integer.toString(i), TestRatingEnum.RELEVANT.ordinal())); + ratedDocs.add(new RatedDocument("test", Integer.toString(i), RELEVANT_RATING_1)); } else { - ratedDocs.add(new RatedDocument("test", Integer.toString(i), TestRatingEnum.IRRELEVANT.ordinal())); + ratedDocs.add(new RatedDocument("test", Integer.toString(i), IRRELEVANT_RATING_0)); } } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java index aa3dd5a0b7e32..bf5bae4c334a9 100644 --- 
a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java @@ -46,9 +46,12 @@ public class PrecisionAtKTests extends ESTestCase { + private static final int IRRELEVANT_RATING_0 = 0; + private static final int RELEVANT_RATING_1 = 1; + public void testPrecisionAtFiveCalculation() { List rated = new ArrayList<>(); - rated.add(createRatedDoc("test", "0", TestRatingEnum.RELEVANT.ordinal())); + rated.add(createRatedDoc("test", "0", RELEVANT_RATING_1)); EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", toSearchHits(rated, "test"), rated); assertEquals(1, evaluated.getQualityLevel(), 0.00001); assertEquals(1, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); @@ -57,11 +60,11 @@ public void testPrecisionAtFiveCalculation() { public void testPrecisionAtFiveIgnoreOneResult() { List rated = new ArrayList<>(); - rated.add(createRatedDoc("test", "0", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "1", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "2", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "3", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "4", TestRatingEnum.IRRELEVANT.ordinal())); + rated.add(createRatedDoc("test", "0", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "1", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "2", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "3", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "4", IRRELEVANT_RATING_0)); EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", toSearchHits(rated, "test"), rated); assertEquals((double) 4 / 5, evaluated.getQualityLevel(), 0.00001); assertEquals(4, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); @@ -89,11 +92,11 @@ public void testPrecisionAtFiveRelevanceThreshold() { public void testPrecisionAtFiveCorrectIndex() { List rated = new ArrayList<>(); - rated.add(createRatedDoc("test_other", "0", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test_other", "1", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "0", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "1", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "2", TestRatingEnum.IRRELEVANT.ordinal())); + rated.add(createRatedDoc("test_other", "0", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test_other", "1", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "0", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "1", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "2", IRRELEVANT_RATING_0)); // the following search hits contain only the last three documents EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", toSearchHits(rated.subList(2, 5), "test"), rated); assertEquals((double) 2 / 3, evaluated.getQualityLevel(), 0.00001); @@ -103,8 +106,8 @@ public void testPrecisionAtFiveCorrectIndex() { public void testIgnoreUnlabeled() { List rated = new ArrayList<>(); - rated.add(createRatedDoc("test", "0", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "1", TestRatingEnum.RELEVANT.ordinal())); + rated.add(createRatedDoc("test", "0", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "1", RELEVANT_RATING_1)); // add an unlabeled search hit SearchHit[] searchHits = 
Arrays.copyOf(toSearchHits(rated, "test"), 3); searchHits[2] = new SearchHit(2, "2", new Text("testtype"), Collections.emptyMap()); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java index dc0bbddeb62b1..f0242e2bccdb6 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java @@ -43,6 +43,9 @@ import static org.hamcrest.Matchers.instanceOf; public class RankEvalRequestIT extends ESIntegTestCase { + + private static final int RELEVANT_RATING_1 = 1; + @Override protected Collection> transportClientPlugins() { return Arrays.asList(RankEvalPlugin.class); @@ -117,7 +120,7 @@ public void testPrecisionAtRequest() { if (id.equals("1") || id.equals("6")) { assertFalse(hit.getRating().isPresent()); } else { - assertEquals(TestRatingEnum.RELEVANT.ordinal(), hit.getRating().get().intValue()); + assertEquals(RELEVANT_RATING_1, hit.getRating().get().intValue()); } } } @@ -128,7 +131,7 @@ public void testPrecisionAtRequest() { for (RatedSearchHit hit : hitsAndRatings) { String id = hit.getSearchHit().getId(); if (id.equals("1")) { - assertEquals(TestRatingEnum.RELEVANT.ordinal(), hit.getRating().get().intValue()); + assertEquals(RELEVANT_RATING_1, hit.getRating().get().intValue()); } else { assertFalse(hit.getRating().isPresent()); } @@ -259,7 +262,7 @@ public void testBadQuery() { public void testIndicesOptions() { SearchSourceBuilder amsterdamQuery = new SearchSourceBuilder().query(new MatchAllQueryBuilder()); List relevantDocs = createRelevant("2", "3", "4", "5", "6"); - relevantDocs.add(new RatedDocument("test2", "7", TestRatingEnum.RELEVANT.ordinal())); + relevantDocs.add(new RatedDocument("test2", "7", RELEVANT_RATING_1)); List specifications = new ArrayList<>(); specifications.add(new RatedRequest("amsterdam_query", relevantDocs, amsterdamQuery)); RankEvalSpec task = new RankEvalSpec(specifications, new PrecisionAtK()); @@ -322,7 +325,7 @@ public void testIndicesOptions() { private static List createRelevant(String... 
docs) { List relevant = new ArrayList<>(); for (String doc : docs) { - relevant.add(new RatedDocument("test", doc, TestRatingEnum.RELEVANT.ordinal())); + relevant.add(new RatedDocument("test", doc, RELEVANT_RATING_1)); } return relevant; } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java index b49811a9bcaec..e0899b451af11 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java @@ -52,7 +52,6 @@ import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.startsWith; public class RankEvalSpecTests extends ESTestCase { diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java index ad962178f581f..196b50b7f6163 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.rankeval; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; @@ -54,7 +52,6 @@ import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.startsWith; public class RatedRequestsTests extends ESTestCase { @@ -139,8 +136,8 @@ public void testXContentParsingIsNotLenient() throws IOException { Exception exception = expectThrows(Exception.class, () -> RatedRequest.fromXContent(parser)); if (exception instanceof XContentParseException) { XContentParseException xcpe = (XContentParseException) exception; - assertThat(ExceptionsHelper.detailedMessage(xcpe), containsString("unknown field")); - assertThat(ExceptionsHelper.detailedMessage(xcpe), containsString("parser not found")); + assertThat(xcpe.getCause().getMessage(), containsString("unknown field")); + assertThat(xcpe.getCause().getMessage(), containsString("parser not found")); } if (exception instanceof XContentParseException) { assertThat(exception.getMessage(), containsString("[request] failed to parse field")); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TestRatingEnum.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TestRatingEnum.java deleted file mode 100644 index ea44c215d9214..0000000000000 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TestRatingEnum.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.rankeval; - -enum TestRatingEnum { - IRRELEVANT, RELEVANT; -} \ No newline at end of file From bab45e7f663e574e8942569203a40d86a2a0ad56 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 19 Apr 2018 08:17:05 -0400 Subject: [PATCH 11/28] Rename the bulk thread pool to write thread pool (#29593) This commit renames the bulk thread pool to the write thread pool. This is to better reflect the fact that the underlying thread pool is used to execute any document write request (single-document index/delete/update requests, and bulk requests). With this change, we add support for fallback settings thread_pool.bulk.* which will be supported until 7.0.0. We also add a system property so that the display name of the thread pool remains as "bulk" if needed to avoid breaking users. --- docs/reference/cat.asciidoc | 4 +- docs/reference/cat/thread_pool.asciidoc | 4 +- docs/reference/migration/migrate_6_3.asciidoc | 16 ++++ docs/reference/modules/threadpool.asciidoc | 9 +- .../index/reindex/RetryTests.java | 6 +- .../test/cat.thread_pool/10_basic.yml | 25 ++--- .../action/bulk/TransportShardBulkAction.java | 2 +- .../TransportResyncReplicationAction.java | 2 +- .../ingest/PipelineExecutionService.java | 2 +- .../threadpool/ExecutorBuilder.java | 2 +- .../threadpool/FixedExecutorBuilder.java | 96 +++++++++++++++---- .../elasticsearch/threadpool/ThreadPool.java | 8 +- .../action/bulk/BulkProcessorRetryIT.java | 4 +- .../concurrent/EsThreadPoolExecutorTests.java | 6 +- .../IndexShardOperationPermitsTests.java | 22 ++--- .../ThreadPoolSerializationTests.java | 4 +- .../UpdateThreadPoolSettingsTests.java | 4 +- 17 files changed, 149 insertions(+), 67 deletions(-) diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc index 3dff5abc52d9a..7a2262b7962bb 100644 --- a/docs/reference/cat.asciidoc +++ b/docs/reference/cat.asciidoc @@ -93,8 +93,8 @@ Responds with: // TESTRESPONSE[s/9300 27 sLBaIGK/\\d+ \\d+ .+/ _cat] You can also request multiple columns using simple wildcards like -`/_cat/thread_pool?h=ip,bulk.*` to get all headers (or aliases) starting -with `bulk.`. +`/_cat/thread_pool?h=ip,queue*` to get all headers (or aliases) starting +with `queue`. 
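To see the fallback wiring described in the commit message above in isolation: the sketch below is not part of this patch. It is a minimal, hypothetical example assembled from the same Setting API shapes the patch applies in FixedExecutorBuilder further down (a SimpleKey-based setting backed by a deprecated fallback setting). The setting keys are the real ones from this patch; the class name and main method are illustrative only, and the deprecation-warning behavior is an expectation, not something this snippet verifies.

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

public class WriteThreadPoolFallbackSketch {
    public static void main(String[] args) {
        // Legacy key, registered as a deprecated fallback (slated for removal in 7.0.0).
        Setting<Integer> bulkQueueSize = Setting.intSetting(
                "thread_pool.bulk.queue_size", 200, Property.NodeScope, Property.Deprecated);
        // New key; when it is absent from the node settings, the fallback supplies the value.
        Setting<Integer> writeQueueSize = new Setting<>(
                new Setting.SimpleKey("thread_pool.write.queue_size"),
                bulkQueueSize,
                s -> Setting.parseInt(s, Integer.MIN_VALUE, "thread_pool.write.queue_size"),
                Property.NodeScope);

        // A node configured only with the legacy key still resolves the new setting;
        // a deprecation warning is expected because the old key is marked deprecated.
        Settings legacy = Settings.builder().put("thread_pool.bulk.queue_size", 30).build();
        System.out.println(writeQueueSize.get(legacy)); // prints 30

        // A node already using the new key reads it directly.
        Settings current = Settings.builder().put("thread_pool.write.queue_size", 50).build();
        System.out.println(writeQueueSize.get(current)); // prints 50
    }
}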
[float] [[numeric-formats]] diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index de41359a23b62..f1c7664ae33f6 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -15,7 +15,6 @@ Which looks like: [source,txt] -------------------------------------------------- node-0 analyze 0 0 0 -node-0 bulk 0 0 0 node-0 fetch_shard_started 0 0 0 node-0 fetch_shard_store 0 0 0 node-0 flush 0 0 0 @@ -29,6 +28,7 @@ node-0 refresh 0 0 0 node-0 search 0 0 0 node-0 snapshot 0 0 0 node-0 warmer 0 0 0 +node-0 write 0 0 0 -------------------------------------------------- // TESTRESPONSE[s/\d+/\\d+/ _cat] @@ -45,7 +45,6 @@ The second column is the thread pool name -------------------------------------------------- name analyze -bulk fetch_shard_started fetch_shard_store flush @@ -59,6 +58,7 @@ refresh search snapshot warmer +write -------------------------------------------------- diff --git a/docs/reference/migration/migrate_6_3.asciidoc b/docs/reference/migration/migrate_6_3.asciidoc index c6165983342d4..a658a24c7c9bc 100644 --- a/docs/reference/migration/migrate_6_3.asciidoc +++ b/docs/reference/migration/migrate_6_3.asciidoc @@ -44,3 +44,19 @@ users we think this is fine as analyze requests are useful for debugging and so probably do not see high concurrency. If you are impacted by this you can increase the size of the thread pool by using the `thread_pool.analyze.size` setting. + +==== Renaming the bulk thread pool + +The `bulk` thread pool has been renamed to the `write` thread pool. This change +was made to reflect the fact that this thread pool is used to execute all write +operations: single-document index/delete/update requests, as well as bulk +requests. The settings `thread_pool.bulk.size` and `thread_pool.bulk.queue_size` +are still supported as fallback settings although you should transition to +`thread_pool.write.size` and `thread_pool.write.queue_size`, respectively. The +fallback settings will be removed in 7.0.0. Additionally, this means that some +APIs (e.g., the node stats API) will now display the name of this thread pool as +`write`. If this change impacts you (e.g., for monitoring that you have in +place) you can start Elasticsearch with the JVM option +`-Des.thread_pool.write.use_bulk_as_display_name=true` to have Elasticsearch +continue to display the name of this thread pool as `bulk`. Elasticsearch will +stop observing this system property in 7.0.0. diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index fa8522ea1cbb0..9519dbf3e03a8 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -33,11 +33,10 @@ There are several thread pools, but the important ones include: `analyze`:: For analyze requests. Thread pool type is `fixed` with a size of 1, queue size of 16. -`bulk`:: - For bulk operations. Thread pool type is `fixed` - with a size of `# of available processors`, - queue_size of `200`. The maximum size for this pool - is `1 + # of available processors`. +`write`:: + For single-document index/delete/update and bulk requests. Thread pool type + is `fixed` with a size of `# of available processors`, queue_size of `200`. + The maximum size for this pool is `1 + # of available processors`. `snapshot`:: For snapshot/restore operations. 
Thread pool type is `scaling` with a diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java index da0dbf2aae345..131c959af8afc 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java @@ -158,10 +158,10 @@ private void testCase( final Settings nodeSettings = Settings.builder() // use pools of size 1 so we can block them - .put("thread_pool.bulk.size", 1) + .put("thread_pool.write.size", 1) .put("thread_pool.search.size", 1) // use queues of size 1 because size 0 is broken and because search requests need the queue to function - .put("thread_pool.bulk.queue_size", 1) + .put("thread_pool.write.queue_size", 1) .put("thread_pool.search.queue_size", 1) .put("node.attr.color", "blue") .build(); @@ -203,7 +203,7 @@ private void testCase( assertBusy(() -> assertThat(taskStatus(action).getSearchRetries(), greaterThan(0L))); logger.info("Blocking bulk and unblocking search so we start to get bulk rejections"); - CyclicBarrier bulkBlock = blockExecutor(ThreadPool.Names.BULK, node); + CyclicBarrier bulkBlock = blockExecutor(ThreadPool.Names.WRITE, node); initialSearchBlock.await(); logger.info("Waiting for bulk rejections"); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml index 9cd970341412a..7a11388d1bf89 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml @@ -1,5 +1,8 @@ --- "Test cat thread_pool output": + - skip: + version: " - 6.2.99" + reason: the write thread pool was renamed from bulk in 6.3.0 - do: cat.thread_pool: {} @@ -29,30 +32,30 @@ - do: cat.thread_pool: - thread_pool_patterns: bulk,management,flush,index,generic,force_merge + thread_pool_patterns: write,management,flush,index,generic,force_merge h: id,name,active v: true - match: $body: | /^ id \s+ name \s+ active \n - (\S+\s+ bulk \s+ \d+ \n - \S+\s+ flush \s+ \d+ \n + (\S+\s+ flush \s+ \d+ \n \S+\s+ force_merge \s+ \d+ \n \S+\s+ generic \s+ \d+ \n \S+\s+ index \s+ \d+ \n - \S+\s+ management \s+ \d+ \n)+ $/ + \S+\s+ management \s+ \d+ \n + \S+\s+ write \s+ \d+ \n)+ $/ - do: cat.thread_pool: - thread_pool_patterns: bulk + thread_pool_patterns: write h: id,name,type,active,size,queue,queue_size,rejected,largest,completed,min,max,keep_alive v: true - match: $body: | - /^ id \s+ name \s+ type \s+ active \s+ size \s+ queue \s+ queue_size \s+ rejected \s+ largest \s+ completed \s+ min \s+ max \s+ keep_alive \n - (\S+ \s+ bulk \s+ fixed \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ + /^ id \s+ name \s+ type \s+ active \s+ size \s+ queue \s+ queue_size \s+ rejected \s+ largest \s+ completed \s+ min \s+ max \s+ keep_alive \n + (\S+ \s+ write \s+ fixed \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - do: cat.thread_pool: @@ -68,12 +71,12 @@ - do: cat.thread_pool: - thread_pool_patterns: bulk,index,search + thread_pool_patterns: write,index,search size: "" - match: $body: | / #node_name name active queue rejected - ^ (\S+ \s+ bulk \s+ \d+ \s+ \d+ \s+ \d+ \n - \S+ \s+ index \s+ \d+ \s+ \d+ \s+ \d+ \n - \S+ \s+ search \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ + ^ (\S+ \s+ index \s+ 
\d+ \s+ \d+ \s+ \d+ \n + \S+ \s+ search \s+ \d+ \s+ \d+ \s+ \d+ \n + \S+ \s+ write \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 7221118d2ef50..8fb490c4b6531 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -83,7 +83,7 @@ public TransportShardBulkAction(Settings settings, TransportService transportSer MappingUpdatedAction mappingUpdatedAction, UpdateHelper updateHelper, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - indexNameExpressionResolver, BulkShardRequest::new, BulkShardRequest::new, ThreadPool.Names.BULK); + indexNameExpressionResolver, BulkShardRequest::new, BulkShardRequest::new, ThreadPool.Names.WRITE); this.updateHelper = updateHelper; this.mappingUpdatedAction = mappingUpdatedAction; } diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index 4e7c66afdcaf0..c182fb24ffb11 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -60,7 +60,7 @@ public TransportResyncReplicationAction(Settings settings, TransportService tran ShardStateAction shardStateAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - indexNameExpressionResolver, ResyncReplicationRequest::new, ResyncReplicationRequest::new, ThreadPool.Names.BULK); + indexNameExpressionResolver, ResyncReplicationRequest::new, ResyncReplicationRequest::new, ThreadPool.Names.WRITE); } @Override diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index 69018db2c9365..d2503476d3353 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -56,7 +56,7 @@ public PipelineExecutionService(PipelineStore store, ThreadPool threadPool) { public void executeBulkRequest(Iterable actionRequests, BiConsumer itemFailureHandler, Consumer completionHandler) { - threadPool.executor(ThreadPool.Names.BULK).execute(new AbstractRunnable() { + threadPool.executor(ThreadPool.Names.WRITE).execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { diff --git a/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java b/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java index 314eb1df71a4b..b50443a8f9e06 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java @@ -48,7 +48,7 @@ protected static String settingsKey(final String prefix, final String key) { } protected int applyHardSizeLimit(final Settings settings, final String name) { - if 
(name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) { + if (name.equals("bulk") || name.equals(ThreadPool.Names.INDEX) || name.equals(ThreadPool.Names.WRITE)) { return 1 + EsExecutors.numberOfProcessors(settings); } else { return Integer.MAX_VALUE; diff --git a/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java b/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java index 94db6cb64e2c8..e4ec0ef3d45a4 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java +++ b/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.threadpool; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.SizeValue; @@ -38,7 +39,9 @@ public final class FixedExecutorBuilder extends ExecutorBuilder { private final Setting sizeSetting; + private final Setting fallbackSizeSetting; private final Setting queueSizeSetting; + private final Setting fallbackQueueSizeSetting; /** * Construct a fixed executor builder; the settings will have the key prefix "thread_pool." followed by the executor name. @@ -52,6 +55,19 @@ public final class FixedExecutorBuilder extends ExecutorBuilder fallbackSizeSetting = sizeSetting(settings, fallbackName, size, fallbackPrefix, fallbackProperties); + this.sizeSetting = + new Setting<>( + new Setting.SimpleKey(sizeKey), + fallbackSizeSetting, + s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey), + properties); + this.fallbackSizeSetting = fallbackSizeSetting; + final Setting fallbackQueueSizeSetting = queueSizeSetting(fallbackPrefix, queueSize, fallbackProperties); + this.queueSizeSetting = + new Setting<>( + new Setting.SimpleKey(queueSizeKey), + fallbackQueueSizeSetting, + s -> Setting.parseInt(s, Integer.MIN_VALUE, queueSizeKey), + properties); + this.fallbackQueueSizeSetting = fallbackQueueSizeSetting; } - this.sizeSetting = - new Setting<>( - sizeKey, - s -> Integer.toString(size), - s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey), - properties); - final String queueSizeKey = settingsKey(prefix, "queue_size"); - this.queueSizeSetting = Setting.intSetting(queueSizeKey, queueSize, properties); + } + + private Setting sizeSetting( + final Settings settings, final String name, final int size, final String prefix, final Setting.Property[] properties) { + final String sizeKey = settingsKey(prefix, "size"); + return new Setting<>( + sizeKey, + s -> Integer.toString(size), + s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey), + properties); + } + + private Setting queueSizeSetting(final String prefix, final int queueSize, final Setting.Property[] properties) { + return Setting.intSetting(settingsKey(prefix, "queue_size"), queueSize, properties); } @Override public List> getRegisteredSettings() { - return Arrays.asList(sizeSetting, queueSizeSetting); + if (fallbackSizeSetting == null && fallbackQueueSizeSetting == null) { + return Arrays.asList(sizeSetting, queueSizeSetting); + } else { + assert fallbackSizeSetting != null && fallbackQueueSizeSetting != null; + return Arrays.asList(sizeSetting, fallbackSizeSetting, queueSizeSetting, fallbackQueueSizeSetting); + } } @Override @@ -132,8 +190,14 @@ ThreadPool.ExecutorHolder build(final FixedExecutorSettings settings, final Thre final ThreadFactory threadFactory = 
EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name())); final ExecutorService executor = EsExecutors.newFixed(settings.nodeName + "/" + name(), size, queueSize, threadFactory, threadContext); + final String name; + if ("write".equals(name()) && Booleans.parseBoolean(System.getProperty("es.thread_pool.write.use_bulk_as_display_name", "false"))) { + name = "bulk"; + } else { + name = name(); + } final ThreadPool.Info info = - new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.FIXED, size, size, null, queueSize < 0 ? null : new SizeValue(queueSize)); + new ThreadPool.Info(name, ThreadPool.ThreadPoolType.FIXED, size, size, null, queueSize < 0 ? null : new SizeValue(queueSize)); return new ThreadPool.ExecutorHolder(executor, info); } diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index a8f5cd2c33578..bf3641f18b172 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -69,7 +69,7 @@ public static class Names { public static final String GET = "get"; public static final String ANALYZE = "analyze"; public static final String INDEX = "index"; - public static final String BULK = "bulk"; + public static final String WRITE = "write"; public static final String SEARCH = "search"; public static final String MANAGEMENT = "management"; public static final String FLUSH = "flush"; @@ -126,7 +126,7 @@ public static ThreadPoolType fromType(String type) { map.put(Names.GET, ThreadPoolType.FIXED); map.put(Names.ANALYZE, ThreadPoolType.FIXED); map.put(Names.INDEX, ThreadPoolType.FIXED); - map.put(Names.BULK, ThreadPoolType.FIXED); + map.put(Names.WRITE, ThreadPoolType.FIXED); map.put(Names.SEARCH, ThreadPoolType.FIXED_AUTO_QUEUE_SIZE); map.put(Names.MANAGEMENT, ThreadPoolType.SCALING); map.put(Names.FLUSH, ThreadPoolType.SCALING); @@ -170,7 +170,7 @@ public ThreadPool(final Settings settings, final ExecutorBuilder... 
customBui final int genericThreadPoolMax = boundedBy(4 * availableProcessors, 128, 512); builders.put(Names.GENERIC, new ScalingExecutorBuilder(Names.GENERIC, 4, genericThreadPoolMax, TimeValue.timeValueSeconds(30))); builders.put(Names.INDEX, new FixedExecutorBuilder(settings, Names.INDEX, availableProcessors, 200, true)); - builders.put(Names.BULK, new FixedExecutorBuilder(settings, Names.BULK, availableProcessors, 200)); // now that we reuse bulk for index/delete ops + builders.put(Names.WRITE, new FixedExecutorBuilder(settings, Names.WRITE, "bulk", availableProcessors, 200)); builders.put(Names.GET, new FixedExecutorBuilder(settings, Names.GET, availableProcessors, 1000)); builders.put(Names.ANALYZE, new FixedExecutorBuilder(settings, Names.ANALYZE, 1, 16)); builders.put(Names.SEARCH, new AutoQueueAdjustingExecutorBuilder(settings, @@ -264,7 +264,7 @@ public Info info(String name) { public ThreadPoolStats stats() { List stats = new ArrayList<>(); for (ExecutorHolder holder : executors.values()) { - String name = holder.info.getName(); + final String name = holder.info.getName(); // no need to have info on "same" thread pool if ("same".equals(name)) { continue; diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java index 1a07eac1adbd5..4b96f3d17543c 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java @@ -54,8 +54,8 @@ protected Settings nodeSettings(int nodeOrdinal) { // (see also ThreadedActionListener which is happily spawning threads even when we already got rejected) //.put("thread_pool.listener.queue_size", 1) .put("thread_pool.get.queue_size", 1) - // default is 50 - .put("thread_pool.bulk.queue_size", 30) + // default is 200 + .put("thread_pool.write.queue_size", 30) .build(); } diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutorTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutorTests.java index b6110a85eceb6..33917ceff685b 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutorTests.java @@ -35,8 +35,8 @@ public class EsThreadPoolExecutorTests extends ESSingleNodeTestCase { protected Settings nodeSettings() { return Settings.builder() .put("node.name", "es-thread-pool-executor-tests") - .put("thread_pool.bulk.size", 1) - .put("thread_pool.bulk.queue_size", 0) + .put("thread_pool.write.size", 1) + .put("thread_pool.write.queue_size", 0) .put("thread_pool.search.size", 1) .put("thread_pool.search.queue_size", 1) .build(); @@ -44,7 +44,7 @@ protected Settings nodeSettings() { public void testRejectedExecutionExceptionContainsNodeName() { // we test a fixed and an auto-queue executor but not scaling since it does not reject - runThreadPoolExecutorTest(1, ThreadPool.Names.BULK); + runThreadPoolExecutorTest(1, ThreadPool.Names.WRITE); runThreadPoolExecutorTest(2, ThreadPool.Names.SEARCH); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java index a8e282e2c17e5..28f623b23e282 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java @@ -69,18 +69,18 @@ public class IndexShardOperationPermitsTests extends ESTestCase { @BeforeClass public static void setupThreadPool() { - int bulkThreadPoolSize = randomIntBetween(1, 2); - int bulkThreadPoolQueueSize = randomIntBetween(1, 2); + int writeThreadPoolSize = randomIntBetween(1, 2); + int writeThreadPoolQueueSize = randomIntBetween(1, 2); threadPool = new TestThreadPool("IndexShardOperationsLockTests", Settings.builder() - .put("thread_pool." + ThreadPool.Names.BULK + ".size", bulkThreadPoolSize) - .put("thread_pool." + ThreadPool.Names.BULK + ".queue_size", bulkThreadPoolQueueSize) + .put("thread_pool." + ThreadPool.Names.WRITE + ".size", writeThreadPoolSize) + .put("thread_pool." + ThreadPool.Names.WRITE + ".queue_size", writeThreadPoolQueueSize) .build()); - assertThat(threadPool.executor(ThreadPool.Names.BULK), instanceOf(EsThreadPoolExecutor.class)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.BULK)).getCorePoolSize(), equalTo(bulkThreadPoolSize)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.BULK)).getMaximumPoolSize(), equalTo(bulkThreadPoolSize)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.BULK)).getQueue().remainingCapacity(), - equalTo(bulkThreadPoolQueueSize)); + assertThat(threadPool.executor(ThreadPool.Names.WRITE), instanceOf(EsThreadPoolExecutor.class)); + assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getCorePoolSize(), equalTo(writeThreadPoolSize)); + assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getMaximumPoolSize(), equalTo(writeThreadPoolSize)); + assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getQueue().remainingCapacity(), + equalTo(writeThreadPoolQueueSize)); } @AfterClass @@ -110,8 +110,8 @@ class DummyException extends RuntimeException {} CountDownLatch latch = new CountDownLatch(numThreads / 4); boolean forceExecution = randomBoolean(); for (int i = 0; i < numThreads; i++) { - // the bulk thread pool uses a bounded size and can get rejections, see setupThreadPool - String threadPoolName = randomFrom(ThreadPool.Names.BULK, ThreadPool.Names.GENERIC); + // the write thread pool uses a bounded size and can get rejections, see setupThreadPool + String threadPoolName = randomFrom(ThreadPool.Names.WRITE, ThreadPool.Names.GENERIC); boolean failingListener = randomBoolean(); PlainActionFuture future = new PlainActionFuture() { @Override diff --git a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java index 3830d10f69b3c..546017f807ac0 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java @@ -87,9 +87,9 @@ public void testThatToXContentWritesOutUnboundedCorrectly() throws Exception { } public void testThatNegativeSettingAllowsToStart() throws InterruptedException { - Settings settings = Settings.builder().put("node.name", "bulk").put("thread_pool.bulk.queue_size", "-1").build(); + Settings settings = Settings.builder().put("node.name", "write").put("thread_pool.write.queue_size", "-1").build(); ThreadPool threadPool = new ThreadPool(settings); - assertThat(threadPool.info("bulk").getQueueSize(), is(nullValue())); + 
assertThat(threadPool.info("write").getQueueSize(), is(nullValue())); terminate(threadPool); } diff --git a/server/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/server/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java index f0f8c70a3f9d4..6ae41febf3b19 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java @@ -61,7 +61,7 @@ public void testCorrectThreadPoolTypePermittedInSettings() throws InterruptedExc } public void testIndexingThreadPoolsMaxSize() throws InterruptedException { - final String name = randomFrom(Names.BULK, Names.INDEX); + final String name = randomFrom(Names.WRITE, Names.INDEX); final int maxSize = 1 + EsExecutors.numberOfProcessors(Settings.EMPTY); final int tooBig = randomIntBetween(1 + maxSize, Integer.MAX_VALUE); @@ -92,7 +92,7 @@ public void testIndexingThreadPoolsMaxSize() throws InterruptedException { } private static int getExpectedThreadPoolSize(Settings settings, String name, int size) { - if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) { + if (name.equals(ThreadPool.Names.WRITE) || name.equals(ThreadPool.Names.INDEX)) { return Math.min(size, EsExecutors.numberOfProcessors(settings)); } else { return size; From 1781141d9aaef6c98112ab9926f42b7325cbd94c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 10 Apr 2018 10:14:52 +0200 Subject: [PATCH 12/28] Make ranking evaluation details accessible for client Allow high level java rest client to access details of the metric calculation by making them accessible across packages. Also renaming the inner `Breakdown` classes of the evaluation metrics to `Detail` to better communicate their use. --- .../index/rankeval/MeanReciprocalRank.java | 22 ++++++------ .../index/rankeval/PrecisionAtK.java | 24 ++++++------- .../RankEvalNamedXContentProvider.java | 4 +-- .../index/rankeval/RankEvalPlugin.java | 4 +-- .../index/rankeval/EvalQueryQualityTests.java | 6 ++-- .../rankeval/MeanReciprocalRankTests.java | 8 ++--- .../index/rankeval/PrecisionAtKTests.java | 36 +++++++++---------- .../index/rankeval/RankEvalRequestIT.java | 12 +++---- 8 files changed, 58 insertions(+), 58 deletions(-) diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java index 0f51f6d5d6369..eb20dc8c680f9 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java @@ -128,7 +128,7 @@ public EvalQueryQuality evaluate(String taskId, SearchHit[] hits, double reciprocalRank = (firstRelevant == -1) ? 
0 : 1.0d / firstRelevant; EvalQueryQuality evalQueryQuality = new EvalQueryQuality(taskId, reciprocalRank); - evalQueryQuality.setMetricDetails(new Breakdown(firstRelevant)); + evalQueryQuality.setMetricDetails(new Detail(firstRelevant)); evalQueryQuality.addHitsAndRatings(ratedHits); return evalQueryQuality; } @@ -181,16 +181,16 @@ public final int hashCode() { return Objects.hash(relevantRatingThreshhold, k); } - static class Breakdown implements MetricDetail { + public static final class Detail implements MetricDetail { private final int firstRelevantRank; private static ParseField FIRST_RELEVANT_RANK_FIELD = new ParseField("first_relevant"); - Breakdown(int firstRelevantRank) { + Detail(int firstRelevantRank) { this.firstRelevantRank = firstRelevantRank; } - Breakdown(StreamInput in) throws IOException { + Detail(StreamInput in) throws IOException { this.firstRelevantRank = in.readVInt(); } @@ -206,15 +206,15 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) return builder.field(FIRST_RELEVANT_RANK_FIELD.getPreferredName(), firstRelevantRank); } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, args -> { - return new Breakdown((Integer) args[0]); + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, args -> { + return new Detail((Integer) args[0]); }); static { PARSER.declareInt(constructorArg(), FIRST_RELEVANT_RANK_FIELD); } - public static Breakdown fromXContent(XContentParser parser) { + public static Detail fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } @@ -232,24 +232,24 @@ public String getWriteableName() { * the ranking of the first relevant document, or -1 if no relevant document was * found */ - int getFirstRelevantRank() { + public int getFirstRelevantRank() { return firstRelevantRank; } @Override - public final boolean equals(Object obj) { + public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } - MeanReciprocalRank.Breakdown other = (MeanReciprocalRank.Breakdown) obj; + MeanReciprocalRank.Detail other = (MeanReciprocalRank.Detail) obj; return Objects.equals(firstRelevantRank, other.firstRelevantRank); } @Override - public final int hashCode() { + public int hashCode() { return Objects.hash(firstRelevantRank); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/PrecisionAtK.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/PrecisionAtK.java index 15d955935eeff..136158ea5cba7 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/PrecisionAtK.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/PrecisionAtK.java @@ -181,7 +181,7 @@ public EvalQueryQuality evaluate(String taskId, SearchHit[] hits, } EvalQueryQuality evalQueryQuality = new EvalQueryQuality(taskId, precision); evalQueryQuality.setMetricDetails( - new PrecisionAtK.Breakdown(truePositives, truePositives + falsePositives)); + new PrecisionAtK.Detail(truePositives, truePositives + falsePositives)); evalQueryQuality.addHitsAndRatings(ratedSearchHits); return evalQueryQuality; } @@ -217,19 +217,19 @@ public final int hashCode() { return Objects.hash(relevantRatingThreshhold, ignoreUnlabeled, k); } - static class Breakdown implements MetricDetail { + public static final class Detail implements MetricDetail { private static final ParseField DOCS_RETRIEVED_FIELD = new 
ParseField("docs_retrieved"); private static final ParseField RELEVANT_DOCS_RETRIEVED_FIELD = new ParseField("relevant_docs_retrieved"); private int relevantRetrieved; private int retrieved; - Breakdown(int relevantRetrieved, int retrieved) { + Detail(int relevantRetrieved, int retrieved) { this.relevantRetrieved = relevantRetrieved; this.retrieved = retrieved; } - Breakdown(StreamInput in) throws IOException { + Detail(StreamInput in) throws IOException { this.relevantRetrieved = in.readVInt(); this.retrieved = in.readVInt(); } @@ -242,8 +242,8 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) return builder; } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, args -> { - return new Breakdown((Integer) args[0], (Integer) args[1]); + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, args -> { + return new Detail((Integer) args[0], (Integer) args[1]); }); static { @@ -251,7 +251,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) PARSER.declareInt(constructorArg(), DOCS_RETRIEVED_FIELD); } - public static Breakdown fromXContent(XContentParser parser) { + public static Detail fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } @@ -266,29 +266,29 @@ public String getWriteableName() { return NAME; } - int getRelevantRetrieved() { + public int getRelevantRetrieved() { return relevantRetrieved; } - int getRetrieved() { + public int getRetrieved() { return retrieved; } @Override - public final boolean equals(Object obj) { + public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } - PrecisionAtK.Breakdown other = (PrecisionAtK.Breakdown) obj; + PrecisionAtK.Detail other = (PrecisionAtK.Detail) obj; return Objects.equals(relevantRetrieved, other.relevantRetrieved) && Objects.equals(retrieved, other.retrieved); } @Override - public final int hashCode() { + public int hashCode() { return Objects.hash(relevantRetrieved, retrieved); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java index 54d68774a016e..c5785ca3847d4 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java @@ -38,9 +38,9 @@ public List getNamedXContentParsers() { namedXContent.add(new NamedXContentRegistry.Entry(EvaluationMetric.class, new ParseField(DiscountedCumulativeGain.NAME), DiscountedCumulativeGain::fromXContent)); namedXContent.add(new NamedXContentRegistry.Entry(MetricDetail.class, new ParseField(PrecisionAtK.NAME), - PrecisionAtK.Breakdown::fromXContent)); + PrecisionAtK.Detail::fromXContent)); namedXContent.add(new NamedXContentRegistry.Entry(MetricDetail.class, new ParseField(MeanReciprocalRank.NAME), - MeanReciprocalRank.Breakdown::fromXContent)); + MeanReciprocalRank.Detail::fromXContent)); return namedXContent; } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java index d4ccd7c2180fe..884cf3bafdcda 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java +++ 
b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java @@ -60,9 +60,9 @@ public List getNamedWriteables() { namedWriteables.add(new NamedWriteableRegistry.Entry(EvaluationMetric.class, MeanReciprocalRank.NAME, MeanReciprocalRank::new)); namedWriteables.add( new NamedWriteableRegistry.Entry(EvaluationMetric.class, DiscountedCumulativeGain.NAME, DiscountedCumulativeGain::new)); - namedWriteables.add(new NamedWriteableRegistry.Entry(MetricDetail.class, PrecisionAtK.NAME, PrecisionAtK.Breakdown::new)); + namedWriteables.add(new NamedWriteableRegistry.Entry(MetricDetail.class, PrecisionAtK.NAME, PrecisionAtK.Detail::new)); namedWriteables - .add(new NamedWriteableRegistry.Entry(MetricDetail.class, MeanReciprocalRank.NAME, MeanReciprocalRank.Breakdown::new)); + .add(new NamedWriteableRegistry.Entry(MetricDetail.class, MeanReciprocalRank.NAME, MeanReciprocalRank.Detail::new)); return namedWriteables; } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java index df6de75ba2cb4..112cf4eaaf72e 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java @@ -69,9 +69,9 @@ public static EvalQueryQuality randomEvalQueryQuality() { randomDoubleBetween(0.0, 1.0, true)); if (randomBoolean()) { if (randomBoolean()) { - evalQueryQuality.setMetricDetails(new PrecisionAtK.Breakdown(randomIntBetween(0, 1000), randomIntBetween(0, 1000))); + evalQueryQuality.setMetricDetails(new PrecisionAtK.Detail(randomIntBetween(0, 1000), randomIntBetween(0, 1000))); } else { - evalQueryQuality.setMetricDetails(new MeanReciprocalRank.Breakdown(randomIntBetween(0, 1000))); + evalQueryQuality.setMetricDetails(new MeanReciprocalRank.Detail(randomIntBetween(0, 1000))); } } evalQueryQuality.addHitsAndRatings(ratedHits); @@ -137,7 +137,7 @@ private static EvalQueryQuality mutateTestItem(EvalQueryQuality original) { break; case 2: if (metricDetails == null) { - metricDetails = new PrecisionAtK.Breakdown(1, 5); + metricDetails = new PrecisionAtK.Detail(1, 5); } else { metricDetails = null; } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java index 0ac9cb0d901e6..c9ff39bbd118a 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java @@ -96,7 +96,7 @@ public void testMaxAcceptableRank() { int rankAtFirstRelevant = relevantAt + 1; EvalQueryQuality evaluation = reciprocalRank.evaluate("id", hits, ratedDocs); assertEquals(1.0 / rankAtFirstRelevant, evaluation.getQualityLevel(), Double.MIN_VALUE); - assertEquals(rankAtFirstRelevant, ((MeanReciprocalRank.Breakdown) evaluation.getMetricDetails()).getFirstRelevantRank()); + assertEquals(rankAtFirstRelevant, ((MeanReciprocalRank.Detail) evaluation.getMetricDetails()).getFirstRelevantRank()); // check that if we have fewer search hits than relevant doc position, // we don't find any result and get 0.0 quality level @@ -121,7 +121,7 @@ public void testEvaluationOneRelevantInResults() { EvalQueryQuality evaluation = reciprocalRank.evaluate("id", hits, ratedDocs); assertEquals(1.0 / 
(relevantAt + 1), evaluation.getQualityLevel(), Double.MIN_VALUE); - assertEquals(relevantAt + 1, ((MeanReciprocalRank.Breakdown) evaluation.getMetricDetails()).getFirstRelevantRank()); + assertEquals(relevantAt + 1, ((MeanReciprocalRank.Detail) evaluation.getMetricDetails()).getFirstRelevantRank()); } /** @@ -141,7 +141,7 @@ public void testPrecisionAtFiveRelevanceThreshold() { MeanReciprocalRank reciprocalRank = new MeanReciprocalRank(2, 10); EvalQueryQuality evaluation = reciprocalRank.evaluate("id", hits, rated); assertEquals((double) 1 / 3, evaluation.getQualityLevel(), 0.00001); - assertEquals(3, ((MeanReciprocalRank.Breakdown) evaluation.getMetricDetails()).getFirstRelevantRank()); + assertEquals(3, ((MeanReciprocalRank.Detail) evaluation.getMetricDetails()).getFirstRelevantRank()); } public void testCombine() { @@ -165,7 +165,7 @@ public void testNoResults() throws Exception { SearchHit[] hits = new SearchHit[0]; EvalQueryQuality evaluated = (new MeanReciprocalRank()).evaluate("id", hits, Collections.emptyList()); assertEquals(0.0d, evaluated.getQualityLevel(), 0.00001); - assertEquals(-1, ((MeanReciprocalRank.Breakdown) evaluated.getMetricDetails()).getFirstRelevantRank()); + assertEquals(-1, ((MeanReciprocalRank.Detail) evaluated.getMetricDetails()).getFirstRelevantRank()); } public void testXContentRoundtrip() throws IOException { diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java index bf5bae4c334a9..3efff57920b84 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java @@ -54,8 +54,8 @@ public void testPrecisionAtFiveCalculation() { rated.add(createRatedDoc("test", "0", RELEVANT_RATING_1)); EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", toSearchHits(rated, "test"), rated); assertEquals(1, evaluated.getQualityLevel(), 0.00001); - assertEquals(1, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(1, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(1, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(1, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); } public void testPrecisionAtFiveIgnoreOneResult() { @@ -67,8 +67,8 @@ public void testPrecisionAtFiveIgnoreOneResult() { rated.add(createRatedDoc("test", "4", IRRELEVANT_RATING_0)); EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", toSearchHits(rated, "test"), rated); assertEquals((double) 4 / 5, evaluated.getQualityLevel(), 0.00001); - assertEquals(4, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(5, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(4, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(5, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); } /** @@ -86,8 +86,8 @@ public void testPrecisionAtFiveRelevanceThreshold() { PrecisionAtK precisionAtN = new PrecisionAtK(2, false, 5); EvalQueryQuality evaluated = precisionAtN.evaluate("id", toSearchHits(rated, "test"), rated); assertEquals((double) 3 / 5, evaluated.getQualityLevel(), 0.00001); - assertEquals(3, ((PrecisionAtK.Breakdown) 
evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(5, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(3, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(5, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); } public void testPrecisionAtFiveCorrectIndex() { @@ -100,8 +100,8 @@ public void testPrecisionAtFiveCorrectIndex() { // the following search hits contain only the last three documents EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", toSearchHits(rated.subList(2, 5), "test"), rated); assertEquals((double) 2 / 3, evaluated.getQualityLevel(), 0.00001); - assertEquals(2, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(3, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(2, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(3, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); } public void testIgnoreUnlabeled() { @@ -115,15 +115,15 @@ public void testIgnoreUnlabeled() { EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", searchHits, rated); assertEquals((double) 2 / 3, evaluated.getQualityLevel(), 0.00001); - assertEquals(2, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(3, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(2, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(3, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); // also try with setting `ignore_unlabeled` PrecisionAtK prec = new PrecisionAtK(1, true, 10); evaluated = prec.evaluate("id", searchHits, rated); assertEquals((double) 2 / 2, evaluated.getQualityLevel(), 0.00001); - assertEquals(2, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(2, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(2, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(2, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); } public void testNoRatedDocs() throws Exception { @@ -134,23 +134,23 @@ public void testNoRatedDocs() throws Exception { } EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", hits, Collections.emptyList()); assertEquals(0.0d, evaluated.getQualityLevel(), 0.00001); - assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(5, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(0, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(5, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); // also try with setting `ignore_unlabeled` PrecisionAtK prec = new PrecisionAtK(1, true, 10); evaluated = prec.evaluate("id", hits, Collections.emptyList()); assertEquals(0.0d, evaluated.getQualityLevel(), 0.00001); - assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(0, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(0, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); } public void 
testNoResults() throws Exception { SearchHit[] hits = new SearchHit[0]; EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", hits, Collections.emptyList()); assertEquals(0.0d, evaluated.getQualityLevel(), 0.00001); - assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(0, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(0, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); } public void testParseFromXContent() throws IOException { diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java index f0242e2bccdb6..8a3bad50b22da 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java @@ -25,7 +25,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.rankeval.PrecisionAtK.Breakdown; +import org.elasticsearch.index.rankeval.PrecisionAtK.Detail; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -271,7 +271,7 @@ public void testIndicesOptions() { request.setRankEvalSpec(task); RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); - Breakdown details = (PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + Detail details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(7, details.getRetrieved()); assertEquals(6, details.getRelevantRetrieved()); @@ -280,7 +280,7 @@ public void testIndicesOptions() { request.indicesOptions(IndicesOptions.fromParameters(null, "true", null, SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); - details = (PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(6, details.getRetrieved()); assertEquals(5, details.getRelevantRetrieved()); @@ -295,12 +295,12 @@ public void testIndicesOptions() { request = new RankEvalRequest(task, new String[] { "tes*" }); request.indicesOptions(IndicesOptions.fromParameters("none", null, null, SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); - details = (PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(0, details.getRetrieved()); request.indicesOptions(IndicesOptions.fromParameters("open", null, null, SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); - details = (PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + details = (PrecisionAtK.Detail) 
response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(6, details.getRetrieved()); assertEquals(5, details.getRelevantRetrieved()); @@ -313,7 +313,7 @@ public void testIndicesOptions() { request = new RankEvalRequest(task, new String[] { "bad*" }); request.indicesOptions(IndicesOptions.fromParameters(null, null, "true", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); - details = (PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(0, details.getRetrieved()); request.indicesOptions(IndicesOptions.fromParameters(null, null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); From 7d366c00574229bf50bdd7405a57d643f2f818c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 10 Apr 2018 10:15:31 +0200 Subject: [PATCH 13/28] [Docs] Add rankEval method for Java HL client This change adds documentation about using the ranking evaluation API from the high level Java REST client. Closes #28694 --- .../documentation/SearchDocumentationIT.java | 84 +++++++++++++++++ .../high-level/search/rank-eval.asciidoc | 89 +++++++++++++++++++ .../high-level/supported-apis.asciidoc | 2 + 3 files changed, 175 insertions(+) create mode 100644 docs/java-rest/high-level/search/rank-eval.asciidoc diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 96d962c3ac553..dac2e23934e9a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -37,6 +37,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.unit.TimeValue; @@ -44,6 +45,16 @@ import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.rankeval.EvalQueryQuality; +import org.elasticsearch.index.rankeval.EvaluationMetric; +import org.elasticsearch.index.rankeval.MetricDetail; +import org.elasticsearch.index.rankeval.PrecisionAtK; +import org.elasticsearch.index.rankeval.RankEvalRequest; +import org.elasticsearch.index.rankeval.RankEvalResponse; +import org.elasticsearch.index.rankeval.RankEvalSpec; +import org.elasticsearch.index.rankeval.RatedDocument; +import org.elasticsearch.index.rankeval.RatedRequest; +import org.elasticsearch.index.rankeval.RatedSearchHit; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchHit; @@ -74,6 +85,7 @@ import org.elasticsearch.search.suggest.term.TermSuggestion; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -688,6 +700,78 @@ public void onFailure(Exception e) { } } + public void testRankEval() throws Exception { + indexSearchTestData(); + 
RestHighLevelClient client = highLevelClient(); + { + // tag::rank-eval-request-basic + EvaluationMetric metric = new PrecisionAtK(); // <1> + List ratedDocs = new ArrayList<>(); + ratedDocs.add(new RatedDocument("posts", "1", 1)); // <2> + SearchSourceBuilder searchQuery = new SearchSourceBuilder(); + searchQuery.query(QueryBuilders.matchQuery("user", "kimchy"));// <3> + RatedRequest ratedRequest = // <4> + new RatedRequest("kimchy_query", ratedDocs, searchQuery); + List ratedRequests = Arrays.asList(ratedRequest); + RankEvalSpec specification = + new RankEvalSpec(ratedRequests, metric); // <5> + RankEvalRequest request = // <6> + new RankEvalRequest(specification, new String[] { "posts" }); + // end::rank-eval-request-basic + + // tag::rank-eval-execute + RankEvalResponse response = client.rankEval(request); + // end::rank-eval-execute + + logger.warn(Strings.toString(response)); + // tag::rank-eval-response + double evaluationResult = response.getEvaluationResult(); // <1> + assertEquals(1.0 / 3.0, evaluationResult, 0.0); + Map partialResults = + response.getPartialResults(); + EvalQueryQuality evalQuality = + partialResults.get("kimchy_query"); // <2> + assertEquals("kimchy_query", evalQuality.getId()); + double qualityLevel = evalQuality.getQualityLevel(); // <3> + assertEquals(1.0 / 3.0, qualityLevel, 0.0); + List hitsAndRatings = evalQuality.getHitsAndRatings(); + RatedSearchHit ratedSearchHit = hitsAndRatings.get(0); + assertEquals("3", ratedSearchHit.getSearchHit().getId()); // <4> + assertFalse(ratedSearchHit.getRating().isPresent()); // <5> + MetricDetail metricDetails = evalQuality.getMetricDetails(); + String metricName = metricDetails.getMetricName(); + assertEquals(PrecisionAtK.NAME, metricName); // <6> + PrecisionAtK.Detail detail = (PrecisionAtK.Detail) metricDetails; + assertEquals(1, detail.getRelevantRetrieved()); // <7> + assertEquals(3, detail.getRetrieved()); + // end::rank-eval-response + + // tag::rank-eval-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(RankEvalResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::rank-eval-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::rank-eval-execute-async + client.rankEvalAsync(request, listener); // <1> + // end::rank-eval-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testMultiSearch() throws Exception { indexSearchTestData(); RestHighLevelClient client = highLevelClient(); diff --git a/docs/java-rest/high-level/search/rank-eval.asciidoc b/docs/java-rest/high-level/search/rank-eval.asciidoc new file mode 100644 index 0000000000000..6db0dadd00ed7 --- /dev/null +++ b/docs/java-rest/high-level/search/rank-eval.asciidoc @@ -0,0 +1,89 @@ +[[java-rest-high-rank-eval]] +=== Ranking Evaluation API + +The `rankEval` method allows you to evaluate the quality of ranked search +results over a set of search requests. Given sets of manually rated +documents for each search request, ranking evaluation performs a +<<java-rest-high-multi-search,multi search>> request and calculates +information retrieval metrics like _mean reciprocal rank_, _precision_ +or _discounted cumulative gain_ on the returned results. 
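The metrics themselves are plain Java objects that are configured when they are created. A minimal sketch, assuming the constructor signatures exercised by the rank-eval tests earlier in this series (rating threshold that counts as relevant, `ignore_unlabeled` and `k` for precision; threshold and `k` for MRR; normalize, unknown-document rating and `k` for DCG):

["source","java"]
--------------------------------------------------
// Precision at 3: hits rated >= 1 count as relevant, unlabeled hits count as irrelevant
EvaluationMetric precisionAt3 = new PrecisionAtK(1, false, 3);
// Mean reciprocal rank computed within the top 3 hits
EvaluationMetric mrrAt3 = new MeanReciprocalRank(1, 3);
// Discounted cumulative gain over the top 10 hits, not normalized, unknown docs unrated
EvaluationMetric dcgAt10 = new DiscountedCumulativeGain(false, null, 10);
--------------------------------------------------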
+ +[[java-rest-high-rank-eval-request]] +==== Ranking Evaluation Request + +In order to build a `RankEvalRequest`, you first need to create an +evaluation specification (`RankEvalSpec`). This specification requires +you to define the evaluation metric that is going to be calculated, as well +as a list of rated documents per search request. Creating the ranking +evaluation request then takes the specification and a list of target +indices as arguments: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-request-basic] +-------------------------------------------------- +<1> Define the metric used in the evaluation +<2> Add rated documents, specified by index name, id and rating +<3> Create the search query to evaluate +<4> Combine the three former parts into a `RatedRequest` +<5> Create the ranking evaluation specification +<6> Create the ranking evaluation request + +[[java-rest-high-rank-eval-sync]] +==== Synchronous Execution + +The `rankEval` method executes `RankEvalRequest`s synchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-execute] +-------------------------------------------------- + +[[java-rest-high-rank-eval-async]] +==== Asynchronous Execution + +The `rankEvalAsync` method executes `RankEvalRequest`s asynchronously, +calling the provided `ActionListener` when the response is ready. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-execute-async] +-------------------------------------------------- +<1> The `RankEvalRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed, the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `RankEvalResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. +<2> Called when the whole `RankEvalRequest` fails. + +==== RankEvalResponse + +The `RankEvalResponse` that is returned by executing the request +contains information about the overall evaluation score, the +scores of each individual search request in the set of queries, and +detailed information about the search hits and the metric +calculation for each partial result. 
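Beyond the single query inspected below, all partial results can be walked in a loop. A minimal sketch, assuming only the accessors used in the response snippet that follows:

["source","java"]
--------------------------------------------------
// Walk every per-query partial result and print its score and hit count
for (Map.Entry<String, EvalQueryQuality> entry : response.getPartialResults().entrySet()) {
    EvalQueryQuality quality = entry.getValue();
    System.out.println(entry.getKey() + ": score=" + quality.getQualityLevel()
            + ", rated hits=" + quality.getHitsAndRatings().size());
}
--------------------------------------------------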
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-response] +-------------------------------------------------- +<1> The overall evaluation result +<2> Partial results that are keyed by their query id +<3> The metric score for each partial result +<4> Rated search hits contain a fully fledged `SearchHit` +<5> Rated search hits also contain an `Optional` rating that +is not present if the document did not get a rating in the request +<6> Metric details are named after the metric used in the request +<7> After casting to the metric used in the request, the +metric details offers insight into parts of the metric calculation \ No newline at end of file diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 29052171cddc6..1f3d7a3744300 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -32,10 +32,12 @@ The Java High Level REST Client supports the following Search APIs: * <> * <> * <> +* <> include::search/search.asciidoc[] include::search/scroll.asciidoc[] include::search/multi-search.asciidoc[] +include::search/rank-eval.asciidoc[] == Miscellaneous APIs From 28d46512121db85390645a9b328f89d7f4f6152d Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Thu, 19 Apr 2018 15:09:14 +0200 Subject: [PATCH 14/28] Update plan for the removal of mapping types. (#29586) 8.x will no longer allow types in APIs and 7.x will issue deprecation warnings when `include_type_name` is set to `false`. --- docs/reference/mapping/removal_of_types.asciidoc | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/docs/reference/mapping/removal_of_types.asciidoc b/docs/reference/mapping/removal_of_types.asciidoc index 070d189a0fffe..acfed6874a426 100644 --- a/docs/reference/mapping/removal_of_types.asciidoc +++ b/docs/reference/mapping/removal_of_types.asciidoc @@ -258,15 +258,17 @@ Elasticsearch 6.x:: Elasticsearch 7.x:: -* The `type` parameter in URLs are optional. For instance, indexing +* The `type` parameter in URLs are deprecated. For instance, indexing a document no longer requires a document `type`. The new index APIs are `PUT {index}/_doc/{id}` in case of explicit ids and `POST {index}/_doc` for auto-generated ids. -* The `GET|PUT _mapping` APIs support a query string parameter - (`include_type_name`) which indicates whether the body should include - a layer for the type name. It defaults to `true`. 7.x indices which - don't have an explicit type will use the dummy type name `_doc`. +* The index creation, `GET|PUT _mapping` and document APIs support a query + string parameter (`include_type_name`) which indicates whether requests and + responses should include a type name. It defaults to `true`. + 7.x indices which don't have an explicit type will use the dummy type name + `_doc`. Not setting `include_type_name=false` will result in a deprecation + warning. * The `_default_` mapping type is removed. @@ -274,7 +276,8 @@ Elasticsearch 8.x:: * The `type` parameter is no longer supported in URLs. -* The `include_type_name` parameter defaults to `false`. +* The `include_type_name` parameter is deprecated, default to `false` and fails + the request when set to `true`. 
Elasticsearch 9.x:: From 9ef3a73f061b8a96ade1c0f7fc13c477cf5b2714 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 19 Apr 2018 15:37:28 +0200 Subject: [PATCH 15/28] test: Assert deprecated http.enabled setting warning Relates to #29603 --- .../src/test/java/org/elasticsearch/tribe/TribeUnitTests.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java index 582cddd5facb1..7ba032361fc49 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -124,7 +125,8 @@ protected Function nodeBuilder(Path configPath) { public void testThatTribeClientsIgnoreGlobalConfig() throws Exception { assertTribeNodeSuccessfullyCreated(getDataPath("elasticsearch.yml").getParent()); - assertWarnings("tribe nodes are deprecated in favor of cross-cluster search and will be removed in Elasticsearch 7.0.0"); + assertSettingDeprecationsAndWarnings(new Setting[]{NetworkModule.HTTP_ENABLED}, + "tribe nodes are deprecated in favor of cross-cluster search and will be removed in Elasticsearch 7.0.0"); } private static void assertTribeNodeSuccessfullyCreated(Path configPath) throws Exception { From 3f5167f37b2131950c797a0d71f4dbb0eca92dbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 19 Apr 2018 16:48:17 +0200 Subject: [PATCH 16/28] Deprecate use of `htmlStrip` as name for HtmlStripCharFilter (#27429) The camel case name `htmlStrip` should be removed in favour of `html_strip`, but we need to deprecate it first. This change adds deprecation warnings for indices created with version 6.3.0 or later. 
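A minimal sketch of the version-gated deprecation pattern this change introduces, with a hypothetical filter name (the real registration for `htmlStrip` follows in the diff below):

["source","java"]
--------------------------------------------------
// Hypothetical filter name; the warning only fires for indices created on or
// after 6.3.0, so older existing indices keep working without log noise.
filters.add(PreConfiguredCharFilter.singletonWithVersion("legacyName", false,
    (reader, version) -> {
        if (version.onOrAfter(org.elasticsearch.Version.V_6_3_0)) {
            DEPRECATION_LOGGER.deprecatedAndMaybeLog("legacyName_deprecation",
                "The [legacyName] char filter name is deprecated, use the new name instead.");
        }
        return new HTMLStripCharFilter(reader);
    }));
--------------------------------------------------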
--- .../analysis/common/CommonAnalysisPlugin.java | 15 +++- .../HtmlStripCharFilterFactoryTests.java | 73 +++++++++++++++++++ .../test/indices.analyze/10_analyze.yml | 53 ++++++++++++++ .../analysis/PreConfiguredCharFilter.java | 9 +++ 4 files changed, 148 insertions(+), 2 deletions(-) create mode 100644 modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index e0193e50313f3..a01eb52fdd498 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -67,6 +67,8 @@ import org.apache.lucene.analysis.standard.ClassicFilter; import org.apache.lucene.analysis.tr.ApostropheFilter; import org.apache.lucene.analysis.util.ElisionFilter; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.analysis.CharFilterFactory; import org.elasticsearch.index.analysis.PreConfiguredCharFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; @@ -88,6 +90,9 @@ import static org.elasticsearch.plugins.AnalysisPlugin.requriesAnalysisSettings; public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { + + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(CommonAnalysisPlugin.class)); + @Override public Map> getTokenFilters() { Map> filters = new TreeMap<>(); @@ -171,8 +176,14 @@ public Map> getTokenizers() { public List getPreConfiguredCharFilters() { List filters = new ArrayList<>(); filters.add(PreConfiguredCharFilter.singleton("html_strip", false, HTMLStripCharFilter::new)); - // TODO deprecate htmlStrip - filters.add(PreConfiguredCharFilter.singleton("htmlStrip", false, HTMLStripCharFilter::new)); + filters.add(PreConfiguredCharFilter.singletonWithVersion("htmlStrip", false, (reader, version) -> { + if (version.onOrAfter(org.elasticsearch.Version.V_6_3_0)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("htmlStrip_deprecation", + "The [htmlStrip] char filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [html_strip] instead."); + } + return new HTMLStripCharFilter(reader); + })); return filters; } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java new file mode 100644 index 0000000000000..0d5389a6d6594 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.analysis.common; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.CharFilterFactory; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.io.StringReader; +import java.util.Map; + + +public class HtmlStripCharFilterFactoryTests extends ESTestCase { + + /** + * Check that the deprecated name "htmlStrip" issues a deprecation warning for indices created since 6.3.0 + */ + public void testDeprecationWarning() throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_6_3_0, Version.CURRENT)) + .build(); + + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map charFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).charFilter; + CharFilterFactory charFilterFactory = charFilters.get("htmlStrip"); + assertNotNull(charFilterFactory.create(new StringReader("input"))); + assertWarnings("The [htmlStrip] char filter name is deprecated and will be removed in a future version. 
" + + "Please change the filter name to [html_strip] instead."); + } + } + + /** + * Check that the deprecated name "htmlStrip" does NOT issues a deprecation warning for indices created before 6.3.0 + */ + public void testNoDeprecationWarningPre6_3() throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetaData.SETTING_VERSION_CREATED, + VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_2_4)) + .build(); + + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map charFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).charFilter; + CharFilterFactory charFilterFactory = charFilters.get("htmlStrip"); + assertNotNull(charFilterFactory.create(new StringReader(""))); + } + } +} diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml index cbb8f053cfbba..f8fc3acc02c4c 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml @@ -17,3 +17,56 @@ - match: { error.type: illegal_argument_exception } - match: { error.reason: "Custom normalizer may not use filter [word_delimiter]" } +--- +"htmlStrip_deprecated": + - skip: + version: " - 6.2.99" + reason: deprecated in 6.3 + features: "warnings" + + - do: + indices.create: + index: test_deprecated_htmlstrip + body: + settings: + index: + analysis: + analyzer: + my_htmlStripWithCharfilter: + tokenizer: keyword + char_filter: ["htmlStrip"] + mappings: + type: + properties: + name: + type: text + analyzer: my_htmlStripWithCharfilter + + - do: + warnings: + - 'The [htmpStrip] char filter name is deprecated and will be removed in a future version. Please change the filter name to [html_strip] instead.' + index: + index: test_deprecated_htmlstrip + type: type + id: 1 + body: { "name": "foo bar" } + + - do: + warnings: + - 'The [htmpStrip] char filter name is deprecated and will be removed in a future version. Please change the filter name to [html_strip] instead.' + index: + index: test_deprecated_htmlstrip + type: type + id: 2 + body: { "name": "foo baz" } + + - do: + warnings: + - 'The [htmpStrip] char filter name is deprecated and will be removed in a future version. Please change the filter name to [html_strip] instead.' 
+ indices.analyze: + index: test_deprecated_htmlstrip + body: + analyzer: "my_htmlStripWithCharfilter" + text: "foo" + - length: { tokens: 1 } + - match: { tokens.0.token: "\nfoo\n" } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredCharFilter.java b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredCharFilter.java index a979e9e34fe4e..84eb0c4c3498c 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredCharFilter.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredCharFilter.java @@ -40,6 +40,15 @@ public static PreConfiguredCharFilter singleton(String name, boolean useFilterFo (reader, version) -> create.apply(reader)); } + /** + * Create a pre-configured char filter that may not vary at all, providing access to the elasticsearch version + */ + public static PreConfiguredCharFilter singletonWithVersion(String name, boolean useFilterForMultitermQueries, + BiFunction create) { + return new PreConfiguredCharFilter(name, CachingStrategy.ONE, useFilterForMultitermQueries, + (reader, version) -> create.apply(reader, version)); + } + /** * Create a pre-configured token filter that may vary based on the Lucene version. */ From 2e479ac866262cd0d9582f61a6e9a4cb6c0b9fd4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 19 Apr 2018 18:06:22 +0200 Subject: [PATCH 17/28] [Test] Fix assertion in SearchDocumentationIT --- .../client/documentation/SearchDocumentationIT.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index dac2e23934e9a..1d9111926d547 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -37,7 +37,6 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.unit.TimeValue; @@ -723,7 +722,6 @@ public void testRankEval() throws Exception { RankEvalResponse response = client.rankEval(request); // end::rank-eval-execute - logger.warn(Strings.toString(response)); // tag::rank-eval-response double evaluationResult = response.getEvaluationResult(); // <1> assertEquals(1.0 / 3.0, evaluationResult, 0.0); @@ -736,7 +734,7 @@ public void testRankEval() throws Exception { assertEquals(1.0 / 3.0, qualityLevel, 0.0); List hitsAndRatings = evalQuality.getHitsAndRatings(); RatedSearchHit ratedSearchHit = hitsAndRatings.get(0); - assertEquals("3", ratedSearchHit.getSearchHit().getId()); // <4> + assertEquals("2", ratedSearchHit.getSearchHit().getId()); // <4> assertFalse(ratedSearchHit.getRating().isPresent()); // <5> MetricDetail metricDetails = evalQuality.getMetricDetails(); String metricName = metricDetails.getMetricName(); From ee403aa001ddb5621ed5dc1ea2e24864794e87bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 19 Apr 2018 17:00:52 +0200 Subject: [PATCH 18/28] Add tests for ranking evaluation with aliases (#29452) The ranking evaluation requests so far were not tested 
against aliases, but they should run regardless of whether the targeted index is a real index or an alias. This change adds cases for this to the integration and rest tests. --- .../index/rankeval/RankEvalRequestIT.java | 50 ++++++----- .../rest-api-spec/test/rank_eval/10_basic.yml | 90 ++++++++++--------- 2 files changed, 75 insertions(+), 65 deletions(-) diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java index 8a3bad50b22da..b55c57bae2bcf 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.rankeval; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.index.IndexNotFoundException; @@ -40,10 +41,13 @@ import java.util.Set; import static org.elasticsearch.index.rankeval.EvaluationMetric.filterUnknownDocuments; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.instanceOf; public class RankEvalRequestIT extends ESIntegTestCase { + private static final String TEST_INDEX = "test"; + private static final String INDEX_ALIAS = "alias0"; private static final int RELEVANT_RATING_1 = 1; @Override @@ -58,20 +62,23 @@ protected Collection> nodePlugins() { @Before public void setup() { - createIndex("test"); + createIndex(TEST_INDEX); ensureGreen(); - client().prepareIndex("test", "testtype").setId("1") + client().prepareIndex(TEST_INDEX, "testtype").setId("1") .setSource("text", "berlin", "title", "Berlin, Germany", "population", 3670622).get(); - client().prepareIndex("test", "testtype").setId("2").setSource("text", "amsterdam", "population", 851573).get(); - client().prepareIndex("test", "testtype").setId("3").setSource("text", "amsterdam", "population", 851573).get(); - client().prepareIndex("test", "testtype").setId("4").setSource("text", "amsterdam", "population", 851573).get(); - client().prepareIndex("test", "testtype").setId("5").setSource("text", "amsterdam", "population", 851573).get(); - client().prepareIndex("test", "testtype").setId("6").setSource("text", "amsterdam", "population", 851573).get(); + client().prepareIndex(TEST_INDEX, "testtype").setId("2").setSource("text", "amsterdam", "population", 851573).get(); + client().prepareIndex(TEST_INDEX, "testtype").setId("3").setSource("text", "amsterdam", "population", 851573).get(); + client().prepareIndex(TEST_INDEX, "testtype").setId("4").setSource("text", "amsterdam", "population", 851573).get(); + client().prepareIndex(TEST_INDEX, "testtype").setId("5").setSource("text", "amsterdam", "population", 851573).get(); + client().prepareIndex(TEST_INDEX, "testtype").setId("6").setSource("text", "amsterdam", "population", 851573).get(); // add another index for testing closed indices etc... 
client().prepareIndex("test2", "testtype").setId("7").setSource("text", "amsterdam", "population", 851573).get(); refresh(); + + // set up an alias that can also be used in tests + assertAcked(client().admin().indices().prepareAliases().addAliasAction(AliasActions.add().index(TEST_INDEX).alias(INDEX_ALIAS))); } /** @@ -101,7 +108,8 @@ public void testPrecisionAtRequest() { RankEvalAction.INSTANCE, new RankEvalRequest()); builder.setRankEvalSpec(task); - RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request().indices("test")) + String indexToUse = randomBoolean() ? TEST_INDEX : INDEX_ALIAS; + RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request().indices(indexToUse)) .actionGet(); // the expected Prec@ for the first query is 4/6 and the expected Prec@ for the // second is 1/6, divided by 2 to get the average @@ -143,7 +151,7 @@ public void testPrecisionAtRequest() { metric = new PrecisionAtK(1, false, 3); task = new RankEvalSpec(specifications, metric); - builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { "test" })); + builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { TEST_INDEX })); response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); // if we look only at top 3 documente, the expected P@3 for the first query is @@ -163,19 +171,19 @@ public void testDCGRequest() { List specifications = new ArrayList<>(); List ratedDocs = Arrays.asList( - new RatedDocument("test", "1", 3), - new RatedDocument("test", "2", 2), - new RatedDocument("test", "3", 3), - new RatedDocument("test", "4", 0), - new RatedDocument("test", "5", 1), - new RatedDocument("test", "6", 2)); + new RatedDocument(TEST_INDEX, "1", 3), + new RatedDocument(TEST_INDEX, "2", 2), + new RatedDocument(TEST_INDEX, "3", 3), + new RatedDocument(TEST_INDEX, "4", 0), + new RatedDocument(TEST_INDEX, "5", 1), + new RatedDocument(TEST_INDEX, "6", 2)); specifications.add(new RatedRequest("amsterdam_query", ratedDocs, testQuery)); DiscountedCumulativeGain metric = new DiscountedCumulativeGain(false, null, 10); RankEvalSpec task = new RankEvalSpec(specifications, metric); RankEvalRequestBuilder builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, - new RankEvalRequest(task, new String[] { "test" })); + new RankEvalRequest(task, new String[] { TEST_INDEX })); RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); assertEquals(DiscountedCumulativeGainTests.EXPECTED_DCG, response.getEvaluationResult(), 10E-14); @@ -184,7 +192,7 @@ public void testDCGRequest() { metric = new DiscountedCumulativeGain(false, null, 3); task = new RankEvalSpec(specifications, metric); - builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { "test" })); + builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { TEST_INDEX })); response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); assertEquals(12.39278926071437, response.getEvaluationResult(), 10E-14); @@ -203,7 +211,7 @@ public void testMRRRequest() { RankEvalSpec task = new RankEvalSpec(specifications, metric); RankEvalRequestBuilder builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, - new RankEvalRequest(task, new String[] { "test" })); + new RankEvalRequest(task, new 
String[] { TEST_INDEX })); RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); // the expected reciprocal rank for the amsterdam_query is 1/5 @@ -216,7 +224,7 @@ public void testMRRRequest() { metric = new MeanReciprocalRank(1, 3); task = new RankEvalSpec(specifications, metric); - builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { "test" })); + builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { TEST_INDEX })); response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); // limiting to top 3 results, the amsterdam_query has no relevant document in it @@ -247,7 +255,7 @@ public void testBadQuery() { RankEvalSpec task = new RankEvalSpec(specifications, new PrecisionAtK()); RankEvalRequestBuilder builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, - new RankEvalRequest(task, new String[] { "test" })); + new RankEvalRequest(task, new String[] { TEST_INDEX })); builder.setRankEvalSpec(task); RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); @@ -267,7 +275,7 @@ public void testIndicesOptions() { specifications.add(new RatedRequest("amsterdam_query", relevantDocs, amsterdamQuery)); RankEvalSpec task = new RankEvalSpec(specifications, new PrecisionAtK()); - RankEvalRequest request = new RankEvalRequest(task, new String[] { "test", "test2" }); + RankEvalRequest request = new RankEvalRequest(task, new String[] { TEST_INDEX, "test2" }); request.setRankEvalSpec(task); RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); diff --git a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml index c6a94cc5f0b44..f3ba3d0a3cfd2 100644 --- a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml +++ b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml @@ -1,10 +1,4 @@ ---- -"Response format": - - - skip: - version: " - 6.2.99" - reason: changes in response format changed in 6.3 - +setup: - do: indices.create: index: foo @@ -43,9 +37,22 @@ - do: indices.refresh: {} - do: + indices.put_alias: + index: foo + name: alias +--- +"Response format": + + - skip: + version: " - 6.2.99" + reason: response format was updated in 6.3 + - do: rank_eval: + index: foo + body: { "requests" : [ { "id": "amsterdam_query", @@ -84,52 +91,47 @@ - match: { details.berlin_query.hits.0.hit._id: "doc1" } - match: { details.berlin_query.hits.0.rating: 1} - match: { details.berlin_query.hits.1.hit._id: "doc4" } - - is_false: details.berlin_query.hits.1.rating + - is_false: details.berlin_query.hits.1.rating --- -"Mean Reciprocal Rank": +"Alias resolution": - skip: version: " - 6.2.99" - reason: changes in response format in 6.3 + reason: response format was updated in 6.3 - do: - indices.create: - index: foo - body: - settings: - index: - number_of_shards: 1 - - do: - index: - index: foo - type: bar - id: doc1 - body: { "text": "berlin" } + rank_eval: + index: alias + body: { + "requests" : [ + { + "id": "amsterdam_query", + "request": { "query": { "match" : {"text" : "amsterdam" }}}, + "ratings": [ + {"_index": "foo", "_id": "doc1", "rating": 0}, + {"_index": "foo", "_id": "doc2", "rating": 1}, + {"_index": "foo", "_id": "doc3", "rating": 1}] + }, + { + "id" : 
"berlin_query", + "request": { "query": { "match" : { "text" : "berlin" } }, "size" : 10 }, + "ratings": [{"_index": "foo", "_id": "doc1", "rating": 1}] + } + ], + "metric" : { "precision": { "ignore_unlabeled" : true }} + } - - do: - index: - index: foo - type: bar - id: doc2 - body: { "text": "amsterdam" } + - match: { quality_level: 1} + - match: { details.amsterdam_query.quality_level: 1.0} + - match: { details.berlin_query.quality_level: 1.0} - - do: - index: - index: foo - type: bar - id: doc3 - body: { "text": "amsterdam" } - - - do: - index: - index: foo - type: bar - id: doc4 - body: { "text": "something about amsterdam and berlin" } +--- +"Mean Reciprocal Rank": - - do: - indices.refresh: {} + - skip: + version: " - 6.2.99" + reason: response format was updated in 6.3 - do: rank_eval: From e8d814282897daf3a4a313768fe92a25778072cd Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 19 Apr 2018 09:51:52 -0700 Subject: [PATCH 19/28] Build: Move java home checks to pre-execution phase (#29548) This commit moves the checks on JAVAX_HOME (where X is the java version number) existing to the end of gradle's configuration phase, and based on whether the tasks needing the java home are configured to execute. relates #29519 --- .../elasticsearch/gradle/BuildPlugin.groovy | 42 ++++++++++++------- .../gradle/test/ClusterFormationTasks.groovy | 4 ++ .../elasticsearch/gradle/test/NodeInfo.groovy | 22 +++++++--- distribution/bwc/build.gradle | 4 +- qa/reindex-from-old/build.gradle | 2 +- 5 files changed, 52 insertions(+), 22 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 3103f23472ed7..d0ae4fd470312 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -38,6 +38,7 @@ import org.gradle.api.artifacts.ModuleVersionIdentifier import org.gradle.api.artifacts.ProjectDependency import org.gradle.api.artifacts.ResolvedArtifact import org.gradle.api.artifacts.dsl.RepositoryHandler +import org.gradle.api.execution.TaskExecutionGraph import org.gradle.api.plugins.JavaPlugin import org.gradle.api.publish.maven.MavenPublication import org.gradle.api.publish.maven.plugins.MavenPublishPlugin @@ -221,21 +222,34 @@ class BuildPlugin implements Plugin { return System.getenv('JAVA' + version + '_HOME') } - /** - * Get Java home for the project for the specified version. If the specified version is not configured, an exception with the specified - * message is thrown. - * - * @param project the project - * @param version the version of Java home to obtain - * @param message the exception message if Java home for the specified version is not configured - * @return Java home for the specified version - * @throws GradleException if Java home for the specified version is not configured - */ - static String getJavaHome(final Project project, final int version, final String message) { - if (project.javaVersions.get(version) == null) { - throw new GradleException(message) + /** Add a check before gradle execution phase which ensures java home for the given java version is set. 
*/ + static void requireJavaHome(Task task, int version) { + Project rootProject = task.project.rootProject // use root project for global accounting + if (rootProject.hasProperty('requiredJavaVersions') == false) { + rootProject.rootProject.ext.requiredJavaVersions = [:].withDefault{key -> return []} + rootProject.gradle.taskGraph.whenReady { TaskExecutionGraph taskGraph -> + List messages = [] + for (entry in rootProject.requiredJavaVersions) { + if (rootProject.javaVersions.get(entry.key) != null) { + continue + } + List tasks = entry.value.findAll { taskGraph.hasTask(it) }.collect { " ${it.path}" } + if (tasks.isEmpty() == false) { + messages.add("JAVA${entry.key}_HOME required to run tasks:\n${tasks.join('\n')}") + } + } + if (messages.isEmpty() == false) { + throw new GradleException(messages.join('\n')) + } + } } - return project.javaVersions.get(version) + rootProject.requiredJavaVersions.get(version).add(task) + } + + /** A convenience method for getting java home for a version of java and requiring that version for the given task to execute */ + static String getJavaHome(final Task task, final int version) { + requireJavaHome(task, version) + return task.project.javaVersions.get(version) } private static String findRuntimeJavaHome(final String compilerJavaHome) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index adf65668edfab..e40e3fae88e08 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -20,6 +20,7 @@ package org.elasticsearch.gradle.test import org.apache.tools.ant.DefaultLogger import org.apache.tools.ant.taskdefs.condition.Os +import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties @@ -611,6 +612,9 @@ class ClusterFormationTasks { } Task start = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup) + if (node.javaVersion != null) { + BuildPlugin.requireJavaHome(start, node.javaVersion) + } start.doLast(elasticsearchRunner) return start } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index a32c152edbfbe..621220043ddef 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -36,6 +36,9 @@ import static org.elasticsearch.gradle.BuildPlugin.getJavaHome * A container for the files and configuration associated with a single node in a test cluster. 
*/ class NodeInfo { + /** Gradle project this node is part of */ + Project project + /** common configuration for all nodes, including this one */ ClusterConfiguration config @@ -84,6 +87,9 @@ class NodeInfo { /** directory to install plugins from */ File pluginsTmpDir + /** Major version of java this node runs with, or {@code null} if using the runtime java version */ + Integer javaVersion + /** environment variables to start the node with */ Map env @@ -109,6 +115,7 @@ class NodeInfo { NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, Version nodeVersion, File sharedDir) { this.config = config this.nodeNum = nodeNum + this.project = project this.sharedDir = sharedDir if (config.clusterName != null) { clusterName = config.clusterName @@ -165,12 +172,11 @@ class NodeInfo { args.add("${esScript}") } + if (nodeVersion.before("6.2.0")) { - env = ['JAVA_HOME': "${-> getJavaHome(project, 8, "JAVA8_HOME must be set to run BWC tests against [" + nodeVersion + "]")}"] + javaVersion = 8 } else if (nodeVersion.onOrAfter("6.2.0") && nodeVersion.before("6.3.0")) { - env = ['JAVA_HOME': "${-> getJavaHome(project, 9, "JAVA9_HOME must be set to run BWC tests against [" + nodeVersion + "]")}"] - } else { - env = ['JAVA_HOME': (String) project.runtimeJavaHome] + javaVersion = 9 } args.addAll("-E", "node.portsfile=true") @@ -182,7 +188,7 @@ class NodeInfo { // in the cluster-specific options esJavaOpts = String.join(" ", "-ea", "-esa", esJavaOpts) } - env.put('ES_JAVA_OPTS', esJavaOpts) + env = ['ES_JAVA_OPTS': esJavaOpts] for (Map.Entry property : System.properties.entrySet()) { if (property.key.startsWith('tests.es.')) { args.add("-E") @@ -253,6 +259,11 @@ class NodeInfo { return Native.toString(shortPath).substring(4) } + /** Return the java home used by this node. */ + String getJavaHome() { + return javaVersion == null ? project.runtimeJavaHome : project.javaVersions.get(javaVersion) + } + /** Returns debug string for the command that started this node. 
*/ String getCommandString() { String esCommandString = "\nNode ${nodeNum} configuration:\n" @@ -260,6 +271,7 @@ esCommandString += "| cwd: ${cwd}\n" esCommandString += "| command: ${executable} ${args.join(' ')}\n" esCommandString += '| environment:\n' + esCommandString += "| JAVA_HOME: ${javaHome}\n" env.each { k, v -> esCommandString += "| ${k}: ${v}\n" } if (config.daemonize) { esCommandString += "|\n| [${wrapperScript.name}]\n" diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 7f12e357ec9ff..f099bbc92624b 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -147,9 +147,9 @@ subprojects { workingDir = checkoutDir if (["5.6", "6.0", "6.1"].contains(bwcBranch)) { // we are building branches that are officially built with JDK 8, push JAVA8_HOME to JAVA_HOME for these builds - environment('JAVA_HOME', "${-> getJavaHome(project, 8, "JAVA8_HOME is required to build BWC versions for BWC branch [" + bwcBranch + "]")}") + environment('JAVA_HOME', getJavaHome(it, 8)) } else if ("6.2".equals(bwcBranch)) { - environment('JAVA_HOME', "${-> getJavaHome(project, 9, "JAVA9_HOME is required to build BWC versions for BWC branch [" + bwcBranch + "]")}") + environment('JAVA_HOME', getJavaHome(it, 9)) } else { environment('JAVA_HOME', project.compilerJavaHome) } diff --git a/qa/reindex-from-old/build.gradle b/qa/reindex-from-old/build.gradle index c4b4927a4a2b1..8da714dd6278a 100644 --- a/qa/reindex-from-old/build.gradle +++ b/qa/reindex-from-old/build.gradle @@ -77,7 +77,7 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) { dependsOn unzip executable = new File(project.runtimeJavaHome, 'bin/java') env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }" - env 'JAVA_HOME', "${-> getJavaHome(project, 7, "JAVA7_HOME must be set to run reindex-from-old")}" + env 'JAVA_HOME', getJavaHome(it, 7) args 'oldes.OldElasticsearch', baseDir, unzip.temporaryDir, From 7a62e6ad8e56c42fa3a7d8622786bcab45460fc2 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 19 Apr 2018 12:38:10 -0400 Subject: [PATCH 20/28] Avoid side-effect in VersionMap when assertion enabled (#29585) Today, when a version map does not require safe access, we skip adding that document to the map. However, if assertions are enabled, we remove the delete tombstone of that document if one existed. This side effect may accidentally hide bugs in which a stale delete tombstone can be accessed. This change ensures that putAssertionMap does not modify the tombstone maps. --- .../index/engine/DeleteVersionValue.java | 2 +- ...rsionValue.java => IndexVersionValue.java} | 10 +-- .../index/engine/InternalEngine.java | 16 ++--- .../index/engine/LiveVersionMap.java | 51 +++++++-------- .../index/engine/VersionValue.java | 2 +- .../index/engine/LiveVersionMapTests.java | 65 ++++++++++++------- .../index/engine/VersionValueTests.java | 9 ++- 7 files changed, 83 insertions(+), 72 deletions(-) rename server/src/main/java/org/elasticsearch/index/engine/{TranslogVersionValue.java => IndexVersionValue.java} (86%) diff --git a/server/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java index d2b2e24c616a8..9f094197b8d9c 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java +++ b/server/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java @@ -23,7 +23,7 @@ /** Holds a deleted version, which just adds a timestamp to {@link VersionValue} so we know when we can expire the deletion. 
*/ -class DeleteVersionValue extends VersionValue { +final class DeleteVersionValue extends VersionValue { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DeleteVersionValue.class); diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/IndexVersionValue.java similarity index 86% rename from server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java rename to server/src/main/java/org/elasticsearch/index/engine/IndexVersionValue.java index 67415ea6139a6..4f67372926712 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java +++ b/server/src/main/java/org/elasticsearch/index/engine/IndexVersionValue.java @@ -24,13 +24,13 @@ import java.util.Objects; -final class TranslogVersionValue extends VersionValue { +final class IndexVersionValue extends VersionValue { - private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(TranslogVersionValue.class); + private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IndexVersionValue.class); private final Translog.Location translogLocation; - TranslogVersionValue(Translog.Location translogLocation, long version, long seqNo, long term) { + IndexVersionValue(Translog.Location translogLocation, long version, long seqNo, long term) { super(version, seqNo, term); this.translogLocation = translogLocation; } @@ -45,7 +45,7 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; if (!super.equals(o)) return false; - TranslogVersionValue that = (TranslogVersionValue) o; + IndexVersionValue that = (IndexVersionValue) o; return Objects.equals(translogLocation, that.translogLocation); } @@ -56,7 +56,7 @@ public int hashCode() { @Override public String toString() { - return "TranslogVersionValue{" + + return "IndexVersionValue{" + "version=" + version + ", seqNo=" + seqNo + ", term=" + term + diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index b5a1ec49b55e5..b6759e6effdaf 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -636,7 +636,7 @@ private VersionValue resolveDocVersion(final Operation op) throws IOException { assert incrementIndexVersionLookup(); // used for asserting in tests final long currentVersion = loadCurrentVersionFromIndex(op.uid()); if (currentVersion != Versions.NOT_FOUND) { - versionValue = new VersionValue(currentVersion, SequenceNumbers.UNASSIGNED_SEQ_NO, 0L); + versionValue = new IndexVersionValue(null, currentVersion, SequenceNumbers.UNASSIGNED_SEQ_NO, 0L); } } else if (engineConfig.isEnableGcDeletes() && versionValue.isDelete() && (engineConfig.getThreadPool().relativeTimeInMillis() - ((DeleteVersionValue)versionValue).time) > getGcDeletesInMillis()) { @@ -824,8 +824,9 @@ public IndexResult index(Index index) throws IOException { indexResult.setTranslogLocation(location); } if (plan.indexIntoLucene && indexResult.hasFailure() == false) { - versionMap.maybePutUnderLock(index.uid().bytes(), - getVersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), indexResult.getTranslogLocation())); + final Translog.Location translogLocation = trackTranslogLocation.get() ? 
indexResult.getTranslogLocation() : null; + versionMap.maybePutIndexUnderLock(index.uid().bytes(), + new IndexVersionValue(translogLocation, plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm())); } if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { localCheckpointTracker.markSeqNoAsCompleted(indexResult.getSeqNo()); @@ -982,13 +983,6 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) } } - private VersionValue getVersionValue(long version, long seqNo, long term, Translog.Location location) { - if (location != null && trackTranslogLocation.get()) { - return new TranslogVersionValue(location, version, seqNo, term); - } - return new VersionValue(version, seqNo, term); - } - /** * returns true if the indexing operation may have already be processed by this engine. * Note that it is OK to rarely return true even if this is not the case. However a `false` @@ -1242,7 +1236,7 @@ private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) indexWriter.deleteDocuments(delete.uid()); numDocDeletes.inc(); } - versionMap.putUnderLock(delete.uid().bytes(), + versionMap.putDeleteUnderLock(delete.uid().bytes(), new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), engineConfig.getThreadPool().relativeTimeInMillis())); return new DeleteResult( diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index 7c5dcfa5c9050..6d9dc4a38974c 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -268,7 +268,7 @@ VersionValue getUnderLock(final BytesRef uid) { } private VersionValue getUnderLock(final BytesRef uid, Maps currentMaps) { - assert keyedLock.isHeldByCurrentThread(uid); + assert assertKeyedLockHeldByCurrentThread(uid); // First try to get the "live" value: VersionValue value = currentMaps.current.get(uid); if (value != null) { @@ -306,44 +306,36 @@ boolean isSafeAccessRequired() { /** * Adds this uid/version to the pending adds map iff the map needs safe access. */ - void maybePutUnderLock(BytesRef uid, VersionValue version) { - assert keyedLock.isHeldByCurrentThread(uid); + void maybePutIndexUnderLock(BytesRef uid, IndexVersionValue version) { + assert assertKeyedLockHeldByCurrentThread(uid); Maps maps = this.maps; if (maps.isSafeAccessMode()) { - putUnderLock(uid, version, maps); + putIndexUnderLock(uid, version); } else { maps.current.markAsUnsafe(); assert putAssertionMap(uid, version); } } - private boolean putAssertionMap(BytesRef uid, VersionValue version) { - putUnderLock(uid, version, unsafeKeysMap); - return true; + void putIndexUnderLock(BytesRef uid, IndexVersionValue version) { + assert assertKeyedLockHeldByCurrentThread(uid); + assert uid.bytes.length == uid.length : "Oversized _uid! UID length: " + uid.length + ", bytes length: " + uid.bytes.length; + maps.put(uid, version); + removeTombstoneUnderLock(uid); } - /** - * Adds this uid/version to the pending adds map. - */ - void putUnderLock(BytesRef uid, VersionValue version) { - Maps maps = this.maps; - putUnderLock(uid, version, maps); + private boolean putAssertionMap(BytesRef uid, IndexVersionValue version) { + assert assertKeyedLockHeldByCurrentThread(uid); + assert uid.bytes.length == uid.length : "Oversized _uid! 
UID length: " + uid.length + ", bytes length: " + uid.bytes.length; + unsafeKeysMap.put(uid, version); + return true; } - /** - * Adds this uid/version to the pending adds map. - */ - private void putUnderLock(BytesRef uid, VersionValue version, Maps maps) { - assert keyedLock.isHeldByCurrentThread(uid); + void putDeleteUnderLock(BytesRef uid, DeleteVersionValue version) { + assert assertKeyedLockHeldByCurrentThread(uid); assert uid.bytes.length == uid.length : "Oversized _uid! UID length: " + uid.length + ", bytes length: " + uid.bytes.length; - if (version.isDelete() == false) { - maps.put(uid, version); - removeTombstoneUnderLock(uid); - } else { - DeleteVersionValue versionValue = (DeleteVersionValue) version; - putTombstone(uid, versionValue); - maps.remove(uid, versionValue); - } + putTombstone(uid, version); + maps.remove(uid, version); } private void putTombstone(BytesRef uid, DeleteVersionValue version) { @@ -365,7 +357,7 @@ private void putTombstone(BytesRef uid, DeleteVersionValue version) { * Removes this uid from the pending deletes map. */ void removeTombstoneUnderLock(BytesRef uid) { - assert keyedLock.isHeldByCurrentThread(uid); + assert assertKeyedLockHeldByCurrentThread(uid); long uidRAMBytesUsed = BASE_BYTES_PER_BYTESREF + uid.bytes.length; final VersionValue prev = tombstones.remove(uid); if (prev != null) { @@ -465,4 +457,9 @@ Map getAllTombstones() { Releasable acquireLock(BytesRef uid) { return keyedLock.acquire(uid); } + + private boolean assertKeyedLockHeldByCurrentThread(BytesRef uid) { + assert keyedLock.isHeldByCurrentThread(uid) : "Thread [" + Thread.currentThread().getName() + "], uid [" + uid.utf8ToString() + "]"; + return true; + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java index d63306486732e..567a7964186ad 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java +++ b/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java @@ -27,7 +27,7 @@ import java.util.Collection; import java.util.Collections; -class VersionValue implements Accountable { +abstract class VersionValue implements Accountable { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(VersionValue.class); diff --git a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java index ce3ddff00dade..e0efcf9f0f73f 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java @@ -25,6 +25,7 @@ import org.apache.lucene.util.RamUsageTester; import org.apache.lucene.util.TestUtil; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -47,9 +48,8 @@ public void testRamBytesUsed() throws Exception { for (int i = 0; i < 100000; ++i) { BytesRefBuilder uid = new BytesRefBuilder(); uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20)); - VersionValue version = new VersionValue(randomLong(), randomLong(), randomLong()); try (Releasable r = map.acquireLock(uid.toBytesRef())) { - map.putUnderLock(uid.toBytesRef(), version); + map.putIndexUnderLock(uid.toBytesRef(), randomIndexVersionValue()); } } long actualRamBytesUsed = RamUsageTester.sizeOf(map); @@ -64,9 +64,8 @@ public void 
testRamBytesUsed() throws Exception { for (int i = 0; i < 100000; ++i) { BytesRefBuilder uid = new BytesRefBuilder(); uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20)); - VersionValue version = new VersionValue(randomLong(), randomLong(), randomLong()); try (Releasable r = map.acquireLock(uid.toBytesRef())) { - map.putUnderLock(uid.toBytesRef(), version); + map.putIndexUnderLock(uid.toBytesRef(), randomIndexVersionValue()); } } actualRamBytesUsed = RamUsageTester.sizeOf(map); @@ -100,14 +99,15 @@ private BytesRef uid(String string) { public void testBasics() throws IOException { LiveVersionMap map = new LiveVersionMap(); try (Releasable r = map.acquireLock(uid("test"))) { - map.putUnderLock(uid("test"), new VersionValue(1,1,1)); - assertEquals(new VersionValue(1,1,1), map.getUnderLock(uid("test"))); + Translog.Location tlogLoc = randomTranslogLocation(); + map.putIndexUnderLock(uid("test"), new IndexVersionValue(tlogLoc, 1, 1, 1)); + assertEquals(new IndexVersionValue(tlogLoc, 1, 1, 1), map.getUnderLock(uid("test"))); map.beforeRefresh(); - assertEquals(new VersionValue(1,1,1), map.getUnderLock(uid("test"))); + assertEquals(new IndexVersionValue(tlogLoc, 1, 1, 1), map.getUnderLock(uid("test"))); map.afterRefresh(randomBoolean()); assertNull(map.getUnderLock(uid("test"))); - map.putUnderLock(uid("test"), new DeleteVersionValue(1,1,1,1)); + map.putDeleteUnderLock(uid("test"), new DeleteVersionValue(1,1,1,1)); assertEquals(new DeleteVersionValue(1,1,1,1), map.getUnderLock(uid("test"))); map.beforeRefresh(); assertEquals(new DeleteVersionValue(1,1,1,1), map.getUnderLock(uid("test"))); @@ -154,21 +154,24 @@ public void testConcurrently() throws IOException, InterruptedException { BytesRef bytesRef = randomFrom(random(), keyList); try (Releasable r = map.acquireLock(bytesRef)) { VersionValue versionValue = values.computeIfAbsent(bytesRef, - v -> new VersionValue(randomLong(), maxSeqNo.incrementAndGet(), randomLong())); + v -> new IndexVersionValue( + randomTranslogLocation(), randomLong(), maxSeqNo.incrementAndGet(), randomLong())); boolean isDelete = versionValue instanceof DeleteVersionValue; if (isDelete) { map.removeTombstoneUnderLock(bytesRef); deletes.remove(bytesRef); } if (isDelete == false && rarely()) { - versionValue = new DeleteVersionValue(versionValue.version + 1, maxSeqNo.incrementAndGet(), - versionValue.term, clock.getAndIncrement()); + versionValue = new DeleteVersionValue(versionValue.version + 1, + maxSeqNo.incrementAndGet(), versionValue.term, clock.getAndIncrement()); deletes.put(bytesRef, (DeleteVersionValue) versionValue); + map.putDeleteUnderLock(bytesRef, (DeleteVersionValue) versionValue); } else { - versionValue = new VersionValue(versionValue.version + 1, maxSeqNo.incrementAndGet(), versionValue.term); + versionValue = new IndexVersionValue(randomTranslogLocation(), + versionValue.version + 1, maxSeqNo.incrementAndGet(), versionValue.term); + map.putIndexUnderLock(bytesRef, (IndexVersionValue) versionValue); } values.put(bytesRef, versionValue); - map.putUnderLock(bytesRef, versionValue); } if (rarely()) { final long pruneSeqNo = randomLongBetween(0, maxSeqNo.get()); @@ -268,7 +271,7 @@ public void testCarryOnSafeAccess() throws IOException { } try (Releasable r = map.acquireLock(uid(""))) { - map.maybePutUnderLock(new BytesRef(""), new VersionValue(randomLong(), randomLong(), randomLong())); + map.maybePutIndexUnderLock(new BytesRef(""), randomIndexVersionValue()); } assertFalse(map.isUnsafe()); assertEquals(1, map.getAllCurrent().size()); @@ -278,7 
+281,7 @@ public void testCarryOnSafeAccess() throws IOException { assertFalse(map.isUnsafe()); assertFalse(map.isSafeAccessRequired()); try (Releasable r = map.acquireLock(uid(""))) { - map.maybePutUnderLock(new BytesRef(""), new VersionValue(randomLong(), randomLong(), randomLong())); + map.maybePutIndexUnderLock(new BytesRef(""), randomIndexVersionValue()); } assertTrue(map.isUnsafe()); assertFalse(map.isSafeAccessRequired()); @@ -288,7 +291,7 @@ public void testCarryOnSafeAccess() throws IOException { public void testRefreshTransition() throws IOException { LiveVersionMap map = new LiveVersionMap(); try (Releasable r = map.acquireLock(uid("1"))) { - map.maybePutUnderLock(uid("1"), new VersionValue(randomLong(), randomLong(), randomLong())); + map.maybePutIndexUnderLock(uid("1"), randomIndexVersionValue()); assertTrue(map.isUnsafe()); assertNull(map.getUnderLock(uid("1"))); map.beforeRefresh(); @@ -299,7 +302,7 @@ public void testRefreshTransition() throws IOException { assertFalse(map.isUnsafe()); map.enforceSafeAccess(); - map.maybePutUnderLock(uid("1"), new VersionValue(randomLong(), randomLong(), randomLong())); + map.maybePutIndexUnderLock(uid("1"), randomIndexVersionValue()); assertFalse(map.isUnsafe()); assertNotNull(map.getUnderLock(uid("1"))); map.beforeRefresh(); @@ -320,9 +323,10 @@ public void testAddAndDeleteRefreshConcurrently() throws IOException, Interrupte AtomicLong version = new AtomicLong(); CountDownLatch start = new CountDownLatch(2); BytesRef uid = uid("1"); - VersionValue initialVersion = new VersionValue(version.incrementAndGet(), 1, 1); + VersionValue initialVersion; try (Releasable ignore = map.acquireLock(uid)) { - map.putUnderLock(uid, initialVersion); + initialVersion = new IndexVersionValue(randomTranslogLocation(), version.incrementAndGet(), 1, 1); + map.putIndexUnderLock(uid, (IndexVersionValue) initialVersion); } Thread t = new Thread(() -> { start.countDown(); @@ -337,14 +341,13 @@ public void testAddAndDeleteRefreshConcurrently() throws IOException, Interrupte } else { underLock = nextVersionValue; } - if (underLock.isDelete()) { - nextVersionValue = new VersionValue(version.incrementAndGet(), 1, 1); - } else if (randomBoolean()) { - nextVersionValue = new VersionValue(version.incrementAndGet(), 1, 1); + if (underLock.isDelete() || randomBoolean()) { + nextVersionValue = new IndexVersionValue(randomTranslogLocation(), version.incrementAndGet(), 1, 1); + map.putIndexUnderLock(uid, (IndexVersionValue) nextVersionValue); } else { nextVersionValue = new DeleteVersionValue(version.incrementAndGet(), 1, 1, 0); + map.putDeleteUnderLock(uid, (DeleteVersionValue) nextVersionValue); } - map.putUnderLock(uid, nextVersionValue); } } } catch (Exception e) { @@ -375,7 +378,7 @@ public void testPruneTombstonesWhileLocked() throws InterruptedException, IOExce BytesRef uid = uid("1"); ; try (Releasable ignore = map.acquireLock(uid)) { - map.putUnderLock(uid, new DeleteVersionValue(0, 0, 0, 0)); + map.putDeleteUnderLock(uid, new DeleteVersionValue(0, 0, 0, 0)); map.beforeRefresh(); // refresh otherwise we won't prune since it's tracked by the current map map.afterRefresh(false); Thread thread = new Thread(() -> { @@ -392,4 +395,16 @@ public void testPruneTombstonesWhileLocked() throws InterruptedException, IOExce thread.join(); assertEquals(0, map.getAllTombstones().size()); } + + IndexVersionValue randomIndexVersionValue() { + return new IndexVersionValue(randomTranslogLocation(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()); + } + + 
Translog.Location randomTranslogLocation() { + if (randomBoolean()) { + return null; + } else { + return new Translog.Location(randomNonNegativeLong(), randomNonNegativeLong(), randomInt()); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java b/server/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java index 3b953edece1b4..242a568295dd6 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java @@ -20,12 +20,17 @@ package org.elasticsearch.index.engine; import org.apache.lucene.util.RamUsageTester; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.test.ESTestCase; public class VersionValueTests extends ESTestCase { - public void testRamBytesUsed() { - VersionValue versionValue = new VersionValue(randomLong(), randomLong(), randomLong()); + public void testIndexRamBytesUsed() { + Translog.Location translogLoc = null; + if (randomBoolean()) { + translogLoc = new Translog.Location(randomNonNegativeLong(), randomNonNegativeLong(), randomInt()); + } + IndexVersionValue versionValue = new IndexVersionValue(translogLoc, randomLong(), randomLong(), randomLong()); assertEquals(RamUsageTester.sizeOf(versionValue), versionValue.ramBytesUsed()); } From 89179af8a21fca289f27d4d2230cf13025aa2d2d Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 19 Apr 2018 16:04:52 -0400 Subject: [PATCH 21/28] Account translog location to ram usage in version map This commit accounts for a translog location's RAM usage in the version map. --- .../java/org/elasticsearch/index/engine/IndexVersionValue.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/IndexVersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/IndexVersionValue.java index 4f67372926712..a658c84c16bbd 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/IndexVersionValue.java +++ b/server/src/main/java/org/elasticsearch/index/engine/IndexVersionValue.java @@ -37,7 +37,7 @@ final class IndexVersionValue extends VersionValue { @Override public long ramBytesUsed() { - return RAM_BYTES_USED; + return RAM_BYTES_USED + RamUsageEstimator.shallowSizeOf(translogLocation); } @Override From d0e31d42d339b8c9f6feeca68dc6614d050fd262 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Thu, 19 Apr 2018 11:25:27 -0700 Subject: [PATCH 22/28] Add support to match_phrase query for zero_terms_query. (#29598) (cherry picked from commit b9e1a002139a3fb550dfa68643efc6566ae95320) --- .../query-dsl/match-phrase-query.asciidoc | 2 + .../index/query/MatchPhraseQueryBuilder.java | 49 +++++++++++- .../query/MatchPhraseQueryBuilderTests.java | 37 ++++++++- .../index/search/MatchPhraseQueryIT.java | 77 +++++++++++++++++++ 4 files changed, 162 insertions(+), 3 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java diff --git a/docs/reference/query-dsl/match-phrase-query.asciidoc b/docs/reference/query-dsl/match-phrase-query.asciidoc index 943d0e84d36db..1f4b19eedc132 100644 --- a/docs/reference/query-dsl/match-phrase-query.asciidoc +++ b/docs/reference/query-dsl/match-phrase-query.asciidoc @@ -39,3 +39,5 @@ GET /_search } -------------------------------------------------- // CONSOLE + +This query also accepts `zero_terms_query`, as explained in <>.
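For illustration, here is a minimal sketch of exercising the new `zero_terms_query` option through the Java API. It mirrors the integration test added later in this patch; the index name, field, and `standard_stopwords` analyzer are assumptions borrowed from that test, and the snippet is assumed to run inside an ESIntegTestCase (which supplies client()):

    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.index.query.MatchPhraseQueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.index.search.MatchQuery.ZeroTermsQuery;

    // "the who" analyzes to zero terms under an analyzer that treats both words
    // as stopwords. zero_terms_query decides whether such a query matches no
    // documents (ZeroTermsQuery.NONE, the default) or all documents (ZeroTermsQuery.ALL).
    MatchPhraseQueryBuilder query = QueryBuilders.matchPhraseQuery("name", "the who")
            .analyzer("standard_stopwords") // analyzer defined in the integration test below
            .zeroTermsQuery(ZeroTermsQuery.ALL);
    SearchResponse response = client().prepareSearch("test").setQuery(query).get();
    // With ALL, the query rewrites to a match-all query, so every indexed document is a hit.

With NONE, the same search would return no hits, which is exactly what the integration test below asserts.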
diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java index 03a1f78289409..41e0dd8ffa0cb 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -28,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.search.MatchQuery; +import org.elasticsearch.index.search.MatchQuery.ZeroTermsQuery; import java.io.IOException; import java.util.Objects; @@ -39,6 +41,7 @@ public class MatchPhraseQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "match_phrase"; public static final ParseField SLOP_FIELD = new ParseField("slop"); + public static final ParseField ZERO_TERMS_QUERY_FIELD = new ParseField("zero_terms_query"); private final String fieldName; @@ -48,6 +51,8 @@ public class MatchPhraseQueryBuilder extends AbstractQueryBuilder { @@ -68,6 +71,11 @@ protected MatchPhraseQueryBuilder doCreateTestQueryBuilder() { if (randomBoolean()) { matchQuery.slop(randomIntBetween(0, 10)); } + + if (randomBoolean()) { + matchQuery.zeroTermsQuery(randomFrom(ZeroTermsQuery.ALL, ZeroTermsQuery.NONE)); + } + return matchQuery; } @@ -88,6 +96,12 @@ protected Map getAlternateVersions() { @Override protected void doAssertLuceneQuery(MatchPhraseQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { assertThat(query, notNullValue()); + + if (query instanceof MatchAllDocsQuery) { + assertThat(queryBuilder.zeroTermsQuery(), equalTo(ZeroTermsQuery.ALL)); + return; + } + assertThat(query, either(instanceOf(BooleanQuery.class)).or(instanceOf(PhraseQuery.class)) .or(instanceOf(TermQuery.class)).or(instanceOf(PointRangeQuery.class)) .or(instanceOf(IndexOrDocValuesQuery.class)).or(instanceOf(MatchNoDocsQuery.class))); @@ -108,7 +122,7 @@ public void testBadAnalyzer() throws IOException { assertThat(e.getMessage(), containsString("analyzer [bogusAnalyzer] not found")); } - public void testPhraseMatchQuery() throws IOException { + public void testFromSimpleJson() throws IOException { String json1 = "{\n" + " \"match_phrase\" : {\n" + " \"message\" : \"this is a test\"\n" + @@ -120,6 +134,7 @@ public void testPhraseMatchQuery() throws IOException { " \"message\" : {\n" + " \"query\" : \"this is a test\",\n" + " \"slop\" : 0,\n" + + " \"zero_terms_query\" : \"NONE\",\n" + " \"boost\" : 1.0\n" + " }\n" + " }\n" + @@ -128,6 +143,26 @@ public void testPhraseMatchQuery() throws IOException { checkGeneratedJson(expected, qb); } + public void testFromJson() throws IOException { + String json = "{\n" + + " \"match_phrase\" : {\n" + + " \"message\" : {\n" + + " \"query\" : \"this is a test\",\n" + + " \"slop\" : 2,\n" + + " \"zero_terms_query\" : \"ALL\",\n" + + " \"boost\" : 1.0\n" + + " }\n" + + " }\n" + + "}"; + + MatchPhraseQueryBuilder parsed = (MatchPhraseQueryBuilder) parseQuery(json); + checkGeneratedJson(json, parsed); + + assertEquals(json, "this is a test", parsed.value()); + assertEquals(json, 2, parsed.slop()); + assertEquals(json, ZeroTermsQuery.ALL, 
parsed.zeroTermsQuery()); + } + public void testParseFailsWithMultipleFields() throws IOException { String json = "{\n" + " \"match_phrase\" : {\n" + diff --git a/server/src/test/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java b/server/src/test/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java new file mode 100644 index 0000000000000..ebd0bf0460aeb --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.search; + +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.MatchPhraseQueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.search.MatchQuery.ZeroTermsQuery; +import org.elasticsearch.test.ESIntegTestCase; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; + +public class MatchPhraseQueryIT extends ESIntegTestCase { + private static final String INDEX = "test"; + + @Before + public void setUp() throws Exception { + super.setUp(); + CreateIndexRequestBuilder createIndexRequest = prepareCreate(INDEX).setSettings( + Settings.builder() + .put(indexSettings()) + .put("index.analysis.analyzer.standard_stopwords.type", "standard") + .putList("index.analysis.analyzer.standard_stopwords.stopwords", "of", "the", "who")); + assertAcked(createIndexRequest); + ensureGreen(); + } + + public void testZeroTermsQuery() throws ExecutionException, InterruptedException { + List indexRequests = getIndexRequests(); + indexRandom(true, false, indexRequests); + + MatchPhraseQueryBuilder baseQuery = QueryBuilders.matchPhraseQuery("name", "the who") + .analyzer("standard_stopwords"); + + MatchPhraseQueryBuilder matchNoneQuery = baseQuery.zeroTermsQuery(ZeroTermsQuery.NONE); + SearchResponse matchNoneResponse = client().prepareSearch(INDEX).setQuery(matchNoneQuery).get(); + assertHitCount(matchNoneResponse, 0L); + + MatchPhraseQueryBuilder matchAllQuery = baseQuery.zeroTermsQuery(ZeroTermsQuery.ALL); + SearchResponse matchAllResponse = client().prepareSearch(INDEX).setQuery(matchAllQuery).get(); + assertHitCount(matchAllResponse, 2L); + } + + + private List getIndexRequests() { + List requests = new ArrayList<>(); + requests.add(client().prepareIndex(INDEX, "band").setSource("name", "the 
beatles")); + requests.add(client().prepareIndex(INDEX, "band").setSource("name", "led zeppelin")); + return requests; + } +} From f47dfa30ef434808f5d350e2658efcd2ffe0bfc0 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 19 Apr 2018 16:58:58 -0400 Subject: [PATCH 23/28] Remove 7.0.0 from 6.x changelog (#29621) This commit removes 7.0.0 entires from the changelog for the 6.x branch. --- docs/CHANGELOG.asciidoc | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index 9c6e36a2e1d0d..02603540b4f91 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -5,27 +5,9 @@ = Elasticsearch Release Notes -== Elasticsearch 7.0.0 - -=== Breaking Changes - -=== Breaking Java Changes - -=== Deprecations - -=== New Features - -=== Enhancements - -=== Bug Fixes - -=== Regressions - -=== Known Issues - == Elasticsearch version 6.3.0 -=== New Features +=== New Features === Enhancements From 691af11b06d8d045d520af8f5d749637b9bd3807 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 19 Apr 2018 19:15:30 -0400 Subject: [PATCH 24/28] TEST: Mute testPrimaryRelocationWhileIndexing AwaitsFix #29626 --- .../elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java index 7de5862b855dd..d4df3e4e383fe 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -45,6 +45,7 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase { private static final int RELOCATION_COUNT = 15; @TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.index.shard:TRACE,org.elasticsearch.cluster.service:TRACE") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29626") public void testPrimaryRelocationWhileIndexing() throws Exception { internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(2, 3)); client().admin().indices().prepareCreate("test") From 6d2d5095ed2a0f37c22801d49eb72d23508408bb Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 19 Apr 2018 19:56:03 -0400 Subject: [PATCH 25/28] Remove stale comment from JVM stats (#29625) We removed catched throwable from the code base and left behind was a comment about catching InternalError in MemoryManagementMXBean. We are not going to catch InternalError here as we expect that to be fatal. This commit removes that stale comment. --- .../main/java/org/elasticsearch/monitor/jvm/JvmStats.java | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java index e9d3adba68255..45180fd1c6f0a 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java @@ -82,10 +82,8 @@ public static JvmStats jvmStats() { peakUsage.getUsed() < 0 ? 0 : peakUsage.getUsed(), peakUsage.getMax() < 0 ? 
0 : peakUsage.getMax() )); - } catch (Exception ex) { - /* ignore some JVMs might barf here with: - * java.lang.InternalError: Memory Pool not found - * we just omit the pool in that case!*/ + } catch (final Exception ignored) { + } } Mem mem = new Mem(heapCommitted, heapUsed, heapMax, nonHeapCommitted, nonHeapUsed, Collections.unmodifiableList(pools)); From c87bd30c476db0d6bc366e29fe6273aa6680afca Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 19 Apr 2018 20:12:24 -0400 Subject: [PATCH 26/28] Do not serialize common stats flags using ordinal (#29600) This commit removes serializing of common stats flags via their enum ordinals and instead uses an explicit index defined on the enum. This is to enable us to remove an unused flag (Suggest) without ruining the ordering and thus breaking serialization. --- .../admin/indices/stats/CommonStatsFlags.java | 49 ++++++++++--------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java index 7d6e7c124cd37..b222fed7f0fff 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java @@ -53,7 +53,7 @@ public CommonStatsFlags(StreamInput in) throws IOException { final long longFlags = in.readLong(); flags.clear(); for (Flag flag : Flag.values()) { - if ((longFlags & (1 << flag.ordinal())) != 0) { + if ((longFlags & (1 << flag.getIndex())) != 0) { flags.add(flag); } } @@ -68,7 +68,7 @@ public CommonStatsFlags(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { long longFlags = 0; for (Flag flag : flags) { - longFlags |= (1 << flag.ordinal()); + longFlags |= (1 << flag.getIndex()); } out.writeLong(longFlags); @@ -207,34 +207,39 @@ public CommonStatsFlags clone() { } public enum Flag { - // Do not change the order of these flags we use - // the ordinal for encoding! Only append to the end! - Store("store"), - Indexing("indexing"), - Get("get"), - Search("search"), - Merge("merge"), - Flush("flush"), - Refresh("refresh"), - QueryCache("query_cache"), - FieldData("fielddata"), - Docs("docs"), - Warmer("warmer"), - Completion("completion"), - Segments("segments"), - Translog("translog"), - Suggest("suggest"), // unused - RequestCache("request_cache"), - Recovery("recovery"); + Store("store", 0), + Indexing("indexing", 1), + Get("get", 2), + Search("search", 3), + Merge("merge", 4), + Flush("flush", 5), + Refresh("refresh", 6), + QueryCache("query_cache", 7), + FieldData("fielddata", 8), + Docs("docs", 9), + Warmer("warmer", 10), + Completion("completion", 11), + Segments("segments", 12), + Translog("translog", 13), + Suggest("suggest", 14), // unused + RequestCache("request_cache", 15), + Recovery("recovery", 16); private final String restName; + private final int index; - Flag(String restName) { + Flag(final String restName, final int index) { this.restName = restName; + this.index = index; } public String getRestName() { return restName; } + + private int getIndex() { + return index; + } + } } From e4b3b59d321cd16a51af01e56cf2190d3c2873dd Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 19 Apr 2018 20:49:56 -0400 Subject: [PATCH 27/28] Never leave stale delete tombstones in version map (#29619) Today the VersionMap does not clean up a stale delete tombstone if it does not require safe access.
However, in a very rare situation due to concurrent refreshes, the safe-access flag may be flipped over, and then the engine may accidentally consult that stale delete tombstone. This commit ensures that we never leave stale delete tombstones in the version map by always pruning delete tombstones when putting a new index entry, regardless of the value of the safe-access flag. --- .../index/engine/LiveVersionMap.java | 4 +++ .../index/engine/InternalEngineTests.java | 17 +++++++++ .../index/engine/LiveVersionMapTests.java | 36 +++++++++++++++++++ 3 files changed, 57 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index 6d9dc4a38974c..18d3cedb37e60 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -312,6 +312,10 @@ void maybePutIndexUnderLock(BytesRef uid, IndexVersionValue version) { if (maps.isSafeAccessMode()) { putIndexUnderLock(uid, version); } else { + // Even though we don't store a record of the indexing operation (and mark as unsafe), + // we should still remove any previous delete for this uid (to avoid accidental accesses). + // Note that this should not hurt performance because the tombstone is small (or empty) when unsafe is relevant. + removeTombstoneUnderLock(uid); maps.current.markAsUnsafe(); assert putAssertionMap(uid, version); } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 88bab9b67b6b1..2270901729594 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -1595,6 +1595,23 @@ public void testInternalVersioningOnPrimary() throws IOException { assertOpsOnPrimary(ops, Versions.NOT_FOUND, true, engine); } + public void testVersionOnPrimaryWithConcurrentRefresh() throws Exception { + List ops = generateSingleDocHistory(false, VersionType.INTERNAL, false, 2, 10, 100); + CountDownLatch latch = new CountDownLatch(1); + AtomicBoolean running = new AtomicBoolean(true); + Thread refreshThread = new Thread(() -> { + latch.countDown(); + while (running.get()) { + engine.refresh("test"); + } + }); + refreshThread.start(); + latch.await(); + assertOpsOnPrimary(ops, Versions.NOT_FOUND, true, engine); + running.set(false); + refreshThread.join(); + } + private int assertOpsOnPrimary(List ops, long currentOpVersion, boolean docDeleted, InternalEngine engine) throws IOException { String lastFieldValue = null; diff --git a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java index e0efcf9f0f73f..286e85cef3fc6 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java @@ -40,6 +40,8 @@ import java.util.concurrent.atomic.AtomicLong; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; public class LiveVersionMapTests extends ESTestCase { @@ -396,6 +398,40 @@ public void testPruneTombstonesWhileLocked() throws InterruptedException, IOExce assertEquals(0, map.getAllTombstones().size()); } + public void testRandomlyIndexDeleteAndRefresh() throws
Exception { + final LiveVersionMap versionMap = new LiveVersionMap(); + final BytesRef uid = uid("1"); + final long versions = between(10, 1000); + VersionValue latestVersion = null; + for (long i = 0; i < versions; i++) { + if (randomBoolean()) { + versionMap.beforeRefresh(); + versionMap.afterRefresh(randomBoolean()); + } + if (randomBoolean()) { + versionMap.enforceSafeAccess(); + } + try (Releasable ignore = versionMap.acquireLock(uid)) { + if (randomBoolean()) { + latestVersion = new DeleteVersionValue(randomNonNegativeLong(), randomLong(), randomLong(), randomLong()); + versionMap.putDeleteUnderLock(uid, (DeleteVersionValue) latestVersion); + assertThat(versionMap.getUnderLock(uid), equalTo(latestVersion)); + } else if (randomBoolean()) { + latestVersion = new IndexVersionValue(randomTranslogLocation(), randomNonNegativeLong(), randomLong(), randomLong()); + versionMap.maybePutIndexUnderLock(uid, (IndexVersionValue) latestVersion); + if (versionMap.isSafeAccessRequired()) { + assertThat(versionMap.getUnderLock(uid), equalTo(latestVersion)); + } else { + assertThat(versionMap.getUnderLock(uid), nullValue()); + } + } + if (versionMap.getUnderLock(uid) != null) { + assertThat(versionMap.getUnderLock(uid), equalTo(latestVersion)); + } + } + } + } + IndexVersionValue randomIndexVersionValue() { return new IndexVersionValue(randomTranslogLocation(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()); } From 12aebd46533e0ec7deedca2027fffdef91c65176 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 19 Apr 2018 21:25:14 -0400 Subject: [PATCH 28/28] TEST: Unmute testPrimaryRelocationWhileIndexing Previously we did not put an index operation into the version map if that map did not require safe access, but we removed the existing delete tombstone only if assertions were enabled. In #29585, we removed that side effect of assertions, and then this test started failing. This failure can be explained as follows: - Step 1: Index a doc, then delete that doc - Step 2: The version map can switch to unsafe mode because of concurrent refreshes (implicitly called by flushes) - Step 3: Index a document - the version map won't add this version value and won't prune the tombstone (previously it did) - Step 4: Delete a document - this will return NOT_FOUND instead of DELETED because of the stale delete tombstone This failure is actually fixed by #29619, in which we never leave stale delete tombstones. Closes #29626 --- .../elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java index d4df3e4e383fe..7de5862b855dd 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -45,7 +45,6 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase { private static final int RELOCATION_COUNT = 15; @TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.index.shard:TRACE,org.elasticsearch.cluster.service:TRACE") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29626") public void testPrimaryRelocationWhileIndexing() throws Exception { internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(2, 3)); client().admin().indices().prepareCreate("test")