From e94ec18d78e072f651b517f3b79297f898aa8cb2 Mon Sep 17 00:00:00 2001 From: weizijun Date: Wed, 24 Nov 2021 16:21:03 +0800 Subject: [PATCH 01/55] fixup (#80901) a tiny catch for a rollover test --- .../action/admin/indices/rollover/ConditionTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java index dd8464803aa90..edd7403016d24 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java @@ -40,7 +40,7 @@ public void testMaxDocs() { assertThat(evaluate.matched, equalTo(true)); long maxDocsNotMatch = randomIntBetween(0, 99); - evaluate = maxDocsCondition.evaluate(new Condition.Stats(0, maxDocsNotMatch, randomByteSize(), randomByteSize())); + evaluate = maxDocsCondition.evaluate(new Condition.Stats(maxDocsNotMatch, 0, randomByteSize(), randomByteSize())); assertThat(evaluate.condition, equalTo(maxDocsCondition)); assertThat(evaluate.matched, equalTo(false)); } From e6e812c75f3d6e30525dc0fb26cc1f87efcc3826 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Wed, 24 Nov 2021 09:51:13 +0000 Subject: [PATCH 02/55] Allow to set the security manager (#80957) Post JDK 17 the security manager is disabled by default - setSecurityManager throws UOE - see JEP 411. This change adds a command line option to explicitly allow to set the security manager, which enables early testing with releases greater than JDK 17. 
--- .../gradle/internal/ElasticsearchTestBasePlugin.java | 1 + .../org/elasticsearch/tools/launchers/SystemJvmOptions.java | 2 ++ 2 files changed, 3 insertions(+) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index 840a35b4dea6c..ebd9841f2108b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -97,6 +97,7 @@ public void execute(Task t) { "-Xmx" + System.getProperty("tests.heap.size", "512m"), "-Xms" + System.getProperty("tests.heap.size", "512m"), "--illegal-access=deny", + "-Djava.security.manager=allow", // TODO: only open these for mockito when it is modularized "--add-opens=java.base/java.security.cert=ALL-UNNAMED", "--add-opens=java.base/java.nio.channels=ALL-UNNAMED", diff --git a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/SystemJvmOptions.java index e6d50f9bda03a..d5ebf57d6968b 100644 --- a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/SystemJvmOptions.java +++ b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/SystemJvmOptions.java @@ -27,6 +27,8 @@ static List systemJvmOptions() { * networkaddress.cache.negative ttl; set to -1 to cache forever. */ "-Des.networkaddress.cache.negative.ttl=10", + // Allow to set the security manager. 
+ "-Djava.security.manager=allow", // pre-touch JVM emory pages during initialization "-XX:+AlwaysPreTouch", // explicitly set the stack size From 89946c51b3b65146e4a3671379994eaf8a4ce065 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Wed, 24 Nov 2021 09:54:15 +0000 Subject: [PATCH 03/55] [Transform] Remove legacy transform templates (#80948) Legacy transform templates will exist in clusters where transforms were first used prior to 7.16.0. (From 7.16.0 transforms uses composable templates.) Unlike ML, there's no danger these legacy templates date back to 6.x and contain types, so they are not at risk of being broken in 8.x. But it's still good to remove them from the cluster to keep it as clean as possible. --- .../xpack/transform/Transform.java | 20 +++++ .../xpack/restart/FullClusterRestartIT.java | 78 +++++++++++++++++++ 2 files changed, 98 insertions(+) diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java index 1cac435918727..22deda76cff4a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java @@ -21,6 +21,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; @@ -109,7 +110,9 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.function.Supplier; +import java.util.function.UnaryOperator; import static org.elasticsearch.xpack.core.ClientHelper.TRANSFORM_ORIGIN; import static 
org.elasticsearch.xpack.core.transform.TransformMessages.FAILED_TO_UNSET_RESET_MODE; @@ -287,6 +290,23 @@ public List getNamedXContent() { return new TransformNamedXContentProvider().getNamedXContentParsers(); } + @Override + public UnaryOperator> getIndexTemplateMetadataUpgrader() { + return templates -> { + // These are all legacy templates that were created in old versions. None are needed now. + // The "internal" indices became system indices and the "notifications" indices now use composable templates. + templates.remove(".data-frame-internal-1"); + templates.remove(".data-frame-internal-2"); + templates.remove(".transform-internal-003"); + templates.remove(".transform-internal-004"); + templates.remove(".transform-internal-005"); + templates.remove(".data-frame-notifications-1"); + templates.remove(".transform-notifications-000001"); + templates.remove(".transform-notifications-000002"); + return templates; + }; + } + @Override public Collection getSystemIndexDescriptors(Settings settings) { try { diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index d64da74fe9080..0336538124e6b 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -51,6 +51,7 @@ import static org.elasticsearch.upgrades.FullClusterRestartIT.assertNumHits; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -390,6 +391,83 @@ public void testRollupAfterRestart() throws Exception { } } + public void 
testTransformLegacyTemplateCleanup() throws Exception { + assumeTrue("Before 7.2 transforms didn't exist", getOldClusterVersion().onOrAfter(Version.V_7_2_0)); + if (isRunningAgainstOldCluster()) { + + // create the source index + final Request createIndexRequest = new Request("PUT", "customers"); + createIndexRequest.setJsonEntity( + "{" + + "\"mappings\": {" + + " \"properties\": {" + + " \"customer_id\": { \"type\": \"keyword\" }," + + " \"price\": { \"type\": \"double\" }" + + " }" + + "}" + + "}" + ); + + Map createIndexResponse = entityAsMap(client().performRequest(createIndexRequest)); + assertThat(createIndexResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + + // create a transform + String endpoint = getOldClusterVersion().onOrAfter(Version.V_7_5_0) + ? "_transform/transform-full-cluster-restart-test" + : "_data_frame/transforms/transform-full-cluster-restart-test"; + final Request createTransformRequest = new Request("PUT", endpoint); + + createTransformRequest.setJsonEntity( + "{" + + "\"source\":{" + + " \"index\":\"customers\"" + + "}," + + "\"description\":\"testing\"," + + "\"dest\":{" + + " \"index\":\"max_price\"" + + "}," + + "\"pivot\": {" + + " \"group_by\":{" + + " \"customer_id\":{" + + " \"terms\":{" + + " \"field\":\"customer_id\"" + + " }" + + " }" + + " }," + + " \"aggregations\":{" + + " \"max_price\":{" + + " \"max\":{" + + " \"field\":\"price\"" + + " }" + + " }" + + " }" + + "}" + + "}" + ); + + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + } else { + // legacy index templates created in previous releases should not be present anymore + assertBusy(() -> { + Request request = new Request("GET", "/_template/.transform-*,.data-frame-*"); + try { + Response response = client().performRequest(request); + Map responseLevel = entityAsMap(response); + assertNotNull(responseLevel); + 
assertThat(responseLevel.keySet(), empty()); + } catch (ResponseException e) { + // not found is fine + assertThat( + "Unexpected failure getting templates: " + e.getResponse().getStatusLine(), + e.getResponse().getStatusLine().getStatusCode(), + is(404) + ); + } + }); + } + } + public void testSlmPolicyAndStats() throws IOException { SnapshotLifecyclePolicy slmPolicy = new SnapshotLifecyclePolicy( "test-policy", From 0f108251341303dbf4afabaadc8f1d673a368b0c Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Wed, 24 Nov 2021 12:19:04 +0100 Subject: [PATCH 04/55] clear auto-follow errors on deleting pattern (#80544) --- .../ccr/action/AutoFollowCoordinator.java | 43 +- ...ransportDeleteAutoFollowPatternAction.java | 37 +- .../action/AutoFollowCoordinatorTests.java | 642 +++++------------- 3 files changed, 220 insertions(+), 502 deletions(-) diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index f514694e83396..38f7603255d6a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -84,12 +84,28 @@ public class AutoFollowCoordinator extends AbstractLifecycleComponent implements private volatile TimeValue waitForMetadataTimeOut; private volatile Map autoFollowers = Collections.emptyMap(); + private volatile Set patterns = Set.of(); // The following fields are read and updated under a lock: private long numberOfSuccessfulIndicesAutoFollowed = 0; private long numberOfFailedIndicesAutoFollowed = 0; private long numberOfFailedRemoteClusterStateRequests = 0; - private final LinkedHashMap> recentAutoFollowErrors; + private final LinkedHashMap> recentAutoFollowErrors; + + private static final class AutoFollowErrorKey { + private final String pattern; + private final 
String index; + + private AutoFollowErrorKey(String pattern, String index) { + this.pattern = Objects.requireNonNull(pattern); + this.index = index; + } + + @Override + public String toString() { + return index != null ? pattern + ':' + index : pattern; + } + } public AutoFollowCoordinator( final Settings settings, @@ -109,7 +125,7 @@ public AutoFollowCoordinator( this.executor = Objects.requireNonNull(executor); this.recentAutoFollowErrors = new LinkedHashMap<>() { @Override - protected boolean removeEldestEntry(final Map.Entry> eldest) { + protected boolean removeEldestEntry(final Map.Entry> eldest) { return size() > MAX_AUTO_FOLLOW_ERRORS; } }; @@ -162,21 +178,31 @@ public synchronized AutoFollowStats getStats() { } } + var recentAutoFollowErrorsCopy = new TreeMap>(); + for (var entry : recentAutoFollowErrors.entrySet()) { + recentAutoFollowErrorsCopy.put(entry.getKey().toString(), entry.getValue()); + } + return new AutoFollowStats( numberOfFailedIndicesAutoFollowed, numberOfFailedRemoteClusterStateRequests, numberOfSuccessfulIndicesAutoFollowed, - new TreeMap<>(recentAutoFollowErrors), + recentAutoFollowErrorsCopy, timesSinceLastAutoFollowPerRemoteCluster ); } synchronized void updateStats(List results) { + // purge stats for removed patterns + var currentPatterns = this.patterns; + recentAutoFollowErrors.keySet().removeIf(key -> currentPatterns.contains(key.pattern) == false); + // add new stats long newStatsReceivedTimeStamp = absoluteMillisTimeProvider.getAsLong(); for (AutoFollowResult result : results) { + var onlyPatternKey = new AutoFollowErrorKey(result.autoFollowPatternName, null); if (result.clusterStateFetchException != null) { recentAutoFollowErrors.put( - result.autoFollowPatternName, + onlyPatternKey, Tuple.tuple(newStatsReceivedTimeStamp, new ElasticsearchException(result.clusterStateFetchException)) ); numberOfFailedRemoteClusterStateRequests++; @@ -188,9 +214,9 @@ synchronized void updateStats(List results) { result.clusterStateFetchException 
); } else { - recentAutoFollowErrors.remove(result.autoFollowPatternName); + recentAutoFollowErrors.remove(onlyPatternKey); for (Map.Entry entry : result.autoFollowExecutionResults.entrySet()) { - final String patternAndIndexKey = result.autoFollowPatternName + ":" + entry.getKey().getName(); + var patternAndIndexKey = new AutoFollowErrorKey(result.autoFollowPatternName, entry.getKey().getName()); if (entry.getValue() != null) { numberOfFailedIndicesAutoFollowed++; recentAutoFollowErrors.put( @@ -199,7 +225,7 @@ synchronized void updateStats(List results) { ); LOGGER.warn( new ParameterizedMessage( - "failure occurred while auto following index [{}] for auto follow " + "pattern [{}]", + "failure occurred while auto following index [{}] for auto follow pattern [{}]", entry.getKey(), result.autoFollowPatternName ), @@ -211,7 +237,6 @@ synchronized void updateStats(List results) { } } } - } } @@ -227,6 +252,8 @@ void updateAutoFollowers(ClusterState followerClusterState) { return; } + this.patterns = Set.copyOf(autoFollowMetadata.getPatterns().keySet()); + final CopyOnWriteHashMap autoFollowersCopy = CopyOnWriteHashMap.copyOf(this.autoFollowers); Set newRemoteClusters = autoFollowMetadata.getPatterns() .values() diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java index a791028b22ce7..980afd99977c9 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java @@ -19,17 +19,13 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.tasks.Task; 
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; -import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - public class TransportDeleteAutoFollowPatternAction extends AcknowledgedTransportMasterNodeAction { @Inject @@ -72,28 +68,23 @@ public ClusterState execute(ClusterState currentState) { static ClusterState innerDelete(DeleteAutoFollowPatternAction.Request request, ClusterState currentState) { AutoFollowMetadata currentAutoFollowMetadata = currentState.metadata().custom(AutoFollowMetadata.TYPE); - if (currentAutoFollowMetadata == null) { - throw new ResourceNotFoundException("auto-follow pattern [{}] is missing", request.getName()); - } - Map patterns = currentAutoFollowMetadata.getPatterns(); - AutoFollowPattern autoFollowPatternToRemove = patterns.get(request.getName()); - if (autoFollowPatternToRemove == null) { + if (currentAutoFollowMetadata == null || currentAutoFollowMetadata.getPatterns().get(request.getName()) == null) { throw new ResourceNotFoundException("auto-follow pattern [{}] is missing", request.getName()); } - final Map patternsCopy = new HashMap<>(patterns); - final Map> followedLeaderIndexUUIDSCopy = new HashMap<>( - currentAutoFollowMetadata.getFollowedLeaderIndexUUIDs() - ); - final Map> headers = new HashMap<>(currentAutoFollowMetadata.getHeaders()); - patternsCopy.remove(request.getName()); - followedLeaderIndexUUIDSCopy.remove(request.getName()); - headers.remove(request.getName()); + AutoFollowMetadata newAutoFollowMetadata = removePattern(currentAutoFollowMetadata, request.getName()); - AutoFollowMetadata newAutoFollowMetadata = new AutoFollowMetadata(patternsCopy, followedLeaderIndexUUIDSCopy, headers); - ClusterState.Builder newState = 
ClusterState.builder(currentState); - newState.metadata(Metadata.builder(currentState.getMetadata()).putCustom(AutoFollowMetadata.TYPE, newAutoFollowMetadata).build()); - return newState.build(); + return ClusterState.builder(currentState) + .metadata(Metadata.builder(currentState.getMetadata()).putCustom(AutoFollowMetadata.TYPE, newAutoFollowMetadata)) + .build(); + } + + private static AutoFollowMetadata removePattern(AutoFollowMetadata metadata, String name) { + return new AutoFollowMetadata( + Maps.copyMapWithRemovedEntry(metadata.getPatterns(), name), + Maps.copyMapWithRemovedEntry(metadata.getFollowedLeaderIndexUUIDs(), name), + Maps.copyMapWithRemovedEntry(metadata.getHeaders(), name) + ); } @Override diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index baf6b5906af74..671c920270a9f 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -96,24 +96,7 @@ public void testAutoFollower() { ClusterState remoteState = createRemoteClusterState("logs-20190101", true); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -183,24 +166,7 @@ public void testAutoFollower_dataStream() { ClusterState remoteState = createRemoteClusterStateWithDataStream("logs-foobar"); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - 
"remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -268,24 +234,7 @@ public void testAutoFollowerClusterStateApiFailure() { Client client = mock(Client.class); when(client.getRemoteClusterClient(anyString())).thenReturn(client); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -335,24 +284,7 @@ public void testAutoFollowerUpdateClusterStateFailure() { when(client.getRemoteClusterClient(anyString())).thenReturn(client); ClusterState remoteState = createRemoteClusterState("logs-20190101", true); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -715,24 +647,7 @@ public void testAutoFollowerCreateAndFollowApiCallFailure() { when(client.getRemoteClusterClient(anyString())).thenReturn(client); ClusterState remoteState = createRemoteClusterState("logs-20190101", true); - AutoFollowPattern 
autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -790,24 +705,7 @@ void cleanFollowedRemoteIndices(ClusterState remoteClusterState, List pa } public void testGetLeaderIndicesToFollow() { - final AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("metrics-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + final AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "metrics-*"); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); Metadata.Builder imdBuilder = Metadata.builder(); @@ -882,24 +780,7 @@ public void testGetLeaderIndicesToFollow() { } public void testGetLeaderIndicesToFollow_shardsNotStarted() { - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "*"); // 1 shard started and another not started: ClusterState remoteState = createRemoteClusterState("index1", true); @@ -934,24 +815,7 @@ public void testGetLeaderIndicesToFollow_shardsNotStarted() { } public void testGetLeaderIndicesToFollowWithClosedIndices() { - final AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, 
- null, - null, - null, - null, - null, - null - ); + final AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "*"); // index is opened ClusterState remoteState = ClusterStateCreationUtils.stateWithActivePrimary("test-index", true, randomIntBetween(1, 3), 0); @@ -1090,24 +954,8 @@ public void testCleanFollowedLeaderIndicesNoEntry() { } public void testGetFollowerIndexName() { - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("metrics-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "metrics-*"); + assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("metrics-0")); autoFollowPattern = new AutoFollowPattern( @@ -1152,15 +1000,7 @@ public void testGetFollowerIndexName() { } public void testStats() { - AutoFollowCoordinator autoFollowCoordinator = new AutoFollowCoordinator( - Settings.EMPTY, - null, - mockClusterService(), - new CcrLicenseChecker(() -> true, () -> false), - () -> 1L, - () -> 1L, - Runnable::run - ); + AutoFollowCoordinator autoFollowCoordinator = createAutoFollowCoordinator(); autoFollowCoordinator.updateStats(Collections.singletonList(new AutoFollowCoordinator.AutoFollowResult("_alias1"))); AutoFollowStats autoFollowStats = autoFollowCoordinator.getStats(); @@ -1244,69 +1084,9 @@ public void testUpdateAutoFollowers() { ); // Add 3 patterns: Map patterns = new HashMap<>(); - patterns.put( - "pattern1", - new AutoFollowPattern( - "remote1", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); - patterns.put( - "pattern2", - new AutoFollowPattern( - "remote2", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - 
Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); - patterns.put( - "pattern3", - new AutoFollowPattern( - "remote2", - Collections.singletonList("metrics-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); + patterns.put("pattern1", createAutoFollowPattern("remote1", "logs-*")); + patterns.put("pattern2", createAutoFollowPattern("remote2", "logs-*")); + patterns.put("pattern3", createAutoFollowPattern("remote2", "metrics-*")); ClusterState clusterState = ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() @@ -1335,27 +1115,7 @@ public void testUpdateAutoFollowers() { assertThat(autoFollowCoordinator.getStats().getAutoFollowedClusters().get("remote2"), notNullValue()); assertThat(removedAutoFollower1.removed, is(true)); // Add pattern 4: - patterns.put( - "pattern4", - new AutoFollowPattern( - "remote1", - Collections.singletonList("metrics-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); + patterns.put("pattern4", createAutoFollowPattern("remote1", "metrics-*")); clusterState = ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() @@ -1388,15 +1148,7 @@ public void testUpdateAutoFollowers() { } public void testUpdateAutoFollowersNoPatterns() { - AutoFollowCoordinator autoFollowCoordinator = new AutoFollowCoordinator( - Settings.EMPTY, - null, - mockClusterService(), - new CcrLicenseChecker(() -> true, () -> false), - () -> 1L, - () -> 1L, - Runnable::run - ); + AutoFollowCoordinator autoFollowCoordinator = createAutoFollowCoordinator(); ClusterState clusterState = ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() @@ -1411,15 +1163,7 @@ public void testUpdateAutoFollowersNoPatterns() { } public void 
testUpdateAutoFollowersNoAutoFollowMetadata() { - AutoFollowCoordinator autoFollowCoordinator = new AutoFollowCoordinator( - Settings.EMPTY, - null, - mockClusterService(), - new CcrLicenseChecker(() -> true, () -> false), - () -> 1L, - () -> 1L, - Runnable::run - ); + AutoFollowCoordinator autoFollowCoordinator = createAutoFollowCoordinator(); ClusterState clusterState = ClusterState.builder(new ClusterName("remote")).build(); autoFollowCoordinator.updateAutoFollowers(clusterState); assertThat(autoFollowCoordinator.getStats().getAutoFollowedClusters().size(), equalTo(0)); @@ -1442,69 +1186,9 @@ public void testUpdateAutoFollowersNoActivePatterns() { // Add 3 patterns: Map patterns = new HashMap<>(); - patterns.put( - "pattern1", - new AutoFollowPattern( - "remote1", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); - patterns.put( - "pattern2", - new AutoFollowPattern( - "remote2", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); - patterns.put( - "pattern3", - new AutoFollowPattern( - "remote2", - Collections.singletonList("metrics-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); + patterns.put("pattern1", createAutoFollowPattern("remote1", "logs-*")); + patterns.put("pattern2", createAutoFollowPattern("remote2", "logs-*")); + patterns.put("pattern3", createAutoFollowPattern("remote2", "metrics-*")); autoFollowCoordinator.updateAutoFollowers( ClusterState.builder(new ClusterName("remote")) @@ -1587,27 +1271,7 @@ public void testUpdateAutoFollowersNoActivePatterns() { assertThat(removedAutoFollower2.removed, is(false)); // Add active pattern 4 and make pattern 2 inactive - patterns.put( - 
"pattern4", - new AutoFollowPattern( - "remote1", - Collections.singletonList("metrics-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); + patterns.put("pattern4", createAutoFollowPattern("remote1", "metrics-*")); patterns.computeIfPresent( "pattern2", (name, pattern) -> new AutoFollowPattern( @@ -1670,24 +1334,7 @@ public void testWaitForMetadataVersion() { Client client = mock(Client.class); when(client.getRemoteClusterClient(anyString())).thenReturn(client); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -1751,24 +1398,7 @@ public void testWaitForTimeOut() { Client client = mock(Client.class); when(client.getRemoteClusterClient(anyString())).thenReturn(client); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -1822,24 +1452,7 @@ public void testAutoFollowerSoftDeletesDisabled() { ClusterState remoteState = createRemoteClusterState("logs-20190101", false); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - 
null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -1905,24 +1518,7 @@ public void testAutoFollowerFollowerIndexAlreadyExists() { ClusterState remoteState = createRemoteClusterState("logs-20190101", true); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -2094,27 +1690,7 @@ public void testClosedIndicesAreNotAutoFollowed() { .putCustom( AutoFollowMetadata.TYPE, new AutoFollowMetadata( - Map.of( - pattern, - new AutoFollowPattern( - "remote", - List.of("docs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ), + Map.of(pattern, createAutoFollowPattern("remote", "docs-*")), Map.of(pattern, List.of()), Map.of(pattern, Map.of()) ) @@ -2350,6 +1926,150 @@ public void testFollowerIndexIsCreatedInExecuteAutoFollow() { assertThat(autoFollowResults.v2().contains(indexName), equalTo(true)); } + public void testRemovesClusterLevelErrorsOnRemovingAutoFollowPattern() { + // given auto-follow pattern added + var pattern1 = createAutoFollowPattern("remote1", "logs-*"); + var pattern2 = createAutoFollowPattern("remote2", "logs-*"); + var pattern3 = createAutoFollowPattern("remote2", "metrics-*");// same remote + + var autoFollowCoordinator = createAutoFollowCoordinator(); + autoFollowCoordinator.updateAutoFollowers( + 
createClusterStateWith(Map.of("pattern1", pattern1, "pattern2", pattern2, "pattern3", pattern3)) + ); + + // and stats are published + autoFollowCoordinator.updateStats( + List.of( + new AutoFollowCoordinator.AutoFollowResult("pattern1", new RuntimeException("ClusterStateFetchException")), + new AutoFollowCoordinator.AutoFollowResult("pattern2", new RuntimeException("ClusterStateFetchException")), + new AutoFollowCoordinator.AutoFollowResult("pattern3", new RuntimeException("ClusterStateFetchException")) + ) + ); + + // when auto-follow pattern `pattern3` is removed + var before = autoFollowCoordinator.getStats(); + autoFollowCoordinator.updateAutoFollowers(createClusterStateWith(Map.of("pattern1", pattern1, "pattern2", pattern2))); + autoFollowCoordinator.updateStats(List.of());// actually triggers the purge + var after = autoFollowCoordinator.getStats(); + + // then stats are removed as well (but only for the removed pattern) + assertThat(before.getRecentAutoFollowErrors().keySet(), equalTo(Set.of("pattern1", "pattern2", "pattern3"))); + assertThat(after.getRecentAutoFollowErrors().keySet(), equalTo(Set.of("pattern1", "pattern2"))); + } + + public void testRemovesIndexLevelErrorsOnRemovingAutoFollowPattern() { + // given auto-follow pattern added + var pattern1 = createAutoFollowPattern("remote1", "logs-*"); + var pattern2 = createAutoFollowPattern("remote2", "logs-*"); + var pattern3 = createAutoFollowPattern("remote2", "metrics-*");// same remote + + var autoFollowCoordinator = createAutoFollowCoordinator(); + autoFollowCoordinator.updateAutoFollowers( + createClusterStateWith(Map.of("pattern1", pattern1, "pattern2", pattern2, "pattern3", pattern3)) + ); + + // and stats are published + autoFollowCoordinator.updateStats( + List.of( + new AutoFollowCoordinator.AutoFollowResult( + "pattern1", + List.of(Tuple.tuple(new Index("logs-1", UUIDs.base64UUID()), new RuntimeException("AutoFollowExecutionException"))) + ), + new AutoFollowCoordinator.AutoFollowResult( + 
"pattern2", + List.of(Tuple.tuple(new Index("logs-1", UUIDs.base64UUID()), new RuntimeException("AutoFollowExecutionException"))) + ), + new AutoFollowCoordinator.AutoFollowResult( + "pattern3", + List.of(Tuple.tuple(new Index("metrics-1", UUIDs.base64UUID()), new RuntimeException("AutoFollowExecutionException"))) + ) + ) + ); + + // when auto-follow pattern `pattern3` is removed + var before = autoFollowCoordinator.getStats(); + autoFollowCoordinator.updateAutoFollowers(createClusterStateWith(Map.of("pattern1", pattern1, "pattern2", pattern2))); + autoFollowCoordinator.updateStats(List.of());// actually triggers the purge + var after = autoFollowCoordinator.getStats(); + + // then stats are removed as well (but only for the removed pattern) + assertThat( + before.getRecentAutoFollowErrors().keySet(), + equalTo(Set.of("pattern1:logs-1", "pattern2:logs-1", "pattern3:metrics-1")) + ); + assertThat(after.getRecentAutoFollowErrors().keySet(), equalTo(Set.of("pattern1:logs-1", "pattern2:logs-1"))); + } + + public void testRemovesErrorsIfPatternContainsColon() { + // given auto-follow pattern added + var pattern1 = createAutoFollowPattern("remote1", "logs-*"); + var pattern2 = createAutoFollowPattern("remote2", "logs-*"); + var pattern3 = createAutoFollowPattern("remote2", "metrics-*");// same remote + + var autoFollowCoordinator = createAutoFollowCoordinator(); + autoFollowCoordinator.updateAutoFollowers( + createClusterStateWith(Map.of("pattern:1", pattern1, "pattern:2", pattern2, "pattern:3", pattern3)) + ); + + // and stats are published + autoFollowCoordinator.updateStats( + List.of( + new AutoFollowCoordinator.AutoFollowResult("pattern:1", new RuntimeException("ClusterStateFetchException")), + new AutoFollowCoordinator.AutoFollowResult("pattern:2", new RuntimeException("ClusterStateFetchException")), + new AutoFollowCoordinator.AutoFollowResult("pattern:3", new RuntimeException("ClusterStateFetchException")) + ) + ); + + // when auto-follow pattern `pattern:3` is 
removed + var before = autoFollowCoordinator.getStats(); + autoFollowCoordinator.updateAutoFollowers(createClusterStateWith(Map.of("pattern:1", pattern1, "pattern:2", pattern2))); + autoFollowCoordinator.updateStats(List.of());// actually triggers the purge + var after = autoFollowCoordinator.getStats(); + + // then stats are removed as well (but only for the removed pattern) + assertThat(before.getRecentAutoFollowErrors().keySet(), equalTo(Set.of("pattern:1", "pattern:2", "pattern:3"))); + assertThat(after.getRecentAutoFollowErrors().keySet(), equalTo(Set.of("pattern:1", "pattern:2"))); + } + + private AutoFollowCoordinator createAutoFollowCoordinator() { + return new AutoFollowCoordinator( + Settings.EMPTY, + null, + mockClusterService(), + new CcrLicenseChecker(() -> true, () -> false), + () -> 1L, + () -> 1L, + Runnable::run + ); + } + + private ClusterState createClusterStateWith(Map patterns) { + return ClusterState.builder(new ClusterName("remote")) + .metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Map.of(), Map.of()))) + .build(); + } + + private AutoFollowPattern createAutoFollowPattern(String remoteCluster, String pattern) { + return new AutoFollowPattern( + remoteCluster, + List.of(pattern), + List.of(), + null, + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + } + private Tuple, Set> executeAutoFollow( String indexPattern, ClusterState finalRemoteState @@ -2364,27 +2084,7 @@ private Tuple, Set> execute .putCustom( AutoFollowMetadata.TYPE, new AutoFollowMetadata( - Map.of( - pattern, - new AutoFollowPattern( - "remote", - List.of(indexPattern), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ), + Map.of(pattern, createAutoFollowPattern("remote", indexPattern)), Map.of(pattern, List.of()), Map.of(pattern, Map.of()) ) From 
594fe00396ed70a2cd035775badbdb5ef2aecc98 Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Wed, 24 Nov 2021 13:57:32 +0000 Subject: [PATCH 05/55] Fix WatcherRestartIT (#80986) This removes the unnecessary watcher start call in the test to reduce the possibility of flakiness. The call is not needed as the Watcher plugin is active and it will install (and upgrade/remove) the templates regardless if watcher is started. --- .../test/java/org/elasticsearch/upgrades/WatcherRestartIT.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatcherRestartIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatcherRestartIT.java index 54921de6b9320..d0bced071bd78 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatcherRestartIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatcherRestartIT.java @@ -30,9 +30,6 @@ public void testWatcherRestart() throws Exception { } public void testEnsureWatcherDeletesLegacyTemplates() throws Exception { - client().performRequest(new Request("POST", "/_watcher/_start")); - ensureWatcherStarted(); - if (CLUSTER_TYPE.equals(ClusterType.UPGRADED)) { // legacy index template created in previous releases should not be present anymore assertBusy(() -> { From 99df2fbff456f0c8b3ad35f71899174bffacd107 Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Wed, 24 Nov 2021 16:09:20 +0100 Subject: [PATCH 06/55] Fix flood stage with system indices (#80674) System indices do not allow setting changes. This made flood stage handling no longer work for nodes that host a shard of a system index. Now pass in an origin to ensure ES can mark indices located on a node above flood stage as "read-only / allow-delete", regardless of whether it is a system index or not. 
--- .../allocation/DiskThresholdMonitorIT.java | 82 +++++++++++++++++++ .../put/UpdateSettingsRequestBuilder.java | 8 ++ .../allocation/DiskThresholdMonitor.java | 1 + 3 files changed, 91 insertions(+) create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java new file mode 100644 index 0000000000000..917742010e9be --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.ClusterInfoService; +import org.elasticsearch.cluster.DiskUsageIntegTestCase; +import org.elasticsearch.cluster.InternalClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.tasks.TaskResultsService; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Locale; + +import static org.elasticsearch.index.store.Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class DiskThresholdMonitorIT extends DiskUsageIntegTestCase { + + private static final long FLOODSTAGE_BYTES = new ByteSizeValue(10, ByteSizeUnit.KB).getBytes(); + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), FLOODSTAGE_BYTES * 2 + "b") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), FLOODSTAGE_BYTES * 2 + "b") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), FLOODSTAGE_BYTES + "b") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "0ms") + .build(); + } + + public void testFloodStageExceeded() throws Exception { + internalCluster().startMasterOnlyNode(); + final String dataNodeName = internalCluster().startDataOnlyNode(); + + final 
InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance( + ClusterInfoService.class + ); + + final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms") + .build() + ); + // ensure we have a system index on the data node too. + assertAcked(client().admin().indices().prepareCreate(TaskResultsService.TASK_INDEX)); + + getTestFileStore(dataNodeName).setTotalSpace(1L); + refreshClusterInfo(); + assertBusy(() -> { + assertBlocked( + client().prepareIndex().setIndex(indexName).setId("1").setSource("f", "g"), + IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK + ); + assertThat( + client().admin() + .indices() + .prepareGetSettings(indexName) + .setNames(IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE) + .get() + .getSetting(indexName, IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE), + equalTo("true") + ); + }); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java index 1e64c26f91b49..0ec646dcddd8e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java @@ -83,4 +83,12 @@ public UpdateSettingsRequestBuilder setPreserveExisting(boolean preserveExisting request.setPreserveExisting(preserveExisting); return this; } + + /** + * Sets the origin to use, only set this when the settings update is requested by ES internal processes. 
+ */ + public UpdateSettingsRequestBuilder origin(String origin) { + request.origin(origin); + return this; + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java index c5fb7d1307002..868d96c1c098c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -453,6 +453,7 @@ protected void updateIndicesReadOnly(Set indicesToUpdate, ActionListener .indices() .prepareUpdateSettings(indicesToUpdate.toArray(Strings.EMPTY_ARRAY)) .setSettings(readOnlySettings) + .origin("disk-threshold-monitor") .execute(wrappedListener.map(r -> null)); } From 9548bdbbbb85ccf9261ea0226a4d3161eb4ed346 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 24 Nov 2021 17:20:22 +0100 Subject: [PATCH 07/55] Fix flaky DynamicMappingIT testDynamicRuntimeObjectFields (#80999) The final part of this test checks that we throw a MapperParsingException when we try to index into a dynamic runtime object that has already been mapped to a different type. Under very rare circumstances this can fail when the mapping update that a previous document index operation has triggered hasn't been completely applied on the shard the second document is targeted at. In this case, indexing the second document can itself trigger a mapping merge operation that can fail with a different exception type (IAE) with a very similar message. In order to simplify the test and make it more robust we can use the same document id for both index requests, making sure we target the same shard group. 
Closes #80722 --- .../java/org/elasticsearch/index/mapper/DynamicMappingIT.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 38b0bed97457d..6bc6accbfa76f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -447,10 +447,13 @@ public void testDynamicRuntimeObjectFields() { ); // the parent object has been mapped dynamic:true, hence the field gets indexed + // we use a fixed doc id here to make sure this document and the one we sent later with a conflicting type + // target the same shard where we are sure the mapping update has been applied assertEquals( RestStatus.CREATED, client().prepareIndex("test") .setSource("obj.runtime.dynamic.number", 1) + .setId("id") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get() .status() From 537d5cf940ee5fad71863382a8ece541b49176a1 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Wed, 24 Nov 2021 19:48:39 +0200 Subject: [PATCH 08/55] Rename `BaseTasksRequest.parentTaskId` to `targetParentTaskId` (#80908) `BaseTasksRequest` is a subclass of `TransportRequest`. Both of them contain a member variable `TaskId parentTaskId`. `TransportRequest` provides a setter for its member called `setParentTask` while `BaseTasksRequest` provides `setParentTaskId`. This is very confusing as it can easily lead to mistakes where the one is called in a place where the other should have been called. In particular, the `TransportRequest.parentTaskId` holds the task id of the parent task. `BaseTasksRequest.parentTaskId` holds the id of a task which is used to filter the tasks which are matched for that tasks request. 
This commit renames `BaseTasksRequest.parentTaskId` to `targetParentTaskId` to disambiguate the two concepts and prevent confusion and mistakes. It also renames `BaseTasksRequest.taskId` to `targetTaskId` to match. Co-authored-by: Elastic Machine --- .../client/TasksRequestConverters.java | 6 +- .../client/TasksRequestConvertersTests.java | 6 +- .../TasksClientDocumentationIT.java | 2 +- .../documentation/ReindexDocumentationIT.java | 4 +- .../reindex/RestRethrottleAction.java | 2 +- .../reindex/TransportRethrottleAction.java | 2 +- .../elasticsearch/reindex/CancelTests.java | 6 +- .../reindex/RethrottleTests.java | 4 +- .../elasticsearch/reindex/RoundTripTests.java | 4 +- .../TransportRethrottleActionTests.java | 2 +- .../node/tasks/CancellableTasksIT.java | 16 ++--- .../persistent/PersistentTasksExecutorIT.java | 29 ++++++-- .../search/SearchCancellationIT.java | 6 +- .../search/ccs/CrossClusterSearchIT.java | 2 +- .../node/tasks/cancel/CancelTasksRequest.java | 8 +-- .../cancel/TransportCancelTasksAction.java | 12 ++-- .../action/search/SearchTransportService.java | 2 +- .../support/tasks/BaseTasksRequest.java | 70 ++++++++++++++----- .../support/tasks/TasksRequestBuilder.java | 8 +-- .../support/tasks/TransportTasksAction.java | 12 ++-- .../persistent/PersistentTasksService.java | 2 +- .../action/RestCancellableNodeClient.java | 2 +- .../admin/cluster/RestCancelTasksAction.java | 4 +- .../admin/cluster/RestListTasksAction.java | 2 +- .../node/tasks/CancellableTasksTests.java | 14 ++-- .../node/tasks/TransportTasksActionTests.java | 4 +- .../tasks/cancel/CancelTasksRequestTests.java | 8 +-- .../RestCancellableNodeClientTests.java | 2 +- .../xpack/search/AsyncSearchTask.java | 2 +- .../AbstractEqlBlockingIntegTestCase.java | 2 +- .../ml/action/TransportDeleteJobAction.java | 2 +- .../ml/dataframe/steps/ReindexingStep.java | 2 +- .../AbstractSqlBlockingIntegTestCase.java | 2 +- 33 files changed, 151 insertions(+), 100 deletions(-) diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java index a9c9df2c451e9..54525a8cd304d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java @@ -34,15 +34,15 @@ static Request cancelTasks(CancelTasksRequest req) { } static Request listTasks(ListTasksRequest listTaskRequest) { - if (listTaskRequest.getTaskId() != null && listTaskRequest.getTaskId().isSet()) { - throw new IllegalArgumentException("TaskId cannot be used for list tasks request"); + if (listTaskRequest.getTargetTaskId() != null && listTaskRequest.getTargetTaskId().isSet()) { + throw new IllegalArgumentException("TargetTaskId cannot be used for list tasks request"); } Request request = new Request(HttpGet.METHOD_NAME, "/_tasks"); RequestConverters.Params params = new RequestConverters.Params(); params.withTimeout(listTaskRequest.getTimeout()) .withDetailed(listTaskRequest.getDetailed()) .withWaitForCompletion(listTaskRequest.getWaitForCompletion()) - .withParentTaskId(listTaskRequest.getParentTaskId()) + .withParentTaskId(listTaskRequest.getTargetParentTaskId()) .withNodes(listTaskRequest.getNodes()) .withActions(listTaskRequest.getActions()) .putParam("group_by", "none"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java index 7b115b1f4c6f1..2b92e924d1062 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java @@ -72,7 +72,7 @@ public void testListTasks() { if (randomBoolean()) { if (randomBoolean()) { TaskId taskId = new 
TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); - request.setParentTaskId(taskId); + request.setTargetParentTaskId(taskId); expectedParams.put("parent_task_id", taskId.toString()); } else { request.setParentTask(TaskId.EMPTY_TASK_ID); @@ -102,12 +102,12 @@ public void testListTasks() { } { ListTasksRequest request = new ListTasksRequest(); - request.setTaskId(new TaskId(randomAlphaOfLength(5), randomNonNegativeLong())); + request.setTargetTaskId(new TaskId(randomAlphaOfLength(5), randomNonNegativeLong())); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> TasksRequestConverters.listTasks(request) ); - assertEquals("TaskId cannot be used for list tasks request", exception.getMessage()); + assertEquals("TargetTaskId cannot be used for list tasks request", exception.getMessage()); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java index c7d434def0dae..2a541255409f5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java @@ -67,7 +67,7 @@ public void testListTasks() throws IOException { // tag::list-tasks-request-filter request.setActions("cluster:*"); // <1> request.setNodes("nodeId1", "nodeId2"); // <2> - request.setParentTaskId(new TaskId("parentTaskId", 42)); // <3> + request.setTargetParentTaskId(new TaskId("parentTaskId", 42)); // <3> // end::list-tasks-request-filter // tag::list-tasks-request-detailed diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java index 
b5a200afc78a3..ae7f95bc43ae7 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java @@ -217,13 +217,13 @@ public void testTasks() throws Exception { .setActions(UpdateByQueryAction.NAME).get().getTasks(); // Cancel a specific update-by-query request client.admin().cluster().prepareCancelTasks() - .setTaskId(taskId).get().getTasks(); + .setTargetTaskId(taskId).get().getTasks(); // end::update-by-query-cancel-task } { // tag::update-by-query-rethrottle new RethrottleRequestBuilder(client, RethrottleAction.INSTANCE) - .setTaskId(taskId) + .setTargetTaskId(taskId) .setRequestsPerSecond(2.0f) .get(); // end::update-by-query-rethrottle diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java index 8605a210aec55..c046e3c3aba39 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java @@ -44,7 +44,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) { RethrottleRequest internalRequest = new RethrottleRequest(); - internalRequest.setTaskId(new TaskId(request.param("taskId"))); + internalRequest.setTargetTaskId(new TaskId(request.param("taskId"))); Float requestsPerSecond = AbstractBaseReindexRestHandler.parseRequestsPerSecond(request); if (requestsPerSecond == null) { throw new IllegalArgumentException("requests_per_second is a required parameter"); diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java index 
d7ad28381cf63..2fd2c5f93d3ee 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java @@ -93,7 +93,7 @@ private static void rethrottleParentTask( if (runningSubtasks > 0) { RethrottleRequest subRequest = new RethrottleRequest(); subRequest.setRequestsPerSecond(newRequestsPerSecond / runningSubtasks); - subRequest.setParentTaskId(new TaskId(localNodeId, task.getId())); + subRequest.setTargetParentTaskId(new TaskId(localNodeId, task.getId())); logger.debug("rethrottling children of task [{}] to [{}] requests per second", task.getId(), subRequest.getRequestsPerSecond()); client.execute(RethrottleAction.INSTANCE, subRequest, ActionListener.wrap(r -> { r.rethrowFailures("Rethrottle"); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java index be62bc0205b39..75a72ec34571a 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java @@ -139,7 +139,7 @@ private void testCancel( // Cancel the request while the action is blocked by the indexing operation listeners. // This will prevent further requests from being sent. 
- ListTasksResponse cancelTasksResponse = client().admin().cluster().prepareCancelTasks().setTaskId(mainTask.getTaskId()).get(); + ListTasksResponse cancelTasksResponse = client().admin().cluster().prepareCancelTasks().setTargetTaskId(mainTask.getTaskId()).get(); cancelTasksResponse.rethrowFailures("Cancel"); assertThat(cancelTasksResponse.getTasks(), hasSize(1)); @@ -155,7 +155,7 @@ private void testCancel( ListTasksResponse sliceList = client().admin() .cluster() .prepareListTasks() - .setParentTaskId(mainTask.getTaskId()) + .setTargetParentTaskId(mainTask.getTaskId()) .setDetailed(true) .get(); sliceList.rethrowFailures("Fetch slice tasks"); @@ -193,7 +193,7 @@ private void testCancel( String tasks = client().admin() .cluster() .prepareListTasks() - .setParentTaskId(mainTask.getTaskId()) + .setTargetParentTaskId(mainTask.getTaskId()) .setDetailed(true) .get() .toString(); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java index b3be206e5c5c0..99ceb5087bff0 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java @@ -108,7 +108,7 @@ private void testCase(AbstractBulkByScrollRequestBuilder request, String a .getTask() .getStatus(); long finishedSubTasks = parent.getSliceStatuses().stream().filter(Objects::nonNull).count(); - ListTasksResponse list = client().admin().cluster().prepareListTasks().setParentTaskId(taskToRethrottle).get(); + ListTasksResponse list = client().admin().cluster().prepareListTasks().setTargetParentTaskId(taskToRethrottle).get(); list.rethrowFailures("subtasks"); assertThat(finishedSubTasks + list.getTasks().size(), greaterThanOrEqualTo((long) numSlices)); assertThat(list.getTasks().size(), greaterThan(0)); @@ -192,7 +192,7 @@ private ListTasksResponse rethrottleTask(TaskId taskToRethrottle, float newReque 
assertBusy(() -> { try { - ListTasksResponse rethrottleResponse = rethrottle().setTaskId(taskToRethrottle) + ListTasksResponse rethrottleResponse = rethrottle().setTargetTaskId(taskToRethrottle) .setRequestsPerSecond(newRequestsPerSecond) .get(); rethrottleResponse.rethrowFailures("Rethrottle"); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/RoundTripTests.java index 073c0a68a5400..cacc829654f63 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/RoundTripTests.java @@ -174,12 +174,12 @@ public void testRethrottleRequest() throws IOException { if (randomBoolean()) { request.setActions(randomFrom(UpdateByQueryAction.NAME, ReindexAction.NAME)); } else { - request.setTaskId(new TaskId(randomAlphaOfLength(5), randomLong())); + request.setTargetTaskId(new TaskId(randomAlphaOfLength(5), randomLong())); } RethrottleRequest tripped = new RethrottleRequest(toInputByteStream(request)); assertEquals(request.getRequestsPerSecond(), tripped.getRequestsPerSecond(), 0.00001); assertArrayEquals(request.getActions(), tripped.getActions()); - assertEquals(request.getTaskId(), tripped.getTaskId()); + assertEquals(request.getTargetTaskId(), tripped.getTargetTaskId()); } private StreamInput toInputByteStream(Writeable example) throws IOException { diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/TransportRethrottleActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/TransportRethrottleActionTests.java index 1359dd394e51e..902a32ae75cbe 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/TransportRethrottleActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/TransportRethrottleActionTests.java @@ -75,7 +75,7 @@ private void rethrottleTestCase( if (runningSlices > 0) { 
verify(client).execute(eq(RethrottleAction.INSTANCE), subRequest.capture(), subListener.capture()); - assertEquals(new TaskId(localNodeId, task.getId()), subRequest.getValue().getParentTaskId()); + assertEquals(new TaskId(localNodeId, task.getId()), subRequest.getValue().getTargetParentTaskId()); assertEquals(newRequestsPerSecond / runningSlices, subRequest.getValue().getRequestsPerSecond(), 0.00001f); simulator.accept(subListener.getValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index 1c646fb9beebd..4de462ee96a73 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -167,7 +167,7 @@ public void testBanOnlyNodesWithOutstandingDescendantTasks() throws Exception { ActionFuture cancelFuture = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(rootTaskId) + .setTargetTaskId(rootTaskId) .waitForCompletion(true) .execute(); if (randomBoolean()) { @@ -179,7 +179,7 @@ public void testBanOnlyNodesWithOutstandingDescendantTasks() throws Exception { .get() .getTasks(); for (TaskInfo subTask : randomSubsetOf(runningTasks)) { - client().admin().cluster().prepareCancelTasks().setTaskId(subTask.getTaskId()).waitForCompletion(false).get(); + client().admin().cluster().prepareCancelTasks().setTargetTaskId(subTask.getTaskId()).waitForCompletion(false).get(); } } try { @@ -217,13 +217,13 @@ public void testCancelTaskMultipleTimes() throws Exception { ActionFuture mainTaskFuture = client().execute(TransportTestAction.ACTION, rootRequest); TaskId taskId = getRootTaskId(rootRequest); allowPartialRequest(rootRequest); - CancelTasksResponse resp = 
client().admin().cluster().prepareCancelTasks().setTaskId(taskId).waitForCompletion(false).get(); + CancelTasksResponse resp = client().admin().cluster().prepareCancelTasks().setTargetTaskId(taskId).waitForCompletion(false).get(); assertThat(resp.getTaskFailures(), empty()); assertThat(resp.getNodeFailures(), empty()); ActionFuture cancelFuture = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(taskId) + .setTargetTaskId(taskId) .waitForCompletion(true) .execute(); assertFalse(cancelFuture.isDone()); @@ -234,7 +234,7 @@ public void testCancelTaskMultipleTimes() throws Exception { CancelTasksResponse cancelError = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(taskId) + .setTargetTaskId(taskId) .waitForCompletion(randomBoolean()) .get(); assertThat(cancelError.getNodeFailures(), hasSize(1)); @@ -255,7 +255,7 @@ public void testDoNotWaitForCompletion() throws Exception { ActionFuture cancelFuture = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(taskId) + .setTargetTaskId(taskId) .waitForCompletion(waitForCompletion) .execute(); if (waitForCompletion) { @@ -274,7 +274,7 @@ public void testFailedToStartChildTaskAfterCancelled() throws Exception { TestRequest rootRequest = generateTestRequest(nodes, 0, between(1, 3)); ActionFuture rootTaskFuture = client().execute(TransportTestAction.ACTION, rootRequest); TaskId taskId = getRootTaskId(rootRequest); - client().admin().cluster().prepareCancelTasks().setTaskId(taskId).waitForCompletion(false).get(); + client().admin().cluster().prepareCancelTasks().setTargetTaskId(taskId).waitForCompletion(false).get(); DiscoveryNode nodeWithParentTask = nodes.stream().filter(n -> n.getId().equals(taskId.getNodeId())).findFirst().get(); TransportTestAction mainAction = internalCluster().getInstance(TransportTestAction.class, nodeWithParentTask.getName()); PlainActionFuture future = new PlainActionFuture<>(); @@ -323,7 +323,7 @@ public void testRemoveBanParentsOnDisconnect() throws Exception { 
ActionFuture cancelFuture = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(rootTaskId) + .setTargetTaskId(rootTaskId) .waitForCompletion(true) .execute(); try { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index 8fada58de5c4a..685fae6114760 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -89,7 +89,11 @@ public void testPersistentActionFailure() throws Exception { logger.info("Failing the running task"); // Fail the running task and make sure it restarts properly assertThat( - new TestTasksRequestBuilder(client()).setOperation("fail").setTaskId(firstRunningTask.getTaskId()).get().getTasks().size(), + new TestTasksRequestBuilder(client()).setOperation("fail") + .setTargetTaskId(firstRunningTask.getTaskId()) + .get() + .getTasks() + .size(), equalTo(1) ); @@ -255,7 +259,7 @@ public void testPersistentActionStatusUpdate() throws Exception { // Complete the running task and make sure it finishes properly assertThat( new TestTasksRequestBuilder(client()).setOperation("update_status") - .setTaskId(firstRunningTask.getTaskId()) + .setTargetTaskId(firstRunningTask.getTaskId()) .get() .getTasks() .size(), @@ -296,7 +300,11 @@ public void testPersistentActionStatusUpdate() throws Exception { logger.info("Completing the running task"); // Complete the running task and make sure it finishes properly assertThat( - new TestTasksRequestBuilder(client()).setOperation("finish").setTaskId(firstRunningTask.getTaskId()).get().getTasks().size(), + new TestTasksRequestBuilder(client()).setOperation("finish") + .setTargetTaskId(firstRunningTask.getTaskId()) + .get() + .getTasks() + .size(), equalTo(1) ); @@ -327,7 +335,11 @@ public void 
testCreatePersistentTaskWithDuplicateId() throws Exception { logger.info("Completing the running task"); // Fail the running task and make sure it restarts properly assertThat( - new TestTasksRequestBuilder(client()).setOperation("finish").setTaskId(firstRunningTask.getTaskId()).get().getTasks().size(), + new TestTasksRequestBuilder(client()).setOperation("finish") + .setTargetTaskId(firstRunningTask.getTaskId()) + .get() + .getTasks() + .size(), equalTo(1) ); @@ -435,7 +447,7 @@ public void testAbortLocally() throws Exception { assertThat( new TestTasksRequestBuilder(client()).setOperation("abort_locally") - .setTaskId(firstRunningTask.getTaskId()) + .setTargetTaskId(firstRunningTask.getTaskId()) .get() .getTasks() .size(), @@ -494,12 +506,15 @@ private void stopOrCancelTask(TaskId taskId) { if (randomBoolean()) { logger.info("Completing the running task"); // Complete the running task and make sure it finishes properly - assertThat(new TestTasksRequestBuilder(client()).setOperation("finish").setTaskId(taskId).get().getTasks().size(), equalTo(1)); + assertThat( + new TestTasksRequestBuilder(client()).setOperation("finish").setTargetTaskId(taskId).get().getTasks().size(), + equalTo(1) + ); } else { logger.info("Cancelling the running task"); // Cancel the running task and make sure it finishes properly - assertThat(client().admin().cluster().prepareCancelTasks().setTaskId(taskId).get().getTasks().size(), equalTo(1)); + assertThat(client().admin().cluster().prepareCancelTasks().setTargetTaskId(taskId).get().getTasks().size(), equalTo(1)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java index 39b7e5df5f319..33ddf58122a20 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java @@ -131,7 
+131,11 @@ private void cancelSearch(String action) { TaskInfo searchTask = listTasksResponse.getTasks().get(0); logger.info("Cancelling search"); - CancelTasksResponse cancelTasksResponse = client().admin().cluster().prepareCancelTasks().setTaskId(searchTask.getTaskId()).get(); + CancelTasksResponse cancelTasksResponse = client().admin() + .cluster() + .prepareCancelTasks() + .setTargetTaskId(searchTask.getTaskId()) + .get(); assertThat(cancelTasksResponse.getTasks(), hasSize(1)); assertThat(cancelTasksResponse.getTasks().get(0).getTaskId(), equalTo(searchTask.getTaskId())); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index bb25fbe01771f..312c5ae220c1c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -213,7 +213,7 @@ public void testCancel() throws Exception { .filter(t -> t.getParentTaskId().isSet() == false) .findFirst() .get(); - final CancelTasksRequest cancelRequest = new CancelTasksRequest().setTaskId(rootTask.getTaskId()); + final CancelTasksRequest cancelRequest = new CancelTasksRequest().setTargetTaskId(rootTask.getTaskId()); cancelRequest.setWaitForCompletion(randomBoolean()); final ActionFuture cancelFuture = client().admin().cluster().cancelTasks(cancelRequest); assertBusy(() -> { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java index df0db7d81e94c..d3596f2a652bf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java 
@@ -81,10 +81,10 @@ public String getDescription() { + reason + "], waitForCompletion[" + waitForCompletion - + "], taskId[" - + getTaskId() - + "], parentTaskId[" - + getParentTaskId() + + "], targetTaskId[" + + getTargetTaskId() + + "], targetParentTaskId[" + + getTargetParentTaskId() + "], nodes" + Arrays.toString(getNodes()) + ", actions" diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java index cebdb6266f253..339d3d2e17e75 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java @@ -60,21 +60,21 @@ protected CancelTasksResponse newResponse( } protected void processTasks(CancelTasksRequest request, Consumer operation) { - if (request.getTaskId().isSet()) { + if (request.getTargetTaskId().isSet()) { // we are only checking one task, we can optimize it - CancellableTask task = taskManager.getCancellableTask(request.getTaskId().getId()); + CancellableTask task = taskManager.getCancellableTask(request.getTargetTaskId().getId()); if (task != null) { if (request.match(task)) { operation.accept(task); } else { - throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support this operation"); + throw new IllegalArgumentException("task [" + request.getTargetTaskId() + "] doesn't support this operation"); } } else { - if (taskManager.getTask(request.getTaskId().getId()) != null) { + if (taskManager.getTask(request.getTargetTaskId().getId()) != null) { // The task exists, but doesn't support cancellation - throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support cancellation"); + throw new IllegalArgumentException("task [" + request.getTargetTaskId() + "] 
doesn't support cancellation"); } else { - throw new ResourceNotFoundException("task [{}] is not found", request.getTaskId()); + throw new ResourceNotFoundException("task [{}] is not found", request.getTargetTaskId()); } } } else { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 00c74fcad4d26..e00fa8155d1ef 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -668,7 +668,7 @@ private boolean assertNodePresent() { } public void cancelSearchTask(SearchTask task, String reason) { - CancelTasksRequest req = new CancelTasksRequest().setTaskId(new TaskId(client.getLocalNodeId(), task.getId())) + CancelTasksRequest req = new CancelTasksRequest().setTargetTaskId(new TaskId(client.getLocalNodeId(), task.getId())) .setReason("Fatal failure during search: " + reason); // force the origin to execute the cancellation as a system user new OriginSettingClient(client, GetTaskAction.TASKS_ORIGIN).admin().cluster().cancelTasks(req, ActionListener.wrap(() -> {})); diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java b/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java index ad3e95836572f..1f71e4d1f6ff6 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java @@ -38,9 +38,9 @@ public class BaseTasksRequest> extends private String[] actions = ALL_ACTIONS; - private TaskId parentTaskId = TaskId.EMPTY_TASK_ID; + private TaskId targetParentTaskId = TaskId.EMPTY_TASK_ID; - private TaskId taskId = TaskId.EMPTY_TASK_ID; + private TaskId targetTaskId = TaskId.EMPTY_TASK_ID; // NOTE: This constructor is only needed, because the setters 
in this class, // otherwise it can be removed and above fields can be made final. @@ -48,8 +48,8 @@ public BaseTasksRequest() {} protected BaseTasksRequest(StreamInput in) throws IOException { super(in); - taskId = TaskId.readFromStream(in); - parentTaskId = TaskId.readFromStream(in); + targetTaskId = TaskId.readFromStream(in); + targetParentTaskId = TaskId.readFromStream(in); nodes = in.readStringArray(); actions = in.readStringArray(); timeout = in.readOptionalTimeValue(); @@ -58,8 +58,8 @@ protected BaseTasksRequest(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - taskId.writeTo(out); - parentTaskId.writeTo(out); + targetTaskId.writeTo(out); + targetParentTaskId.writeTo(out); out.writeStringArrayNullable(nodes); out.writeStringArrayNullable(actions); out.writeOptionalTimeValue(timeout); @@ -68,7 +68,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; - if (taskId.isSet() && nodes.length > 0) { + if (targetTaskId.isSet() && nodes.length > 0) { validationException = addValidationError("task id cannot be used together with node ids", validationException); } return validationException; @@ -105,29 +105,61 @@ public final Request setNodes(String... nodes) { * * By default tasks with any ids are returned. 
*/ - public TaskId getTaskId() { - return taskId; + public TaskId getTargetTaskId() { + return targetTaskId; } @SuppressWarnings("unchecked") - public final Request setTaskId(TaskId taskId) { - this.taskId = taskId; + public final Request setTargetTaskId(TaskId targetTaskId) { + this.targetTaskId = targetTaskId; return (Request) this; } + /** + * @deprecated Use {@link #getTargetTaskId()} + */ + @Deprecated + public TaskId getTaskId() { + return getTargetTaskId(); + } + + /** + * @deprecated Use {@link #setTargetTaskId(TaskId)} + */ + @Deprecated + public final Request setTaskId(TaskId taskId) { + return setTargetTaskId(taskId); + } + /** * Returns the parent task id that tasks should be filtered by */ - public TaskId getParentTaskId() { - return parentTaskId; + public TaskId getTargetParentTaskId() { + return targetParentTaskId; } @SuppressWarnings("unchecked") - public Request setParentTaskId(TaskId parentTaskId) { - this.parentTaskId = parentTaskId; + public Request setTargetParentTaskId(TaskId targetParentTaskId) { + this.targetParentTaskId = targetParentTaskId; return (Request) this; } + /** + * @deprecated Use {@link #getTargetParentTaskId()} + */ + @Deprecated + public TaskId getParentTaskId() { + return getTargetParentTaskId(); + } + + /** + * @deprecated Use {@link #setTargetParentTaskId(TaskId)} + */ + @Deprecated + public Request setParentTaskId(TaskId parentTaskId) { + return setTargetParentTaskId(parentTaskId); + } + public TimeValue getTimeout() { return this.timeout; } @@ -148,13 +180,13 @@ public boolean match(Task task) { if (CollectionUtils.isEmpty(getActions()) == false && Regex.simpleMatch(getActions(), task.getAction()) == false) { return false; } - if (getTaskId().isSet()) { - if (getTaskId().getId() != task.getId()) { + if (getTargetTaskId().isSet()) { + if (getTargetTaskId().getId() != task.getId()) { return false; } } - if (parentTaskId.isSet()) { - if (parentTaskId.equals(task.getParentTaskId()) == false) { + if 
(targetParentTaskId.isSet()) { + if (targetParentTaskId.equals(task.getParentTaskId()) == false) { return false; } } diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java index e283ecc34e4eb..c827e3b55deee 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java @@ -29,8 +29,8 @@ protected TasksRequestBuilder(ElasticsearchClient client, ActionType a * Set the task to lookup. */ @SuppressWarnings("unchecked") - public final RequestBuilder setTaskId(TaskId taskId) { - request.setTaskId(taskId); + public final RequestBuilder setTargetTaskId(TaskId taskId) { + request.setTargetTaskId(taskId); return (RequestBuilder) this; } @@ -56,8 +56,8 @@ public final RequestBuilder setTimeout(TimeValue timeout) { * Match all children of the provided task. 
*/ @SuppressWarnings("unchecked") - public final RequestBuilder setParentTaskId(TaskId taskId) { - request.setParentTaskId(taskId); + public final RequestBuilder setTargetParentTaskId(TaskId taskId) { + request.setTargetParentTaskId(taskId); return (RequestBuilder) this; } } diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index a3550a7d9f93f..445274896ada3 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -146,8 +146,8 @@ protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) { } protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) { - if (request.getTaskId().isSet()) { - return new String[] { request.getTaskId().getNodeId() }; + if (request.getTargetTaskId().isSet()) { + return new String[] { request.getTargetTaskId().getNodeId() }; } else { return clusterState.nodes().resolveNodes(request.getNodes()); } @@ -155,17 +155,17 @@ protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) @SuppressWarnings("unchecked") protected void processTasks(TasksRequest request, Consumer operation) { - if (request.getTaskId().isSet()) { + if (request.getTargetTaskId().isSet()) { // we are only checking one task, we can optimize it - Task task = taskManager.getTask(request.getTaskId().getId()); + Task task = taskManager.getTask(request.getTargetTaskId().getId()); if (task != null) { if (request.match(task)) { operation.accept((OperationTask) task); } else { - throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.getTaskId()); + throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.getTargetTaskId()); } } else { - throw new ResourceNotFoundException("task [{}] is 
missing", request.getTaskId()); + throw new ResourceNotFoundException("task [{}] is missing", request.getTargetTaskId()); } } else { for (Task task : taskManager.getTasks().values()) { diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java index aa4866294f5a3..5da8f570eacfc 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java @@ -92,7 +92,7 @@ public void sendCompletionRequest( */ void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { CancelTasksRequest request = new CancelTasksRequest(); - request.setTaskId(new TaskId(clusterService.localNode().getId(), taskId)); + request.setTargetTaskId(new TaskId(clusterService.localNode().getId(), taskId)); request.setReason(reason); try { client.admin().cluster().cancelTasks(request, listener); diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestCancellableNodeClient.java b/server/src/main/java/org/elasticsearch/rest/action/RestCancellableNodeClient.java index deb76bfdd5271..70217c057a7a7 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/RestCancellableNodeClient.java +++ b/server/src/main/java/org/elasticsearch/rest/action/RestCancellableNodeClient.java @@ -104,7 +104,7 @@ public void onFailure(Exception e) { } private void cancelTask(TaskId taskId) { - CancelTasksRequest req = new CancelTasksRequest().setTaskId(taskId).setReason("http channel [" + httpChannel + "] closed"); + CancelTasksRequest req = new CancelTasksRequest().setTargetTaskId(taskId).setReason("http channel [" + httpChannel + "] closed"); // force the origin to execute the cancellation as a system user new OriginSettingClient(client, TASKS_ORIGIN).admin().cluster().cancelTasks(req, ActionListener.wrap(() -> {})); } diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java index 5a0100a072ce6..d8bba544eb395 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java @@ -49,10 +49,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final String groupBy = request.param("group_by", "nodes"); CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(); - cancelTasksRequest.setTaskId(taskId); + cancelTasksRequest.setTargetTaskId(taskId); cancelTasksRequest.setNodes(nodesIds); cancelTasksRequest.setActions(actions); - cancelTasksRequest.setParentTaskId(parentTaskId); + cancelTasksRequest.setTargetParentTaskId(parentTaskId); cancelTasksRequest.setWaitForCompletion(request.paramAsBoolean("wait_for_completion", cancelTasksRequest.waitForCompletion())); return channel -> client.admin() .cluster() diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java index 2fd080204c1cb..20a7687e0fae1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java @@ -69,7 +69,7 @@ public static ListTasksRequest generateListTasksRequest(RestRequest request) { listTasksRequest.setNodes(nodes); listTasksRequest.setDetailed(detailed); listTasksRequest.setActions(actions); - listTasksRequest.setParentTaskId(parentTaskId); + listTasksRequest.setTargetParentTaskId(parentTaskId); listTasksRequest.setWaitForCompletion(waitForCompletion); listTasksRequest.setTimeout(timeout); return listTasksRequest; diff --git 
a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index ad7166ef9b044..aa86f3d0a0414 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -282,7 +282,7 @@ public void onFailure(Exception e) { // Cancel main task CancelTasksRequest request = new CancelTasksRequest(); request.setReason("Testing Cancellation"); - request.setTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); + request.setTargetTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); // And send the cancellation request to a random node CancelTasksResponse response = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction, @@ -316,7 +316,7 @@ public void onFailure(Exception e) { // Make sure that tasks are no longer running ListTasksResponse listTasksResponse = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(0, testNodes.length - 1)].transportListTasksAction, - new ListTasksRequest().setTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())) + new ListTasksRequest().setTargetTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())) ); assertEquals(0, listTasksResponse.getTasks().size()); @@ -354,7 +354,7 @@ public void onFailure(Exception e) { // Cancel all child tasks without cancelling the main task, which should quit on its own CancelTasksRequest request = new CancelTasksRequest(); request.setReason("Testing Cancellation"); - request.setParentTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); + request.setTargetParentTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); // And send the cancellation request to a random node CancelTasksResponse response = 
ActionTestUtils.executeBlocking( testNodes[randomIntBetween(1, testNodes.length - 1)].transportCancelTasksAction, @@ -373,7 +373,7 @@ public void onFailure(Exception e) { // Make sure that main task is no longer running ListTasksResponse listTasksResponse = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(0, testNodes.length - 1)].transportListTasksAction, - new ListTasksRequest().setTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())) + new ListTasksRequest().setTargetTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())) ); assertEquals(0, listTasksResponse.getTasks().size()); }); @@ -460,7 +460,7 @@ public void onFailure(Exception e) { // Make sure that tasks are running ListTasksResponse listTasksResponse = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(0, testNodes.length - 1)].transportListTasksAction, - new ListTasksRequest().setParentTaskId(new TaskId(mainNode, mainTask.getId())) + new ListTasksRequest().setTargetParentTaskId(new TaskId(mainNode, mainTask.getId())) ); assertThat(listTasksResponse.getTasks().size(), greaterThanOrEqualTo(blockOnNodes.size())); @@ -474,7 +474,7 @@ public void onFailure(Exception e) { // Simulate issuing cancel request on the node that is about to leave the cluster CancelTasksRequest request = new CancelTasksRequest(); request.setReason("Testing Cancellation"); - request.setTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); + request.setTargetTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); // And send the cancellation request to a random node CancelTasksResponse response = ActionTestUtils.executeBlocking(testNodes[0].transportCancelTasksAction, request); logger.info("--> Done simulating issuing cancel request on the node that is about to leave the cluster"); @@ -502,7 +502,7 @@ public void onFailure(Exception e) { // Make sure that tasks are no longer running ListTasksResponse listTasksResponse1 = ActionTestUtils.executeBlocking( 
testNodes[randomIntBetween(1, testNodes.length - 1)].transportListTasksAction, - new ListTasksRequest().setTaskId(new TaskId(mainNode, mainTask.getId())) + new ListTasksRequest().setTargetTaskId(new TaskId(mainNode, mainTask.getId())) ); assertEquals(0, listTasksResponse1.getTasks().size()); }); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index a87ed2331de9f..d982e83487cd7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -428,7 +428,7 @@ public void testFindChildTasks() throws Exception { // Find tasks with common parent listTasksRequest = new ListTasksRequest(); - listTasksRequest.setParentTaskId(new TaskId(parentNode, parentTaskId)); + listTasksRequest.setTargetParentTaskId(new TaskId(parentNode, parentTaskId)); response = ActionTestUtils.executeBlocking(testNode.transportListTasksAction, listTasksRequest); assertEquals(testNodes.length, response.getTasks().size()); for (TaskInfo task : response.getTasks()) { @@ -506,7 +506,7 @@ public void testCancellingTasksThatDontSupportCancellation() throws Exception { // Try to cancel main task using id request = new CancelTasksRequest(); request.setReason("Testing Cancellation"); - request.setTaskId(new TaskId(testNodes[0].getNodeId(), task.getId())); + request.setTargetTaskId(new TaskId(testNodes[0].getNodeId(), task.getId())); response = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction, request diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestTests.java 
b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestTests.java index 73916abfcdf1a..df0f1b8b99a11 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestTests.java @@ -20,11 +20,11 @@ public void testGetDescription() { CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(); cancelTasksRequest.setActions("action1", "action2"); cancelTasksRequest.setNodes("node1", "node2"); - cancelTasksRequest.setTaskId(new TaskId("node1", 1)); - cancelTasksRequest.setParentTaskId(new TaskId("node1", 0)); + cancelTasksRequest.setTargetTaskId(new TaskId("node1", 1)); + cancelTasksRequest.setTargetParentTaskId(new TaskId("node1", 0)); assertEquals( - "reason[by user request], waitForCompletion[false], taskId[node1:1], " - + "parentTaskId[node1:0], nodes[node1, node2], actions[action1, action2]", + "reason[by user request], waitForCompletion[false], targetTaskId[node1:1], " + + "targetParentTaskId[node1:0], nodes[node1, node2], actions[action1, action2]", cancelTasksRequest.getDescription() ); Task task = cancelTasksRequest.createTask(1, "type", "action", null, Collections.emptyMap()); diff --git a/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java b/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java index a94c03adade83..f50d2fc1f17b3 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java @@ -170,7 +170,7 @@ public Task exe switch (action.name()) { case CancelTasksAction.NAME: CancelTasksRequest cancelTasksRequest = (CancelTasksRequest) request; - assertTrue("tried to cancel the same task more than once", cancelledTasks.add(cancelTasksRequest.getTaskId())); + 
assertTrue("tried to cancel the same task more than once", cancelledTasks.add(cancelTasksRequest.getTargetTaskId())); Task task = request.createTask(counter.getAndIncrement(), "cancel_task", action.name(), null, Collections.emptyMap()); if (randomBoolean()) { listener.onResponse(null); diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java index d7027859a9107..24a92005609b5 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java @@ -146,7 +146,7 @@ public void cancelTask(TaskManager taskManager, Runnable runnable, String reason */ public void cancelTask(Runnable runnable, String reason) { if (isCancelled() == false && isCancelling.compareAndSet(false, true)) { - CancelTasksRequest req = new CancelTasksRequest().setTaskId(searchId.getTaskId()).setReason(reason); + CancelTasksRequest req = new CancelTasksRequest().setTargetTaskId(searchId.getTaskId()).setReason(reason); client.admin().cluster().cancelTasks(req, new ActionListener<>() { @Override public void onResponse(CancelTasksResponse cancelTasksResponse) { diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java index 0c3361baf843b..ae27e100b0a6d 100644 --- a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java +++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java @@ -258,7 +258,7 @@ protected TaskId cancelTaskWithXOpaqueId(String id, String action) { TaskId taskId = findTaskWithXOpaqueId(id, 
action); assertNotNull(taskId); logger.trace("Cancelling task " + taskId); - CancelTasksResponse response = client().admin().cluster().prepareCancelTasks().setTaskId(taskId).get(); + CancelTasksResponse response = client().admin().cluster().prepareCancelTasks().setTargetTaskId(taskId).get(); assertThat(response.getTasks(), hasSize(1)); assertThat(response.getTasks().get(0).getAction(), equalTo(action)); logger.trace("Task is cancelled " + taskId); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java index d0111ab260480..da59bf43aa244 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java @@ -361,7 +361,7 @@ private void cancelResetTaskIfExists(String jobId, ActionListener liste CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(); cancelTasksRequest.setReason("deleting job"); cancelTasksRequest.setActions(ResetJobAction.NAME); - cancelTasksRequest.setTaskId(job.getBlocked().getTaskId()); + cancelTasksRequest.setTargetTaskId(job.getBlocked().getTaskId()); executeAsyncWithOrigin( client, ML_ORIGIN, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java index a93ff43a4dfa3..2d3f9e724053c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java @@ -267,7 +267,7 @@ public void cancel(String reason, TimeValue timeout) { LOGGER.debug("[{}] Cancelling reindex task [{}]", config.getId(), reindexTaskId); CancelTasksRequest cancelReindex = new CancelTasksRequest(); - 
cancelReindex.setTaskId(reindexTaskId); + cancelReindex.setTargetTaskId(reindexTaskId); cancelReindex.setReason(reason); cancelReindex.setTimeout(timeout); diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java index 42ce6a30877b4..8a3f0e0c21b97 100644 --- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java +++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java @@ -271,7 +271,7 @@ protected TaskId cancelTaskWithXOpaqueId(String id, String action) { TaskId taskId = findTaskWithXOpaqueId(id, action); assertNotNull(taskId); logger.trace("Cancelling task " + taskId); - CancelTasksResponse response = client().admin().cluster().prepareCancelTasks().setTaskId(taskId).get(); + CancelTasksResponse response = client().admin().cluster().prepareCancelTasks().setTargetTaskId(taskId).get(); assertThat(response.getTasks(), hasSize(1)); assertThat(response.getTasks().get(0).getAction(), equalTo(action)); logger.trace("Task is cancelled " + taskId); From d1af86cfdd770b6079d51b0421bfd377ae0c7d69 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Wed, 24 Nov 2021 10:09:45 -0800 Subject: [PATCH 09/55] [DOCS] Fixes start and stop trained model deployment APIs (#80978) --- .../apis/get-dfanalytics-stats.asciidoc | 2 +- .../apis/get-dfanalytics.asciidoc | 2 +- .../start-trained-model-deployment.asciidoc | 40 ++++++++++--------- .../apis/stop-dfanalytics.asciidoc | 2 +- .../stop-trained-model-deployment.asciidoc | 14 ++++--- docs/reference/ml/ml-shared.asciidoc | 33 ++++++++++----- 6 files changed, 56 insertions(+), 37 deletions(-) diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc 
b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc index 5efa48b559e9d..b18899cc86558 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc @@ -43,7 +43,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-data-frame-analytics-def `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-dfa-jobs] `from`:: (Optional, integer) diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc index 21228d20ced4e..c225676b8ebc9 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc @@ -54,7 +54,7 @@ You can get information for all {dfanalytics-jobs} by using _all, by specifying `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-dfa-jobs] `from`:: (Optional, integer) diff --git a/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc b/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc index a05e01d26c1ca..9fbec6d026e32 100644 --- a/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc +++ b/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc @@ -6,6 +6,8 @@ Start trained model deployment ++++ +experimental::[] + Starts a new trained model deployment. [[start-trained-model-deployment-request]] @@ -34,25 +36,6 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id] [[start-trained-model-deployment-query-params]] == {api-query-parms-title} -`timeout`:: -(Optional, time) -Controls the amount of time to wait for the model to deploy. Defaults -to 20 seconds. 
- -`wait_for`:: -(Optional, string) -Specifies the allocation status to wait for before returning. Defaults to -`started`. The value `starting` indicates deployment is starting but not yet on -any node. The value `started` indicates the model has started on at least one -node. The value `fully_allocated` indicates the deployment has started on all -valid nodes. - -`model_threads`:: -(Optional, integer) -Indicates how many threads are used when sending inference requests to -the model. Increasing this value generally increases the throughput. Defaults to -1. - `inference_threads`:: (Optional, integer) Sets the number of threads used by the inference process. This generally increases @@ -61,12 +44,31 @@ greater than the number of available CPU cores on the machine does not increase inference speed. Defaults to 1. +`model_threads`:: +(Optional, integer) +Indicates how many threads are used when sending inference requests to +the model. Increasing this value generally increases the throughput. Defaults to +1. + `queue_capacity`:: (Optional, integer) Controls how many inference requests are allowed in the queue at a time. Once the number of requests exceeds this value, new requests are rejected with a 429 error. Defaults to 1024. +`timeout`:: +(Optional, time) +Controls the amount of time to wait for the model to deploy. Defaults +to 20 seconds. + +`wait_for`:: +(Optional, string) +Specifies the allocation status to wait for before returning. Defaults to +`started`. The value `starting` indicates deployment is starting but not yet on +any node. The value `started` indicates the model has started on at least one +node. The value `fully_allocated` indicates the deployment has started on all +valid nodes. 
+ [[start-trained-model-deployment-example]] == {api-examples-title} diff --git a/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc index af58b2c2f6b72..3ac7be860fd1c 100644 --- a/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc @@ -49,7 +49,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-data-frame-analytics-def `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-dfa-jobs] `force`:: diff --git a/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc b/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc index eb58a9baf8d8c..a486ee37bb239 100644 --- a/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc +++ b/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc @@ -8,15 +8,18 @@ experimental::[] +Stops a trained model deployment. + [[stop-trained-model-deployment-request]] == {api-request-title} `POST _ml/trained_models//deployment/_stop` -//// [[stop-trained-model-deployment-prereq]] == {api-prereq-title} -//// + +Requires the `manage_ml` cluster privilege. This privilege is included in the +`machine_learning_admin` built-in role. //// [[stop-trained-model-deployment-desc]] @@ -36,12 +39,11 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id] `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match] - +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-deployments] `force`:: - (Optional, Boolean) If true, the deployment is stopped even if it is referenced by - ingest pipelines. +(Optional, Boolean) If true, the deployment is stopped even if it is referenced +by ingest pipelines. 
//// [role="child_attributes"] diff --git a/docs/reference/ml/ml-shared.asciidoc b/docs/reference/ml/ml-shared.asciidoc index 3a11ca6118299..97b9a42c49582 100644 --- a/docs/reference/ml/ml-shared.asciidoc +++ b/docs/reference/ml/ml-shared.asciidoc @@ -32,22 +32,22 @@ are no matches or only partial matches. -- end::allow-no-match-datafeeds[] -tag::allow-no-match-jobs[] +tag::allow-no-match-deployments[] Specifies what to do when the request: + -- -* Contains wildcard expressions and there are no jobs that match. +* Contains wildcard expressions and there are no deployments that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. -The default value is `true`, which returns an empty `jobs` array -when there are no matches and the subset of results when there are partial -matches. If this parameter is `false`, the request returns a `404` status code -when there are no matches or only partial matches. +The default value is `true`, which returns an empty array when there are no +matches and the subset of results when there are partial matches. If this +parameter is `false`, the request returns a `404` status code when there are no +matches or only partial matches. -- -end::allow-no-match-jobs[] +end::allow-no-match-deployments[] -tag::allow-no-match[] +tag::allow-no-match-dfa-jobs[] Specifies what to do when the request: + -- @@ -60,7 +60,22 @@ when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. -- -end::allow-no-match[] +end::allow-no-match-dfa-jobs[] + +tag::allow-no-match-jobs[] +Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no jobs that match. +* Contains the `_all` string or no identifiers and there are no matches. 
+* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `jobs` array +when there are no matches and the subset of results when there are partial +matches. If this parameter is `false`, the request returns a `404` status code +when there are no matches or only partial matches. +-- +end::allow-no-match-jobs[] tag::allow-no-match-models[] Specifies what to do when the request: From 2a30dfe4d213769e5c4982d201117562377e0e8d Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Wed, 24 Nov 2021 14:34:25 -0500 Subject: [PATCH 10/55] [DOCS] Fix `type` response values for index recovery API (#81000) We updated the `type` response values in https://github.com/elastic/elasticsearch/pull/19516. This updates the docs with the correct values. Closes https://github.com/elastic/elasticsearch/issues/80264 Co-authored-by: David Turner --- docs/reference/indices/recovery.asciidoc | 42 +++++++++++++----------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc index 8564b5e2b46ed..4a61155175d4d 100644 --- a/docs/reference/indices/recovery.asciidoc +++ b/docs/reference/indices/recovery.asciidoc @@ -44,12 +44,12 @@ the replica shard is available for search. Recovery automatically occurs during the following processes: -* Node startup or failure. This type of recovery is called a local store - recovery. +* Node startup. This type of recovery is called a local store recovery. * Primary shard replication. * Relocation of a shard to a different node in the same cluster. -* <>. - +* <> operation. +* <>, <>, or +<> operation. // end::shard-recovery-desc[] [[index-recovery-api-path-params]] @@ -80,26 +80,28 @@ ID of the shard. `type`:: + -- -(String) -Recovery type. -Returned values include: +(String) Recovery source for the shard. Returned values include: -`STORE`:: -The recovery is related to -a node startup or failure. 
-This type of recovery is called a local store recovery. +`EMPTY_STORE`:: +An empty store. Indicates a new primary shard or the forced allocation of an +empty primary shard using the <>. -`SNAPSHOT`:: -The recovery is related to -a <>. +`EXISTING_STORE`:: +The store of an existing primary shard. Indicates recovery is related +to node startup or the allocation of an existing primary shard. -`REPLICA`:: -The recovery is related to a primary shard replication. +`LOCAL_SHARDS`:: +Shards of another index on the same node. Indicates recovery is related to a +<>, <>, or +<> operation. -`RELOCATING`:: -The recovery is related to -the relocation of a shard -to a different node in the same cluster. +`PEER`:: +A primary shard on another node. Indicates recovery is related to shard +replication. + +`SNAPSHOT`:: +A snapshot. Indicates recovery is related to a +<> operation. -- `STAGE`:: From 4ff1962ecbc83112aa5db93adbd1963a64cb2bf7 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 24 Nov 2021 13:02:00 -0800 Subject: [PATCH 11/55] Support additional version schemes in relaxed mode (#81010) --- .../src/main/java/org/elasticsearch/gradle/Version.java | 4 +++- .../src/test/java/org/elasticsearch/gradle/VersionTests.java | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/Version.java b/build-tools/src/main/java/org/elasticsearch/gradle/Version.java index a86e16ad740fd..dfa8be295a8f7 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/Version.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/Version.java @@ -39,7 +39,9 @@ public enum Mode { private static final Pattern pattern = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)(?:-(alpha\\d+|beta\\d+|rc\\d+|SNAPSHOT))?"); - private static final Pattern relaxedPattern = Pattern.compile("v?(\\d+)\\.(\\d+)\\.(\\d+)(?:-([a-zA-Z0-9_]+(?:-[a-zA-Z0-9]+)*))?"); + private static final Pattern relaxedPattern = Pattern.compile( + 
"v?(\\d+)\\.(\\d+)\\.(\\d+)(?:[\\-+]+([a-zA-Z0-9_]+(?:-[a-zA-Z0-9]+)*))?" + ); public Version(int major, int minor, int revision) { this(major, minor, revision, null); diff --git a/build-tools/src/test/java/org/elasticsearch/gradle/VersionTests.java b/build-tools/src/test/java/org/elasticsearch/gradle/VersionTests.java index 2dae3d9f70900..97eb21b814d70 100644 --- a/build-tools/src/test/java/org/elasticsearch/gradle/VersionTests.java +++ b/build-tools/src/test/java/org/elasticsearch/gradle/VersionTests.java @@ -41,6 +41,7 @@ public void testRelaxedVersionParsing() { assertVersionEquals("6.1.2-foo", 6, 1, 2, Version.Mode.RELAXED); assertVersionEquals("6.1.2-foo-bar", 6, 1, 2, Version.Mode.RELAXED); assertVersionEquals("16.01.22", 16, 1, 22, Version.Mode.RELAXED); + assertVersionEquals("20.10.10+dfsg1", 20, 10, 10, Version.Mode.RELAXED); } public void testCompareWithStringVersions() { From b4b489f52e7e0c901df472a2bb57fca6b596f8a0 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Wed, 24 Nov 2021 18:15:32 -0700 Subject: [PATCH 12/55] Fix RefreshListenerTests testDisallowAddListeners (#80575) A recent change to RefreshListeners introduced new functionality sequence number listeners. This change broke a test when the max allowed listeners are randomly set to 2. This commit resolves it by checking whether a new listener is allowed and asserting appropriately. Closes #79689. 
--- .../index/shard/RefreshListenersTests.java | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 0d6d90b0f246a..7ff97e84a9907 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -17,7 +17,6 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; @@ -87,7 +86,6 @@ public class RefreshListenersTests extends ESTestCase { private volatile int maxListeners; private ThreadPool threadPool; private Store store; - private MeanMetric refreshMetric; @Before public void setupListeners() throws Exception { @@ -95,18 +93,16 @@ public void setupListeners() throws Exception { maxListeners = randomIntBetween(2, 1000); // Now setup the InternalEngine which is much more complicated because we aren't mocking anything threadPool = new TestThreadPool(getTestName()); - refreshMetric = new MeanMetric(); listeners = new RefreshListeners( () -> maxListeners, () -> engine.refresh("too-many-listeners"), logger, threadPool.getThreadContext(), - refreshMetric + new MeanMetric() ); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY); ShardId shardId = new ShardId(new Index("index", "_na_"), 1); - String allocationId = UUIDs.randomBase64UUID(random()); Directory directory = newDirectory(); store = new Store(shardId, indexSettings, directory, new DummyShardLock(shardId)); IndexWriterConfig iwc = newIndexWriterConfig(); @@ -456,7 +452,6 @@ public void 
testLotsOfThreads() throws Exception { refresher.cancel(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/79689") public void testDisallowAddListeners() throws Exception { assertEquals(0, listeners.pendingCount()); TestLocationListener listener = new TestLocationListener(); @@ -503,8 +498,21 @@ public void testDisallowAddListeners() throws Exception { } assertFalse(listeners.addOrNotify(index("1").getTranslogLocation(), new TestLocationListener())); - assertFalse(listeners.addOrNotify(index("1").getSeqNo(), new TestSeqNoListener())); - assertEquals(3, listeners.pendingCount()); + final int expectedPending; + if (listeners.pendingCount() == maxListeners) { + // Rejected + TestSeqNoListener rejected = new TestSeqNoListener(); + assertTrue(listeners.addOrNotify(index("1").getSeqNo(), rejected)); + assertNotNull(rejected.error); + expectedPending = 2; + } else { + TestSeqNoListener acceptedListener = new TestSeqNoListener(); + assertFalse(listeners.addOrNotify(index("1").getSeqNo(), acceptedListener)); + assertFalse(acceptedListener.isDone.get()); + assertNull(acceptedListener.error); + expectedPending = 3; + } + assertEquals(expectedPending, listeners.pendingCount()); } public void testSequenceNumberMustBeIssued() throws Exception { From b83b08f9f5f19b3cca87c72e302c0039420ac47e Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 25 Nov 2021 08:39:05 +0000 Subject: [PATCH 13/55] Throw NoSeedNodeLeftException on proxy failure (#80961) Today we throw an `IllegalStateException` if we can't form any connections to a remote cluster in proxy mode, which is typically treated as a non-retryable error. However in sniff mode we throw a `NoSeedNodeLeftException` which does trigger retries. Since connection failures are often transient things (in either mode), this commit moves proxy mode to use a retryable `NoSeedNodeLeftException` in this case too. 
Closes #80898 --- .../transport/NoSeedNodeLeftException.java | 14 +++++++++++--- .../transport/ProxyConnectionStrategy.java | 4 +--- .../transport/SniffConnectionStrategy.java | 2 +- .../transport/ProxyConnectionStrategyTests.java | 8 +++++++- .../xpack/ccr/action/ShardFollowNodeTask.java | 11 ++++------- 5 files changed, 24 insertions(+), 15 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/transport/NoSeedNodeLeftException.java b/server/src/main/java/org/elasticsearch/transport/NoSeedNodeLeftException.java index 6b727b985abc4..ef5e014b63c36 100644 --- a/server/src/main/java/org/elasticsearch/transport/NoSeedNodeLeftException.java +++ b/server/src/main/java/org/elasticsearch/transport/NoSeedNodeLeftException.java @@ -14,12 +14,20 @@ import java.io.IOException; /** - * Thrown after failed to connect to all seed nodes of the remote cluster. + * Thrown after completely failing to connect to any node of the remote cluster. */ public class NoSeedNodeLeftException extends ElasticsearchException { - public NoSeedNodeLeftException(String clusterName) { - super("no seed node left for cluster: [" + clusterName + "]"); + public NoSeedNodeLeftException(String message) { + super(message); + } + + NoSeedNodeLeftException(RemoteConnectionStrategy.ConnectionStrategy connectionStrategy, String clusterName) { + super( + connectionStrategy == RemoteConnectionStrategy.ConnectionStrategy.SNIFF + ? 
"no seed node left for cluster: [" + clusterName + "]" + : "Unable to open any proxy connections to cluster [" + clusterName + "]" + ); } public NoSeedNodeLeftException(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java index e878796e1fb69..4129f39eb5663 100644 --- a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java @@ -287,9 +287,7 @@ public void onFailure(Exception e) { } else { int openConnections = connectionManager.size(); if (openConnections == 0) { - finished.onFailure( - new IllegalStateException("Unable to open any proxy connections to remote cluster [" + clusterAlias + "]") - ); + finished.onFailure(new NoSeedNodeLeftException(strategyType(), clusterAlias)); } else { logger.debug( "unable to open maximum number of connections [remote cluster: {}, opened: {}, maximum: {}]", diff --git a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java index 71cf8807b8a05..df69e3d26d809 100644 --- a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java @@ -339,7 +339,7 @@ private void collectRemoteNodes(Iterator> seedNodesSuppl onFailure.accept(e); }); } else { - listener.onFailure(new NoSeedNodeLeftException(clusterAlias)); + listener.onFailure(new NoSeedNodeLeftException(strategyType(), clusterAlias)); } } diff --git a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java index 41538c1bb20dc..3c2392eeab087 100644 --- 
a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java @@ -34,6 +34,9 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; + public class ProxyConnectionStrategyTests extends ESTestCase { private final String clusterAlias = "cluster-alias"; @@ -202,7 +205,10 @@ public void testConnectFailsWithIncompatibleNodes() { PlainActionFuture connectFuture = PlainActionFuture.newFuture(); strategy.connect(connectFuture); - expectThrows(Exception.class, connectFuture::actionGet); + assertThat( + expectThrows(NoSeedNodeLeftException.class, connectFuture::actionGet).getMessage(), + allOf(containsString("Unable to open any proxy connections"), containsString('[' + clusterAlias + ']')) + ); assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); assertEquals(0, connectionManager.size()); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index 2e502e30f53f3..299940ab0894b 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -632,13 +632,10 @@ static boolean shouldRetry(final Exception e) { || actual instanceof NoShardAvailableActionException || actual instanceof UnavailableShardsException || actual instanceof AlreadyClosedException - || actual instanceof ElasticsearchSecurityException - || // If user does not have sufficient privileges - actual instanceof ClusterBlockException - || // If leader index is closed or no elected master - actual instanceof IndexClosedException - || // If follow index is 
closed - actual instanceof ConnectTransportException + || actual instanceof ElasticsearchSecurityException // If user does not have sufficient privileges + || actual instanceof ClusterBlockException // If leader index is closed or no elected master + || actual instanceof IndexClosedException // If follow index is closed + || actual instanceof ConnectTransportException || actual instanceof NodeClosedException || actual instanceof NoSuchRemoteClusterException || actual instanceof NoSeedNodeLeftException From daf37b484468cd0edf2ff4186d87100bef52917e Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 25 Nov 2021 09:58:18 +0100 Subject: [PATCH 14/55] Cleanup bulk api logic (#80987) * Always check whether it is prohibited to use custom routing on a data stream. * Always invoke prohibitAppendWritesInBackingIndices(...), but in the method check whether the operation is of type index or create. Follow-up from #80624. --- .../action/bulk/TransportBulkAction.java | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 851a45af20ec3..4c02c5969e8d1 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -348,6 +348,11 @@ public void onRejection(Exception rejectedException) { } static void prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest, Metadata metadata) { + DocWriteRequest.OpType opType = writeRequest.opType(); + if ((opType == OpType.CREATE || opType == OpType.INDEX) == false) { + // op type not create or index, then bail early + return; + } IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(writeRequest.index()); if (indexAbstraction == null) { return; @@ -365,7 +370,6 @@ static void 
prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest // so checking if write op is append-only and if so fail. // (Updates and deletes are allowed to target a backing index) - DocWriteRequest.OpType opType = writeRequest.opType(); // CREATE op_type is considered append-only and // INDEX op_type is considered append-only when no if_primary_term and if_seq_no is specified. // (the latter maybe an update, but at this stage we can't determine that. In order to determine @@ -524,10 +528,8 @@ protected void doRun() { throw new IllegalArgumentException("only write ops with an op_type of create are allowed in data streams"); } - if (docWriteRequest.opType() == OpType.CREATE || docWriteRequest.opType() == OpType.INDEX) { - prohibitAppendWritesInBackingIndices(docWriteRequest, metadata); - prohibitCustomRoutingOnDataStream(docWriteRequest, metadata); - } + prohibitCustomRoutingOnDataStream(docWriteRequest, metadata); + prohibitAppendWritesInBackingIndices(docWriteRequest, metadata); docWriteRequest.routing(metadata.resolveWriteIndexRouting(docWriteRequest.routing(), docWriteRequest.index())); docWriteRequest.process(); From c67b47078970319ea9b7540c1fecbf6c1656e375 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 25 Nov 2021 10:49:41 +0100 Subject: [PATCH 15/55] Reuse MappingMetadata instances in Metadata class. (#80348) Hash the mapping source of a MappingMetadata instance and then cache it in Metadata class. A mapping with the same hash will use a cached MappingMetadata instance. This can significantly reduce the number of MappingMetadata instances for data streams and index patterns. Idea originated from #69772, but just focusses on the jvm heap memory savings. And hashes the mapping instead of assigning it an uuid. 
Relates to #77466 --- .../cluster/metadata/IndexMetadata.java | 39 +++++ .../cluster/metadata/MappingMetadata.java | 4 + .../cluster/metadata/Metadata.java | 84 ++++++++++- .../common/compress/CompressedXContent.java | 137 +++++++++--------- .../cluster/metadata/MetadataTests.java | 113 +++++++++++++++ .../DeflateCompressedXContentTests.java | 10 +- 6 files changed, 304 insertions(+), 83 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 853ff0bf5c0de..409fe6b934085 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -561,6 +561,45 @@ private IndexMetadata( assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } + IndexMetadata withMappingMetadata(MappingMetadata mapping) { + ImmutableOpenMap.Builder mappingBuilder = ImmutableOpenMap.builder(); + mappingBuilder.put(MapperService.SINGLE_MAPPING_NAME, mapping); + + return new IndexMetadata( + this.index, + this.version, + this.mappingVersion, + this.settingsVersion, + this.aliasesVersion, + this.primaryTerms, + this.state, + this.numberOfShards, + this.numberOfReplicas, + this.settings, + mappingBuilder.build(), + this.aliases, + this.customData, + this.inSyncAllocationIds, + this.requireFilters, + this.initialRecoveryFilters, + this.includeFilters, + this.excludeFilters, + this.indexCreatedVersion, + this.routingNumShards, + this.routingPartitionSize, + this.routingPaths, + this.waitForActiveShards, + this.rolloverInfos, + this.isSystem, + this.isHidden, + this.timestampRange, + this.priority, + this.creationDate, + this.ignoreDiskWatermarks, + this.tierPreference + ); + } + public Index getIndex() { return index; } diff --git 
a/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java index 3fd51a03aa08c..b42c5bff0eb82 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java @@ -143,6 +143,10 @@ public boolean routingRequired() { return this.routingRequired; } + public String getSha256() { + return source.getSha256(); + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(type()); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 37cf925aef029..531f85d981827 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -208,6 +208,7 @@ public interface NonRestorableCustom extends Custom {} private final String[] visibleClosedIndices; private SortedMap indicesLookup; + private final Map mappingsByHash; private Metadata( String clusterUUID, @@ -229,7 +230,8 @@ private Metadata( String[] visibleOpenIndices, String[] allClosedIndices, String[] visibleClosedIndices, - SortedMap indicesLookup + SortedMap indicesLookup, + Map mappingsByHash ) { this.clusterUUID = clusterUUID; this.clusterUUIDCommitted = clusterUUIDCommitted; @@ -251,6 +253,7 @@ private Metadata( this.allClosedIndices = allClosedIndices; this.visibleClosedIndices = visibleClosedIndices; this.indicesLookup = indicesLookup; + this.mappingsByHash = mappingsByHash; } public Metadata withIncrementedVersion() { @@ -274,7 +277,8 @@ public Metadata withIncrementedVersion() { visibleOpenIndices, allClosedIndices, visibleClosedIndices, - indicesLookup + indicesLookup, + mappingsByHash ); } @@ -927,6 +931,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) 
throws return builder; } + Map getMappingsByHash() { + return mappingsByHash; + } + private static class MetadataDiff implements Diff { private final long version; @@ -1081,6 +1089,7 @@ public static class Builder { private final ImmutableOpenMap.Builder customs; private SortedMap previousIndicesLookup; + private final Map mappingsByHash; public Builder() { clusterUUID = UNKNOWN_CLUSTER_UUID; @@ -1089,6 +1098,7 @@ public Builder() { customs = ImmutableOpenMap.builder(); indexGraveyard(IndexGraveyard.builder().build()); // create new empty index graveyard to initialize previousIndicesLookup = null; + mappingsByHash = new HashMap<>(); } Builder(Metadata metadata) { @@ -1103,11 +1113,13 @@ public Builder() { this.templates = ImmutableOpenMap.builder(metadata.templates); this.customs = ImmutableOpenMap.builder(metadata.customs); previousIndicesLookup = metadata.getIndicesLookup(); + this.mappingsByHash = new HashMap<>(metadata.mappingsByHash); } public Builder put(IndexMetadata.Builder indexMetadataBuilder) { // we know its a new one, increment the version and store indexMetadataBuilder.version(indexMetadataBuilder.version() + 1); + dedupeMapping(indexMetadataBuilder); IndexMetadata indexMetadata = indexMetadataBuilder.build(); IndexMetadata previous = indices.put(indexMetadata.getIndex().getName(), indexMetadata); if (unsetPreviousIndicesLookup(previous, indexMetadata)) { @@ -1120,6 +1132,7 @@ public Builder put(IndexMetadata indexMetadata, boolean incrementVersion) { if (indices.get(indexMetadata.getIndex().getName()) == indexMetadata) { return this; } + indexMetadata = dedupeMapping(indexMetadata); // if we put a new index metadata, increment its version if (incrementVersion) { indexMetadata = IndexMetadata.builder(indexMetadata).version(indexMetadata.getVersion() + 1).build(); @@ -1186,13 +1199,16 @@ public Builder removeAllIndices() { previousIndicesLookup = null; indices.clear(); + mappingsByHash.clear(); return this; } public Builder indices(ImmutableOpenMap 
indices) { previousIndicesLookup = null; - this.indices.putAll(indices); + for (var cursor : indices) { + put(cursor.value, false); + } return this; } @@ -1637,6 +1653,8 @@ public Metadata build(boolean builtIndicesLookupEagerly) { } } + purgeUnusedEntries(indices); + // build all concrete indices arrays: // TODO: I think we can remove these arrays. it isn't worth the effort, for operations on all indices. // When doing an operation across all indices, most of the time is spent on actually going to all shards and @@ -1677,7 +1695,8 @@ public Metadata build(boolean builtIndicesLookupEagerly) { visibleOpenIndicesArray, allClosedIndicesArray, visibleClosedIndicesArray, - indicesLookup + indicesLookup, + Collections.unmodifiableMap(mappingsByHash) ); } @@ -1896,6 +1915,63 @@ public static Metadata fromXContent(XContentParser parser) throws IOException { XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); return builder.build(); } + + /** + * Dedupes {@link MappingMetadata} instance from the provided indexMetadata parameter using the sha256 + * hash from the compressed source of the mapping. If there is a mapping with the same sha256 hash then + * a new {@link IndexMetadata} is returned with the found {@link MappingMetadata} instance, otherwise + * the {@link MappingMetadata} instance of the indexMetadata parameter is recorded and the indexMetadata + * parameter is then returned. + */ + private IndexMetadata dedupeMapping(IndexMetadata indexMetadata) { + if (indexMetadata.mapping() == null) { + return indexMetadata; + } + + String digest = indexMetadata.mapping().getSha256(); + MappingMetadata entry = mappingsByHash.get(digest); + if (entry != null) { + return indexMetadata.withMappingMetadata(entry); + } else { + mappingsByHash.put(digest, indexMetadata.mapping()); + return indexMetadata; + } + } + + /** + * Similar to {@link #dedupeMapping(IndexMetadata)}. 
+ */ + private void dedupeMapping(IndexMetadata.Builder indexMetadataBuilder) { + if (indexMetadataBuilder.mapping() == null) { + return; + } + + String digest = indexMetadataBuilder.mapping().getSha256(); + MappingMetadata entry = mappingsByHash.get(digest); + if (entry != null) { + indexMetadataBuilder.putMapping(entry); + } else { + mappingsByHash.put(digest, indexMetadataBuilder.mapping()); + } + } + + private void purgeUnusedEntries(ImmutableOpenMap indices) { + final Set sha256HashesInUse = new HashSet<>(mappingsByHash.size()); + for (var im : indices.values()) { + if (im.mapping() != null) { + sha256HashesInUse.add(im.mapping().getSha256()); + } + } + + final var iterator = mappingsByHash.entrySet().iterator(); + while (iterator.hasNext()) { + final var cacheKey = iterator.next().getKey(); + if (sha256HashesInUse.contains(cacheKey) == false) { + iterator.remove(); + } + } + } + } private static final ToXContent.Params FORMAT_PARAMS; diff --git a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java index 1f99090ae813d..d43c78792938a 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java +++ b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java @@ -9,8 +9,10 @@ package org.elasticsearch.common.compress; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -24,9 +26,10 @@ import java.io.OutputStream; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; -import java.util.Arrays; +import java.security.DigestOutputStream; +import 
java.security.MessageDigest; +import java.util.Base64; import java.util.zip.CRC32; -import java.util.zip.CheckedOutputStream; import java.util.zip.DataFormatException; import java.util.zip.Inflater; @@ -38,46 +41,45 @@ */ public final class CompressedXContent { - private static final ThreadLocal inflater1 = ThreadLocal.withInitial(InflaterAndBuffer::new); - private static final ThreadLocal inflater2 = ThreadLocal.withInitial(InflaterAndBuffer::new); + private static final ThreadLocal inflater = ThreadLocal.withInitial(InflaterAndBuffer::new); - private static int crc32(BytesReference data) { - CRC32 crc32 = new CRC32(); + private static String sha256(BytesReference data) { + MessageDigest messageDigest = MessageDigests.sha256(); try { - data.writeTo(new CheckedOutputStream(Streams.NULL_OUTPUT_STREAM, crc32)); + data.writeTo(new DigestOutputStream(Streams.NULL_OUTPUT_STREAM, messageDigest)); } catch (IOException bogus) { // cannot happen throw new Error(bogus); } - return (int) crc32.getValue(); + return Base64.getEncoder().encodeToString(messageDigest.digest()); } - private static int crc32FromCompressed(byte[] compressed) { - CRC32 crc32 = new CRC32(); - try (InflaterAndBuffer inflaterAndBuffer = inflater1.get()) { + private static String sha256FromCompressed(byte[] compressed) { + MessageDigest messageDigest = MessageDigests.sha256(); + try (InflaterAndBuffer inflaterAndBuffer = inflater.get()) { final Inflater inflater = inflaterAndBuffer.inflater; final ByteBuffer buffer = inflaterAndBuffer.buffer; assert assertBufferIsCleared(buffer); setInflaterInput(compressed, inflater); do { if (inflater.inflate(buffer) > 0) { - crc32.update(buffer.flip()); + messageDigest.update(buffer.flip()); } buffer.clear(); } while (inflater.finished() == false); - return (int) crc32.getValue(); + return Base64.getEncoder().encodeToString(messageDigest.digest()); } catch (DataFormatException e) { throw new ElasticsearchException(e); } } private final byte[] bytes; - private final int 
crc32; + private final String sha256; // Used for serialization - private CompressedXContent(byte[] compressed, int crc32) { + private CompressedXContent(byte[] compressed, String sha256) { this.bytes = compressed; - this.crc32 = crc32; + this.sha256 = sha256; assertConsistent(); } @@ -90,8 +92,8 @@ public CompressedXContent(ToXContent xcontent) throws IOException { */ public CompressedXContent(ToXContent xcontent, ToXContent.Params params) throws IOException { BytesStreamOutput bStream = new BytesStreamOutput(); - CRC32 crc32 = new CRC32(); - OutputStream checkedStream = new CheckedOutputStream(CompressorFactory.COMPRESSOR.threadLocalOutputStream(bStream), crc32); + MessageDigest messageDigest = MessageDigests.sha256(); + OutputStream checkedStream = new DigestOutputStream(CompressorFactory.COMPRESSOR.threadLocalOutputStream(bStream), messageDigest); try (XContentBuilder builder = XContentFactory.jsonBuilder(checkedStream)) { if (xcontent.isFragment()) { builder.startObject(); @@ -102,7 +104,7 @@ public CompressedXContent(ToXContent xcontent, ToXContent.Params params) throws } } this.bytes = BytesReference.toBytes(bStream.bytes()); - this.crc32 = (int) crc32.getValue(); + this.sha256 = Base64.getEncoder().encodeToString(messageDigest.digest()); assertConsistent(); } @@ -115,18 +117,18 @@ public CompressedXContent(BytesReference data) throws IOException { if (compressor != null) { // already compressed... 
this.bytes = BytesReference.toBytes(data); - this.crc32 = crc32FromCompressed(this.bytes); + this.sha256 = sha256FromCompressed(this.bytes); } else { this.bytes = BytesReference.toBytes(CompressorFactory.COMPRESSOR.compress(data)); - this.crc32 = crc32(data); + this.sha256 = sha256(data); } assertConsistent(); } private void assertConsistent() { assert CompressorFactory.compressor(new BytesArray(bytes)) != null; - assert this.crc32 == crc32(uncompressed()); - assert this.crc32 == crc32FromCompressed(bytes); + assert this.sha256.equals(sha256(uncompressed())); + assert this.sha256.equals(sha256FromCompressed(bytes)); } public CompressedXContent(byte[] data) throws IOException { @@ -160,13 +162,31 @@ public String string() { return uncompressed().utf8ToString(); } + public String getSha256() { + return sha256; + } + public static CompressedXContent readCompressedString(StreamInput in) throws IOException { - int crc32 = in.readInt(); - return new CompressedXContent(in.readByteArray(), crc32); + final String sha256; + final byte[] compressedData; + if (in.getVersion().onOrAfter(Version.V_8_1_0)) { + sha256 = in.readString(); + compressedData = in.readByteArray(); + } else { + int crc32 = in.readInt(); + compressedData = in.readByteArray(); + sha256 = sha256FromCompressed(compressedData); + } + return new CompressedXContent(compressedData, sha256); } public void writeTo(StreamOutput out) throws IOException { - out.writeInt(crc32); + if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + out.writeString(sha256); + } else { + int crc32 = crc32FromCompressed(bytes); + out.writeInt(crc32); + } out.writeByteArray(bytes); } @@ -176,54 +196,12 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; CompressedXContent that = (CompressedXContent) o; - - if (crc32 != that.crc32) { - return false; - } - - if (Arrays.equals(bytes, that.bytes)) { - return true; - } - // compression is not entirely deterministic in all cases depending on hwo the 
compressed bytes were assembled, check uncompressed - // equality - return equalsWhenUncompressed(bytes, that.bytes); - } - - // package private for testing - static boolean equalsWhenUncompressed(byte[] compressed1, byte[] compressed2) { - try (InflaterAndBuffer inflaterAndBuffer1 = inflater1.get(); InflaterAndBuffer inflaterAndBuffer2 = inflater2.get()) { - final Inflater inf1 = inflaterAndBuffer1.inflater; - final Inflater inf2 = inflaterAndBuffer2.inflater; - setInflaterInput(compressed1, inf1); - setInflaterInput(compressed2, inf2); - final ByteBuffer buf1 = inflaterAndBuffer1.buffer; - assert assertBufferIsCleared(buf1); - final ByteBuffer buf2 = inflaterAndBuffer2.buffer; - assert assertBufferIsCleared(buf2); - while (true) { - while (inf1.inflate(buf1) > 0 && buf1.hasRemaining()) - ; - while (inf2.inflate(buf2) > 0 && buf2.hasRemaining()) - ; - if (buf1.flip().equals(buf2.flip()) == false) { - return false; - } - if (inf1.finished()) { - // if the first inflater is done but the second one still has data we fail here, if it's the other way around we fail - // on the next round because we will only read bytes into 2 - return inf2.finished(); - } - buf1.clear(); - buf2.clear(); - } - } catch (DataFormatException e) { - throw new ElasticsearchException(e); - } + return sha256.equals(that.sha256); } @Override public int hashCode() { - return crc32; + return sha256.hashCode(); } @Override @@ -231,6 +209,25 @@ public String toString() { return string(); } + private static int crc32FromCompressed(byte[] compressed) { + CRC32 crc32 = new CRC32(); + try (InflaterAndBuffer inflaterAndBuffer = inflater.get()) { + final Inflater inflater = inflaterAndBuffer.inflater; + final ByteBuffer buffer = inflaterAndBuffer.buffer; + assert assertBufferIsCleared(buffer); + setInflaterInput(compressed, inflater); + do { + if (inflater.inflate(buffer) > 0) { + crc32.update(buffer.flip()); + } + buffer.clear(); + } while (inflater.finished() == false); + return (int) crc32.getValue(); 
+ } catch (DataFormatException e) { + throw new ElasticsearchException(e); + } + } + /** * Set the given bytes as inflater input, accounting for the fact that they start with our header of size * {@link DeflateCompressor#HEADER_SIZE}. diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java index 49225f58059b2..7061d9556b82c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java @@ -28,6 +28,8 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.alias.RandomAliasActionsGenerator; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; @@ -60,6 +62,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @@ -1980,6 +1983,116 @@ private IndexMetadata buildIndexWithAlias( .build(); } + public void testMappingDuplication() { + final Set randomMappingDefinitions; + { + int numEntries = randomIntBetween(4, 8); + randomMappingDefinitions = new HashSet<>(numEntries); + for (int i = 0; i < numEntries; i++) { + Map mapping = RandomAliasActionsGenerator.randomMap(2); + String mappingAsString = Strings.toString((builder, params) -> builder.mapContents(mapping)); + randomMappingDefinitions.add(mappingAsString); + } + } + + Metadata metadata; + int numIndices = randomIntBetween(16, 32); + { + String[] definitions = randomMappingDefinitions.toArray(String[]::new); 
+ Metadata.Builder mb = new Metadata.Builder(); + for (int i = 0; i < numIndices; i++) { + IndexMetadata.Builder indexBuilder = IndexMetadata.builder("index-" + i) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)) + .putMapping(definitions[i % randomMappingDefinitions.size()]) + .numberOfShards(1) + .numberOfReplicas(0); + if (randomBoolean()) { + mb.put(indexBuilder); + } else { + mb.put(indexBuilder.build(), true); + } + } + metadata = mb.build(); + } + assertThat(metadata.getMappingsByHash().size(), equalTo(randomMappingDefinitions.size())); + assertThat( + metadata.indices().stream().map(entry -> entry.getValue().mapping()).collect(Collectors.toSet()), + hasSize(metadata.getMappingsByHash().size()) + ); + + // Add a new index with a new index with known mapping: + MappingMetadata mapping = metadata.indices().get("index-" + randomInt(numIndices - 1)).mapping(); + MappingMetadata entry = metadata.getMappingsByHash().get(mapping.getSha256()); + { + Metadata.Builder mb = new Metadata.Builder(metadata); + mb.put( + IndexMetadata.builder("index-" + numIndices) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)) + .putMapping(mapping) + .numberOfShards(1) + .numberOfReplicas(0) + ); + metadata = mb.build(); + } + assertThat(metadata.getMappingsByHash().size(), equalTo(randomMappingDefinitions.size())); + assertThat(metadata.getMappingsByHash().get(mapping.getSha256()), equalTo(entry)); + + // Remove index and ensure mapping cache stays the same + { + Metadata.Builder mb = new Metadata.Builder(metadata); + mb.remove("index-" + numIndices); + metadata = mb.build(); + } + assertThat(metadata.getMappingsByHash().size(), equalTo(randomMappingDefinitions.size())); + assertThat(metadata.getMappingsByHash().get(mapping.getSha256()), equalTo(entry)); + + // Update a mapping of an index: + IndexMetadata luckyIndex = metadata.index("index-" + randomInt(numIndices - 1)); + entry = 
metadata.getMappingsByHash().get(luckyIndex.mapping().getSha256()); + MappingMetadata updatedMapping = new MappingMetadata(MapperService.SINGLE_MAPPING_NAME, Map.of("mapping", "updated")); + { + Metadata.Builder mb = new Metadata.Builder(metadata); + mb.put(IndexMetadata.builder(luckyIndex).putMapping(updatedMapping)); + metadata = mb.build(); + } + assertThat(metadata.getMappingsByHash().size(), equalTo(randomMappingDefinitions.size() + 1)); + assertThat(metadata.getMappingsByHash().get(luckyIndex.mapping().getSha256()), equalTo(entry)); + assertThat(metadata.getMappingsByHash().get(updatedMapping.getSha256()), equalTo(updatedMapping)); + + // Remove the index with updated mapping + { + Metadata.Builder mb = new Metadata.Builder(metadata); + mb.remove(luckyIndex.getIndex().getName()); + metadata = mb.build(); + } + assertThat(metadata.getMappingsByHash().size(), equalTo(randomMappingDefinitions.size())); + assertThat(metadata.getMappingsByHash().get(updatedMapping.getSha256()), nullValue()); + + // Add an index with new mapping and then later remove it: + MappingMetadata newMapping = new MappingMetadata(MapperService.SINGLE_MAPPING_NAME, Map.of("new", "mapping")); + { + Metadata.Builder mb = new Metadata.Builder(metadata); + mb.put( + IndexMetadata.builder("index-" + numIndices) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)) + .putMapping(newMapping) + .numberOfShards(1) + .numberOfReplicas(0) + ); + metadata = mb.build(); + } + assertThat(metadata.getMappingsByHash().size(), equalTo(randomMappingDefinitions.size() + 1)); + assertThat(metadata.getMappingsByHash().get(newMapping.getSha256()), equalTo(newMapping)); + + { + Metadata.Builder mb = new Metadata.Builder(metadata); + mb.remove("index-" + numIndices); + metadata = mb.build(); + } + assertThat(metadata.getMappingsByHash().size(), equalTo(randomMappingDefinitions.size())); + assertThat(metadata.getMappingsByHash().get(newMapping.getSha256()), nullValue()); + } + 
public static Metadata randomMetadata() { return randomMetadata(1); } diff --git a/server/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java b/server/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java index 23dc9a601e8ab..a0b295117812e 100644 --- a/server/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java +++ b/server/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.io.OutputStream; -import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Random; @@ -134,13 +133,6 @@ public void testEqualsWhenUncompressed() throws IOException { final CompressedXContent two = new CompressedXContent( (builder, params) -> builder.stringListField("arr", Arrays.asList(randomJSON2)) ); - assertFalse(CompressedXContent.equalsWhenUncompressed(one.compressed(), two.compressed())); - } - - public void testEqualsCrcCollision() throws IOException { - final CompressedXContent content1 = new CompressedXContent("{\"d\":\"68&A<\"}".getBytes(StandardCharsets.UTF_8)); - final CompressedXContent content2 = new CompressedXContent("{\"d\":\"gZG- \"}".getBytes(StandardCharsets.UTF_8)); - assertEquals(content1.hashCode(), content2.hashCode()); // the inputs are a known CRC32 collision - assertNotEquals(content1, content2); + assertNotEquals(one.uncompressed(), two.uncompressed()); } } From 73e71009b096ed3e123a334f567712fea99558dd Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Thu, 25 Nov 2021 12:33:43 +0000 Subject: [PATCH 16/55] Fix split package org.apache.lucene.search.vectorhighlight (#81041) This PR moves the CustomFieldQuery class from org.apache.lucene.search.vectorhighlight to org.elasticsearch.lucene.search.vectorhighlight, thus avoiding the split package with lucene. 
It would appear that when CustomFieldQuery was originally conceived, it needed package-private access to its superclass, FieldQuery, but this is no longer the case (the superclass now exposes the necessary members publicly). --- server/build.gradle | 1 - .../lucene/search/vectorhighlight/CustomFieldQuery.java | 4 +++- .../fetch/subphase/highlight/FastVectorHighlighter.java | 2 +- .../org/elasticsearch/deps/lucene/VectorHighlighterTests.java | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) rename server/src/main/java/org/{apache => elasticsearch}/lucene/search/vectorhighlight/CustomFieldQuery.java (97%) diff --git a/server/build.gradle b/server/build.gradle index 9a87155eb86c1..781ab3c31958d 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -259,7 +259,6 @@ tasks.named('splitPackagesAudit').configure { ignoreClasses 'org.apache.lucene.queries.BinaryDocValuesRangeQuery', 'org.apache.lucene.queries.BlendedTermQuery', 'org.apache.lucene.queries.SpanMatchNoDocsQuery', - 'org.apache.lucene.search.vectorhighlight.CustomFieldQuery', // These are tricky because Lucene itself splits the index package, // but this should be fixed in Lucene 9 diff --git a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java b/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java similarity index 97% rename from server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java rename to server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java index 3a3439a326d30..5e90871e4689a 100644 --- a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.apache.lucene.search.vectorhighlight; +package org.elasticsearch.lucene.search.vectorhighlight; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; @@ -20,6 +20,8 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.vectorhighlight.FastVectorHighlighter; +import org.apache.lucene.search.vectorhighlight.FieldQuery; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.index.search.ESToParentBlockJoinQuery; diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java index 536909140271c..01c6a60552b70 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java @@ -11,7 +11,6 @@ import org.apache.lucene.search.vectorhighlight.BaseFragmentsBuilder; import org.apache.lucene.search.vectorhighlight.BoundaryScanner; import org.apache.lucene.search.vectorhighlight.BreakIteratorBoundaryScanner; -import org.apache.lucene.search.vectorhighlight.CustomFieldQuery; import org.apache.lucene.search.vectorhighlight.FieldFragList; import org.apache.lucene.search.vectorhighlight.FieldQuery; import org.apache.lucene.search.vectorhighlight.FragListBuilder; @@ -27,6 +26,7 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.TextSearchInfo; +import org.elasticsearch.lucene.search.vectorhighlight.CustomFieldQuery; import org.elasticsearch.search.fetch.FetchSubPhase; import 
org.elasticsearch.search.fetch.subphase.highlight.SearchHighlightContext.Field; import org.elasticsearch.search.fetch.subphase.highlight.SearchHighlightContext.FieldOptions; diff --git a/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java b/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java index dac759607954b..4992b2e87e0d1 100644 --- a/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java +++ b/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java @@ -22,11 +22,11 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.vectorhighlight.CustomFieldQuery; import org.apache.lucene.search.vectorhighlight.FastVectorHighlighter; import org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.lucene.search.vectorhighlight.CustomFieldQuery; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.equalTo; From 60b3ca674f84b248037ce04ccae00072753db525 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 25 Nov 2021 13:43:50 +0100 Subject: [PATCH 17/55] Add support for directly iterating over arrays (#80469) Avoid creating a stream and a spliterators just for a simple iteration over an array. 
Co-authored-by: David Turner --- .../mustache/MultiSearchTemplateResponse.java | 4 +- .../indices/segments/IndexShardSegments.java | 4 +- .../admin/indices/stats/IndexShardStats.java | 4 +- .../action/bulk/BulkResponse.java | 4 +- .../action/get/MultiGetResponse.java | 4 +- .../action/search/MultiSearchResponse.java | 4 +- .../termvectors/MultiTermVectorsResponse.java | 4 +- .../common/collect/Iterators.java | 27 +++++++++++ .../org/elasticsearch/monitor/fs/FsInfo.java | 4 +- .../elasticsearch/monitor/jvm/JvmStats.java | 4 +- .../org/elasticsearch/search/SearchHits.java | 3 +- .../common/collect/IteratorsTests.java | 47 +++++++++++++++++++ 12 files changed, 94 insertions(+), 19 deletions(-) diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index 3dff5b0f4a853..86cba18da06c1 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -24,7 +25,6 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Arrays; import java.util.Iterator; public class MultiSearchTemplateResponse extends ActionResponse implements Iterable, ToXContentObject { @@ -115,7 +115,7 @@ public String toString() { @Override public Iterator iterator() { - return Arrays.stream(items).iterator(); + return Iterators.forArray(items); } /** diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java index afe0e4855f065..16ce3bb078ade 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java @@ -8,9 +8,9 @@ package org.elasticsearch.action.admin.indices.segments; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.index.shard.ShardId; -import java.util.Arrays; import java.util.Iterator; public class IndexShardSegments implements Iterable { @@ -38,6 +38,6 @@ public ShardSegments[] getShards() { @Override public Iterator iterator() { - return Arrays.stream(shards).iterator(); + return Iterators.forArray(shards); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java index 5b52a7209ef71..18a9c9d1ba27a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java @@ -8,13 +8,13 @@ package org.elasticsearch.action.admin.indices.stats; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -import java.util.Arrays; import java.util.Iterator; public class IndexShardStats implements Iterable, Writeable { @@ -47,7 +47,7 @@ public ShardStats getAt(int position) { @Override public Iterator iterator() { - return Arrays.stream(shards).iterator(); + return Iterators.forArray(shards); } private CommonStats total = null; diff 
--git a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index 258d91c712ba7..ae3c53ca5b8c8 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.StatusToXContentObject; @@ -19,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -124,7 +124,7 @@ public BulkItemResponse[] getItems() { @Override public Iterator iterator() { - return Arrays.stream(responses).iterator(); + return Iterators.forArray(responses); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java index 57158194416b9..eb979bc578554 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java @@ -11,6 +11,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -27,7 +28,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -130,7 +130,7 @@ public MultiGetItemResponse[] getResponses() { @Override public Iterator iterator() { - 
return Arrays.stream(responses).iterator(); + return Iterators.forArray(responses); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java index c40e622468759..041fdfeca76eb 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java @@ -12,6 +12,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -25,7 +26,6 @@ import org.elasticsearch.xcontent.XContentParser.Token; import java.io.IOException; -import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -126,7 +126,7 @@ public MultiSearchResponse(Item[] items, long tookInMillis) { @Override public Iterator iterator() { - return Arrays.stream(items).iterator(); + return Iterators.forArray(items); } /** diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java index 52f389d7f4e19..bfa07fa55d3f9 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java @@ -11,6 +11,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.io.stream.Writeable; @@ -20,7 +21,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Arrays; import java.util.Iterator; public class MultiTermVectorsResponse extends ActionResponse implements Iterable, ToXContentObject { @@ -102,7 +102,7 @@ public MultiTermVectorsItemResponse[] getResponses() { @Override public Iterator iterator() { - return Arrays.stream(responses).iterator(); + return Iterators.forArray(responses); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java index 94ebb0261b270..a2629ffd0556c 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java +++ b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -83,4 +83,31 @@ public T next() { return iterators[index].next(); } } + + public static Iterator forArray(T[] array) { + return new ArrayIterator<>(array); + } + + private static final class ArrayIterator implements Iterator { + + private final T[] array; + private int index; + + private ArrayIterator(T[] array) { + this.array = Objects.requireNonNull(array, "Unable to iterate over a null array"); + } + + @Override + public boolean hasNext() { + return index < array.length; + } + + @Override + public T next() { + if (index >= array.length) { + throw new NoSuchElementException(); + } + return array[index++]; + } + } } diff --git a/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java b/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java index 2054063e98a88..1b7ba960316a3 100644 --- a/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java @@ -8,6 +8,7 @@ package org.elasticsearch.monitor.fs; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -18,7 +19,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Arrays; import java.util.HashSet; import java.util.Iterator; import java.util.Set; @@ -495,7 +495,7 @@ public IoStats getIoStats() { @Override public Iterator iterator() { - return Arrays.stream(paths).iterator(); + return Iterators.forArray(paths); } @Override diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java index 474ed7382c12a..04901d89c005a 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java @@ -8,6 +8,7 @@ package org.elasticsearch.monitor.jvm; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -27,7 +28,6 @@ import java.lang.management.RuntimeMXBean; import java.lang.management.ThreadMXBean; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -346,7 +346,7 @@ public GarbageCollector[] getCollectors() { @Override public Iterator iterator() { - return Arrays.stream(collectors).iterator(); + return Iterators.forArray(collectors); } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchHits.java b/server/src/main/java/org/elasticsearch/search/SearchHits.java index fc9aa68a0f1f5..e5db8b81bbe61 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHits.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHits.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.TotalHits.Relation; +import 
org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -170,7 +171,7 @@ public Object[] getCollapseValues() { @Override public Iterator iterator() { - return Arrays.stream(getHits()).iterator(); + return Iterators.forArray(getHits()); } public static final class Fields { diff --git a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java index 2b77d4b6a4005..3a750419c2090 100644 --- a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.common.collect; +import org.elasticsearch.common.Randomness; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -16,6 +17,7 @@ import java.util.Iterator; import java.util.List; import java.util.NoSuchElementException; +import java.util.concurrent.atomic.AtomicInteger; public class IteratorsTests extends ESTestCase { public void testConcatentation() { @@ -110,6 +112,51 @@ public void testNullIterator() { } } + public void testArrayIterator() { + Integer[] array = randomIntegerArray(); + Iterator iterator = Iterators.forArray(array); + + int i = 0; + while (iterator.hasNext()) { + assertEquals(array[i++], iterator.next()); + } + assertEquals(array.length, i); + } + + public void testArrayIteratorForEachRemaining() { + Integer[] array = randomIntegerArray(); + Iterator iterator = Iterators.forArray(array); + + AtomicInteger index = new AtomicInteger(); + iterator.forEachRemaining(i -> assertEquals(array[index.getAndIncrement()], i)); + assertEquals(array.length, index.get()); + } + + public void testArrayIteratorIsUnmodifiable() { + Integer[] array = randomIntegerArray(); + Iterator iterator = Iterators.forArray(array); + + 
expectThrows(UnsupportedOperationException.class, iterator::remove); + } + + public void testArrayIteratorThrowsNoSuchElementExceptionWhenDepleted() { + Integer[] array = randomIntegerArray(); + Iterator iterator = Iterators.forArray(array); + for (int i = 0; i < array.length; i++) { + iterator.next(); + } + + expectThrows(NoSuchElementException.class, iterator::next); + } + + public void testArrayIteratorOnNull() { + expectThrows(NullPointerException.class, "Unable to iterate over a null array", () -> Iterators.forArray(null)); + } + + private static Integer[] randomIntegerArray() { + return Randomness.get().ints(randomIntBetween(0, 1000)).boxed().toArray(Integer[]::new); + } + private Iterator singletonIterator(T value) { return Collections.singleton(value).iterator(); } From a139aff04726d6b9ffb3562d165a6ad129b6eab9 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 25 Nov 2021 13:55:41 +0100 Subject: [PATCH 18/55] Disable bwc tests in order to backport #80348 (#81046) --- build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.gradle b/build.gradle index 59fc13e9e1ac8..9d5ab644667cc 100644 --- a/build.gradle +++ b/build.gradle @@ -131,9 +131,9 @@ tasks.register("verifyVersions") { * after the backport of the backcompat code is complete. */ -boolean bwc_tests_enabled = true +boolean bwc_tests_enabled = false // place a PR link here when committing bwc changes: -String bwc_tests_disabled_issue = "" +String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/80348" /* * FIPS 140-2 behavior was fixed in 7.11.0. Before that there is no way to run elasticsearch in a * JVM that is properly configured to be in fips mode with BCFIPS. 
For now we need to disable From 15de6035148fd7904bb4f609f10d9b29af492a4f Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Thu, 25 Nov 2021 13:34:56 +0000 Subject: [PATCH 19/55] Fix split package org.apache.lucene.queries (#81043) --- .../java/org/elasticsearch/percolator/QueryAnalyzer.java | 2 +- .../org/elasticsearch/percolator/CandidateQueryTests.java | 2 +- .../org/elasticsearch/percolator/QueryAnalyzerTests.java | 2 +- server/build.gradle | 6 +----- .../search/SpanBooleanQueryRewriteWithMaxClause.java | 2 +- .../java/org/elasticsearch/index/mapper/RangeType.java | 2 +- .../index/query/SpanMultiTermQueryBuilder.java | 2 +- .../elasticsearch/index/search/MultiMatchQueryParser.java | 2 +- .../lucene/queries/BinaryDocValuesRangeQuery.java | 2 +- .../lucene/queries/BlendedTermQuery.java | 2 +- .../lucene/queries/SpanMatchNoDocsQuery.java | 2 +- .../lucene/search/vectorhighlight/CustomFieldQuery.java | 2 +- .../mapper/RangeFieldQueryStringQueryBuilderTests.java | 2 +- .../elasticsearch/index/mapper/RangeFieldTypeTests.java | 2 +- .../index/query/QueryStringQueryBuilderTests.java | 2 +- .../index/query/SpanMultiTermQueryBuilderTests.java | 2 +- .../index/query/SpanNearQueryBuilderTests.java | 2 +- .../index/search/MultiMatchQueryParserTests.java | 2 +- .../BaseRandomBinaryDocValuesRangeQueryTestCase.java | 8 ++++---- .../lucene/queries/BinaryDocValuesRangeQueryTests.java | 8 ++++---- .../lucene/queries/BlendedTermQueryTests.java | 1 - .../lucene/queries/SpanMatchNoDocsQueryTests.java | 1 - .../histogram/DateRangeHistogramAggregatorTests.java | 2 +- 23 files changed, 27 insertions(+), 33 deletions(-) rename server/src/main/java/org/{apache => elasticsearch}/lucene/queries/BinaryDocValuesRangeQuery.java (99%) rename server/src/main/java/org/{apache => elasticsearch}/lucene/queries/BlendedTermQuery.java (99%) rename server/src/main/java/org/{apache => elasticsearch}/lucene/queries/SpanMatchNoDocsQuery.java (98%) diff 
--git a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java index 4fedaa9177cd0..f5b2e2ba54c11 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java @@ -9,7 +9,6 @@ import org.apache.lucene.index.PrefixCodedTerms; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.queries.spans.SpanOrQuery; import org.apache.lucene.queries.spans.SpanTermQuery; import org.apache.lucene.search.BooleanClause.Occur; @@ -30,6 +29,7 @@ import org.apache.lucene.util.automaton.ByteRunAutomaton; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.index.query.DateRangeIncludingNowQuery; +import org.elasticsearch.lucene.queries.BlendedTermQuery; import java.util.ArrayList; import java.util.Arrays; diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 636d49d0139c7..06b5a0f0b53f5 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -33,7 +33,6 @@ import org.apache.lucene.index.Term; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanNotQuery; import org.apache.lucene.queries.spans.SpanOrQuery; @@ -83,6 +82,7 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.TestDocumentParserContext; import 
org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.lucene.queries.BlendedTermQuery; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.VersionUtils; diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index d30095466ad1e..b3b47d909b046 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -15,7 +15,6 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.queries.intervals.IntervalQuery; import org.apache.lucene.queries.intervals.Intervals; import org.apache.lucene.queries.intervals.IntervalsSource; @@ -49,6 +48,7 @@ import org.elasticsearch.common.lucene.search.function.RandomScoreFunction; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.index.search.ESToParentBlockJoinQuery; +import org.elasticsearch.lucene.queries.BlendedTermQuery; import org.elasticsearch.percolator.QueryAnalyzer.QueryExtraction; import org.elasticsearch.percolator.QueryAnalyzer.Result; import org.elasticsearch.test.ESTestCase; diff --git a/server/build.gradle b/server/build.gradle index 781ab3c31958d..1d6a1f4f7689a 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -256,13 +256,9 @@ tasks.named("licenseHeaders").configure { tasks.named('splitPackagesAudit').configure { // Lucene packages should be owned by Lucene! 
- ignoreClasses 'org.apache.lucene.queries.BinaryDocValuesRangeQuery', - 'org.apache.lucene.queries.BlendedTermQuery', - 'org.apache.lucene.queries.SpanMatchNoDocsQuery', - // These are tricky because Lucene itself splits the index package, // but this should be fixed in Lucene 9 - 'org.apache.lucene.index.LazySoftDeletesDirectoryReaderWrapper', + ignoreClasses 'org.apache.lucene.index.LazySoftDeletesDirectoryReaderWrapper', // cli is owned by the libs/cli, so these should be moved to o.e.server.cli 'org.elasticsearch.cli.CommandLoggingConfigurator', diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/SpanBooleanQueryRewriteWithMaxClause.java b/server/src/main/java/org/elasticsearch/common/lucene/search/SpanBooleanQueryRewriteWithMaxClause.java index 18be37bff52e0..22b9d25044ab5 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/SpanBooleanQueryRewriteWithMaxClause.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/SpanBooleanQueryRewriteWithMaxClause.java @@ -14,7 +14,6 @@ import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.queries.SpanMatchNoDocsQuery; import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.queries.spans.SpanOrQuery; import org.apache.lucene.queries.spans.SpanQuery; @@ -24,6 +23,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.AttributeSource; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.lucene.queries.SpanMatchNoDocsQuery; import java.io.IOException; import java.util.Collection; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java index 092f555b50965..f8100e794dbd9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java @@ 
-17,7 +17,6 @@ import org.apache.lucene.document.LongRange; import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.queries.BinaryDocValuesRangeQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -28,6 +27,7 @@ import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java index b2006dddfabdb..9ad3e462796dd 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.index.query; -import org.apache.lucene.queries.SpanMatchNoDocsQuery; import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; @@ -22,6 +21,7 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.support.QueryParsers; +import org.elasticsearch.lucene.queries.SpanMatchNoDocsQuery; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQueryParser.java index e2d4fe399fe6a..ee17329f7767e 100644 --- 
a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQueryParser.java @@ -11,7 +11,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; @@ -24,6 +23,7 @@ import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.lucene.queries.BlendedTermQuery; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java b/server/src/main/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQuery.java similarity index 99% rename from server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java rename to server/src/main/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQuery.java index 2ad15176c5592..f474b6f7c883b 100644 --- a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQuery.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.apache.lucene.queries; +package org.elasticsearch.lucene.queries; import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.DocValues; diff --git a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/elasticsearch/lucene/queries/BlendedTermQuery.java similarity index 99% rename from server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java rename to server/src/main/java/org/elasticsearch/lucene/queries/BlendedTermQuery.java index 6772838c95f49..d06d475503fad 100644 --- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/queries/BlendedTermQuery.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -package org.apache.lucene.queries; +package org.elasticsearch.lucene.queries; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; diff --git a/server/src/main/java/org/apache/lucene/queries/SpanMatchNoDocsQuery.java b/server/src/main/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQuery.java similarity index 98% rename from server/src/main/java/org/apache/lucene/queries/SpanMatchNoDocsQuery.java rename to server/src/main/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQuery.java index a609ec3d96479..b70e573c3b032 100644 --- a/server/src/main/java/org/apache/lucene/queries/SpanMatchNoDocsQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQuery.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -package org.apache.lucene.queries; +package org.elasticsearch.lucene.queries; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; diff --git a/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java b/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java index 5e90871e4689a..c0c0cbeef7199 100644 --- a/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -10,7 +10,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.queries.spans.SpanTermQuery; import org.apache.lucene.sandbox.search.CombinedFieldQuery; import org.apache.lucene.search.BoostQuery; @@ -25,6 +24,7 @@ import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.index.search.ESToParentBlockJoinQuery; +import org.elasticsearch.lucene.queries.BlendedTermQuery; import java.io.IOException; import java.util.Collection; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java index 871bc1e829992..c63727b0dfa9b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java @@ -15,7 +15,6 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.LongRange; import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.queries.BinaryDocValuesRangeQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; 
import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; @@ -26,6 +25,7 @@ import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java index 053897a9cf597..2428a9f21d65c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java @@ -14,7 +14,6 @@ import org.apache.lucene.document.InetAddressRange; import org.apache.lucene.document.IntRange; import org.apache.lucene.document.LongRange; -import org.apache.lucene.queries.BinaryDocValuesRangeQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -31,6 +30,7 @@ import org.elasticsearch.index.mapper.RangeFieldMapper.RangeFieldType; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery; import org.elasticsearch.test.IndexSettingsModule; import org.junit.Before; diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index fd34193126a01..288bd6b2339d4 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -12,7 +12,6 @@ import org.apache.lucene.document.LongPoint; 
import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanOrQuery; import org.apache.lucene.queries.spans.SpanTermQuery; @@ -49,6 +48,7 @@ import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.search.QueryStringQueryParser; +import org.elasticsearch.lucene.queries.BlendedTermQuery; import org.elasticsearch.test.AbstractQueryTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java index ade08d877dde9..181b9c7dd3dc8 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java @@ -14,7 +14,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.SpanMatchNoDocsQuery; import org.apache.lucene.queries.spans.FieldMaskingSpanQuery; import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.queries.spans.SpanQuery; @@ -33,6 +32,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.SpanBooleanQueryRewriteWithMaxClause; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.lucene.queries.SpanMatchNoDocsQuery; import org.elasticsearch.test.AbstractQueryTestCase; import org.elasticsearch.xcontent.XContentBuilder; diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java 
b/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java index 2379c73c3b51a..93cfb4d3f17aa 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java @@ -8,12 +8,12 @@ package org.elasticsearch.index.query; -import org.apache.lucene.queries.SpanMatchNoDocsQuery; import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanQuery; import org.apache.lucene.queries.spans.SpanTermQuery; import org.apache.lucene.search.Query; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.lucene.queries.SpanMatchNoDocsQuery; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryParserTests.java b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryParserTests.java index 7e3d6351eb676..e72e7e1d7d723 100644 --- a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryParserTests.java @@ -10,7 +10,6 @@ import org.apache.lucene.analysis.MockSynonymAnalyzer; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; @@ -32,6 +31,7 @@ import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.search.MultiMatchQueryParser.FieldAndBoost; +import org.elasticsearch.lucene.queries.BlendedTermQuery; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.MockKeywordPlugin; diff --git 
a/server/src/test/java/org/elasticsearch/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java b/server/src/test/java/org/elasticsearch/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java index ca85ebb717bb4..c036c6bb5799f 100644 --- a/server/src/test/java/org/elasticsearch/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java +++ b/server/src/test/java/org/elasticsearch/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java @@ -19,10 +19,10 @@ import java.util.Collections; import java.util.Objects; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CONTAINS; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CROSSES; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.INTERSECTS; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.WITHIN; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CONTAINS; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CROSSES; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.INTERSECTS; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.WITHIN; public abstract class BaseRandomBinaryDocValuesRangeQueryTestCase extends BaseRangeFieldQueryTestCase { diff --git a/server/src/test/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQueryTests.java index 2066138bba5d4..bdd5e2c8becfb 100644 --- a/server/src/test/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQueryTests.java @@ -22,10 +22,10 @@ import java.io.IOException; import static java.util.Collections.singleton; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CONTAINS; -import static 
org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CROSSES; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.INTERSECTS; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.WITHIN; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CONTAINS; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CROSSES; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.INTERSECTS; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.WITHIN; public class BinaryDocValuesRangeQueryTests extends ESTestCase { diff --git a/server/src/test/java/org/elasticsearch/lucene/queries/BlendedTermQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/queries/BlendedTermQueryTests.java index 41703db570644..1ab5c4e5d3fb2 100644 --- a/server/src/test/java/org/elasticsearch/lucene/queries/BlendedTermQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/queries/BlendedTermQueryTests.java @@ -17,7 +17,6 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermStates; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DisjunctionMaxQuery; diff --git a/server/src/test/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQueryTests.java index dff9f8e5a093a..dc818e043fab5 100644 --- a/server/src/test/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQueryTests.java @@ -16,7 +16,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; -import 
org.apache.lucene.queries.SpanMatchNoDocsQuery; import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanOrQuery; import org.apache.lucene.queries.spans.SpanQuery; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java index 0b0e68e9f5fb2..24701b59956e8 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java @@ -13,7 +13,6 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.queries.BinaryDocValuesRangeQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; @@ -26,6 +25,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.RangeFieldMapper; import org.elasticsearch.index.mapper.RangeType; +import org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; From 64338bff3ef2fbea8b3bd153d92f79168c8cdd7a Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 25 Nov 2021 13:36:58 +0000 Subject: [PATCH 20/55] [ML] In 8.x ML will have to tolerate model snapshots for 6.4.0+ (#81039) Previously we intended that 8.x ML would only accept model snapshots created in version 7.0.0 or above. However, due to a bug (elastic/ml-cpp#1545) it's not possible to distinguish model snapshots created in versions 6.4.0-7.9.3 inclusive. 
Therefore, to be sure of meeting the stated policy of accepting model snapshots created in 7.0.0 or above ML will have to really accept those labelled as 6.4.0 or above. Fixes #81011 --- .../xpack/ml/integration/AnomalyJobCRUDIT.java | 2 +- .../xpack/ml/action/TransportOpenJobAction.java | 9 +++++---- .../job/task/OpenJobPersistentTasksExecutor.java | 14 ++++++++++---- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java index a7e3238e77f8f..f9ac43ab57d2e 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java @@ -197,7 +197,7 @@ public void testOpenJobWithOldSnapshot() { assertThat( ex.getMessage(), containsString( - "[open-job-with-old-model-snapshot] job snapshot [snap_1] has min version before [7.0.0], " + "[open-job-with-old-model-snapshot] job model snapshot [snap_1] has min version before [7.0.0], " + "please revert to a newer model snapshot or reset the job" ) ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index f167d6534ea15..3cc30e813ccdd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -54,7 +54,8 @@ import java.util.function.Predicate; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.MIN_SUPPORTED_SNAPSHOT_VERSION; +import static 
org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION; +import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION; import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.checkAssignmentState; /* @@ -191,17 +192,17 @@ public void onFailure(Exception e) { return; } assert modelSnapshot.getPage().results().size() == 1; - if (modelSnapshot.getPage().results().get(0).getMinVersion().onOrAfter(MIN_SUPPORTED_SNAPSHOT_VERSION)) { + if (modelSnapshot.getPage().results().get(0).getMinVersion().onOrAfter(MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION)) { modelSnapshotValidationListener.onResponse(true); return; } listener.onFailure( ExceptionsHelper.badRequestException( - "[{}] job snapshot [{}] has min version before [{}], " + "[{}] job model snapshot [{}] has min version before [{}], " + "please revert to a newer model snapshot or reset the job", jobParams.getJobId(), jobParams.getJob().getModelSnapshotId(), - MIN_SUPPORTED_SNAPSHOT_VERSION.toString() + MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION.toString() ) ); }, failure -> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java index 222dd37917492..6c8ea314dd153 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java @@ -74,7 +74,12 @@ public class OpenJobPersistentTasksExecutor extends AbstractJobPersistentTasksExecutor { private static final Logger logger = LogManager.getLogger(OpenJobPersistentTasksExecutor.class); - public static final Version MIN_SUPPORTED_SNAPSHOT_VERSION = Version.V_7_0_0; + // Ideally this would be 7.0.0, but it has to be 6.4.0 because due 
to an oversight it's impossible + // for the Java code to distinguish the model states for versions 6.4.0 to 7.9.3 inclusive. + public static final Version MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION = Version.fromString("6.4.0"); + // We tell the user we support model snapshots newer than 7.0.0 as that's the major version + // boundary, even though behind the scenes we have to support back to 6.4.0. + public static final Version MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION = Version.V_7_0_0; // Resuming a job with a running datafeed from its current snapshot was added in 7.11 and // can only be done if the master node is on or after that version. @@ -425,16 +430,17 @@ private void verifyCurrentSnapshotVersion(String jobId, ActionListener } assert snapshot.getPage().results().size() == 1; ModelSnapshot snapshotObj = snapshot.getPage().results().get(0); - if (snapshotObj.getMinVersion().onOrAfter(MIN_SUPPORTED_SNAPSHOT_VERSION)) { + if (snapshotObj.getMinVersion().onOrAfter(MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION)) { listener.onResponse(true); return; } listener.onFailure( ExceptionsHelper.badRequestException( - "[{}] job snapshot [{}] has min version before [{}], please revert to a newer model snapshot or reset the job", + "[{}] job model snapshot [{}] has min version before [{}], " + + "please revert to a newer model snapshot or reset the job", jobId, jobSnapshotId, - MIN_SUPPORTED_SNAPSHOT_VERSION.toString() + MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION.toString() ) ); }, snapshotFailure -> { From 8ee05f9d9d57894d60802f9dcffd1a488962fa96 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 25 Nov 2021 15:26:30 +0100 Subject: [PATCH 21/55] Fix after restore Lucene.pruneUnreferencedFiles() conditional (#81047) In #68821 we introduced a condition to skip the pruning of unreferenced files after the restore of a snapshot for searchable snapshot shards. Sadly I managed to mess this up in a refactoring (#75308) few months after. 
This commit reintroduces the right conditional which is to NOT prune Lucene files for searchable snapshot shards. --- .../repositories/blobstore/FileRestoreContext.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java index dad6f296512f1..468f5c1e72374 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java @@ -175,7 +175,7 @@ public void restore(SnapshotFiles snapshotFiles, Store store, ActionListener Date: Thu, 25 Nov 2021 14:42:22 +0000 Subject: [PATCH 22/55] Fix split package between libs/cli and server, move org.elasticsearch.cli to org.elasticsearch.common.cli (#81038) Fix the split package org.elasticsearch.cli, between server and the cli library. Move the server org.elasticsearch.cli package to org.elasticsearch.common.cli. Removing split packages is a prerequisite to modularization. 
--- .../cli/keystore/BaseKeyStoreCommand.java | 2 +- .../cli/keystore/CreateKeyStoreCommand.java | 2 +- .../cli/keystore/HasPasswordKeyStoreCommand.java | 2 +- .../org/elasticsearch/cli/keystore/KeyStoreCli.java | 2 +- .../plugins/cli/InstallPluginCommand.java | 2 +- .../elasticsearch/plugins/cli/ListPluginsCommand.java | 2 +- .../java/org/elasticsearch/plugins/cli/PluginCli.java | 2 +- .../elasticsearch/plugins/cli/RemovePluginCommand.java | 2 +- .../src/main/java/org/elasticsearch/cli/Command.java | 2 +- .../cli/EvilEnvironmentAwareCommandTests.java | 1 + server/build.gradle | 10 +--------- .../org/elasticsearch/bootstrap/Elasticsearch.java | 2 +- .../cluster/coordination/ElasticsearchNodeCommand.java | 2 +- .../cluster/coordination/NodeToolCli.java | 2 +- .../{ => common}/cli/CommandLoggingConfigurator.java | 2 +- .../{ => common}/cli/EnvironmentAwareCommand.java | 6 +++++- .../{ => common}/cli/KeyStoreAwareCommand.java | 7 +++++-- .../{ => common}/cli/LoggingAwareCommand.java | 4 +++- .../{ => common}/cli/LoggingAwareMultiCommand.java | 4 +++- .../org/elasticsearch/index/shard/ShardToolCli.java | 2 +- .../license/licensor/tools/KeyPairGeneratorTool.java | 2 +- .../license/licensor/tools/LicenseGeneratorTool.java | 2 +- .../licensor/tools/LicenseVerificationTool.java | 2 +- .../xpack/security/cli/AutoConfigureNode.java | 2 +- .../xpack/security/cli/CertificateGenerateTool.java | 2 +- .../xpack/security/cli/CertificateTool.java | 4 ++-- .../xpack/security/cli/HttpCertificateCommand.java | 2 +- .../authc/esnative/tool/SetupPasswordTool.java | 4 ++-- .../xpack/security/authc/file/tool/UsersTool.java | 4 ++-- .../xpack/security/authc/saml/SamlMetadataCommand.java | 2 +- .../xpack/security/authc/service/FileTokensTool.java | 4 ++-- .../xpack/security/crypto/tool/SystemKeyTool.java | 2 +- .../tool/AutoConfigGenerateElasticPasswordHash.java | 2 +- .../xpack/security/tool/BaseRunAsSuperuserCommand.java | 2 +- .../main/java/org/elasticsearch/xpack/sql/cli/Cli.java | 2 +- 
.../watcher/trigger/schedule/tool/CronEvalTool.java | 2 +- 36 files changed, 52 insertions(+), 48 deletions(-) rename server/src/main/java/org/elasticsearch/{ => common}/cli/CommandLoggingConfigurator.java (97%) rename server/src/main/java/org/elasticsearch/{ => common}/cli/EnvironmentAwareCommand.java (96%) rename server/src/main/java/org/elasticsearch/{ => common}/cli/KeyStoreAwareCommand.java (92%) rename server/src/main/java/org/elasticsearch/{ => common}/cli/LoggingAwareCommand.java (93%) rename server/src/main/java/org/elasticsearch/{ => common}/cli/LoggingAwareMultiCommand.java (93%) diff --git a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/BaseKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/BaseKeyStoreCommand.java index 268cafe16bf1b..f694e8586e6b6 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/BaseKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/BaseKeyStoreCommand.java @@ -12,9 +12,9 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.KeyStoreAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.KeyStoreAwareCommand; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.env.Environment; diff --git a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/CreateKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/CreateKeyStoreCommand.java index 4ad64d8595df1..b78971932b234 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/CreateKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/CreateKeyStoreCommand.java @@ 
-12,9 +12,9 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.KeyStoreAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.KeyStoreAwareCommand; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.env.Environment; diff --git a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/HasPasswordKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/HasPasswordKeyStoreCommand.java index 6a25a84637888..9e8667cc77dae 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/HasPasswordKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/HasPasswordKeyStoreCommand.java @@ -10,9 +10,9 @@ import joptsimple.OptionSet; -import org.elasticsearch.cli.KeyStoreAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.KeyStoreAwareCommand; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.env.Environment; diff --git a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/KeyStoreCli.java b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/KeyStoreCli.java index d751485c0922d..710531a1999ab 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/KeyStoreCli.java +++ b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/KeyStoreCli.java @@ -8,8 +8,8 @@ package org.elasticsearch.cli.keystore; -import org.elasticsearch.cli.LoggingAwareMultiCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.cli.LoggingAwareMultiCommand; /** * A cli tool for managing secrets in the elasticsearch 
keystore. diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginCommand.java index a671be0fe45f9..2f72833c65703 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginCommand.java @@ -11,8 +11,8 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.PluginInfo; diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/ListPluginsCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/ListPluginsCommand.java index aebb33447c0f4..0aed104c7117c 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/ListPluginsCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/ListPluginsCommand.java @@ -11,8 +11,8 @@ import joptsimple.OptionSet; import org.elasticsearch.Version; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.PluginInfo; diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginCli.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginCli.java index bdf3f8395e0e8..f5e5b6136a5b4 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginCli.java +++ 
b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginCli.java @@ -9,8 +9,8 @@ package org.elasticsearch.plugins.cli; import org.elasticsearch.cli.Command; -import org.elasticsearch.cli.LoggingAwareMultiCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.cli.LoggingAwareMultiCommand; import org.elasticsearch.core.internal.io.IOUtils; import java.io.IOException; diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginCommand.java index 0cb0c927f18d4..5654984303116 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginCommand.java @@ -11,8 +11,8 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.env.Environment; import java.util.Arrays; diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/Command.java b/libs/cli/src/main/java/org/elasticsearch/cli/Command.java index 07b5c17c04cf4..1df7e9432f3a1 100644 --- a/libs/cli/src/main/java/org/elasticsearch/cli/Command.java +++ b/libs/cli/src/main/java/org/elasticsearch/cli/Command.java @@ -93,7 +93,7 @@ public final int main(String[] args, Terminal terminal) throws Exception { /** * Executes the command, but all errors are thrown. 
*/ - void mainWithoutErrorHandling(String[] args, Terminal terminal) throws Exception { + protected void mainWithoutErrorHandling(String[] args, Terminal terminal) throws Exception { final OptionSet options = parser.parse(args); if (options.has(helpOption)) { diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/cli/EvilEnvironmentAwareCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/cli/EvilEnvironmentAwareCommandTests.java index c917e681dd963..df43f0ce2da0e 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/cli/EvilEnvironmentAwareCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/cli/EvilEnvironmentAwareCommandTests.java @@ -11,6 +11,7 @@ import joptsimple.OptionSet; import org.apache.lucene.util.TestRuleRestoreSystemProperties; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; diff --git a/server/build.gradle b/server/build.gradle index 1d6a1f4f7689a..c9c7cde70a1c9 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -258,13 +258,5 @@ tasks.named('splitPackagesAudit').configure { // Lucene packages should be owned by Lucene! 
// These are tricky because Lucene itself splits the index package, // but this should be fixed in Lucene 9 - ignoreClasses 'org.apache.lucene.index.LazySoftDeletesDirectoryReaderWrapper', - - // cli is owned by the libs/cli, so these should be moved to o.e.server.cli - 'org.elasticsearch.cli.CommandLoggingConfigurator', - 'org.elasticsearch.cli.EnvironmentAwareCommand', - 'org.elasticsearch.cli.KeyStoreAwareCommand', - 'org.elasticsearch.cli.LoggingAwareCommand', - 'org.elasticsearch.cli.LoggingAwareMultiCommand' - + ignoreClasses 'org.apache.lucene.index.LazySoftDeletesDirectoryReaderWrapper' } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index ae40ea349edb8..c3c864a7d1a44 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -14,10 +14,10 @@ import joptsimple.util.PathConverter; import org.elasticsearch.Build; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.env.Environment; import org.elasticsearch.monitor.jvm.JvmInfo; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java index 1e37f1b654f2d..c7a93007e979b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java @@ -16,7 +16,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import 
org.elasticsearch.action.admin.indices.rollover.Condition; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.cluster.ClusterModule; @@ -27,6 +26,7 @@ import org.elasticsearch.cluster.metadata.ComposableIndexTemplateMetadata; import org.elasticsearch.cluster.metadata.DataStreamMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java index 485da303cce6d..89b2fde4ab38a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java @@ -7,9 +7,9 @@ */ package org.elasticsearch.cluster.coordination; -import org.elasticsearch.cli.CommandLoggingConfigurator; import org.elasticsearch.cli.MultiCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.cli.CommandLoggingConfigurator; import org.elasticsearch.env.NodeRepurposeCommand; import org.elasticsearch.env.OverrideNodeVersionCommand; diff --git a/server/src/main/java/org/elasticsearch/cli/CommandLoggingConfigurator.java b/server/src/main/java/org/elasticsearch/common/cli/CommandLoggingConfigurator.java similarity index 97% rename from server/src/main/java/org/elasticsearch/cli/CommandLoggingConfigurator.java rename to server/src/main/java/org/elasticsearch/common/cli/CommandLoggingConfigurator.java index 3053d1cb92201..41a077cd769f5 100644 --- a/server/src/main/java/org/elasticsearch/cli/CommandLoggingConfigurator.java +++ 
b/server/src/main/java/org/elasticsearch/common/cli/CommandLoggingConfigurator.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.cli; +package org.elasticsearch.common.cli; import org.apache.logging.log4j.Level; import org.elasticsearch.common.logging.LogConfigurator; diff --git a/server/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java b/server/src/main/java/org/elasticsearch/common/cli/EnvironmentAwareCommand.java similarity index 96% rename from server/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java rename to server/src/main/java/org/elasticsearch/common/cli/EnvironmentAwareCommand.java index b35899f098f7f..ed2429bf72fdc 100644 --- a/server/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java +++ b/server/src/main/java/org/elasticsearch/common/cli/EnvironmentAwareCommand.java @@ -6,12 +6,16 @@ * Side Public License, v 1. */ -package org.elasticsearch.cli; +package org.elasticsearch.common.cli; import joptsimple.OptionSet; import joptsimple.OptionSpec; import joptsimple.util.KeyValuePair; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.env.Environment; diff --git a/server/src/main/java/org/elasticsearch/cli/KeyStoreAwareCommand.java b/server/src/main/java/org/elasticsearch/common/cli/KeyStoreAwareCommand.java similarity index 92% rename from server/src/main/java/org/elasticsearch/cli/KeyStoreAwareCommand.java rename to server/src/main/java/org/elasticsearch/common/cli/KeyStoreAwareCommand.java index a103b379ae7e7..3067d477d9cb0 100644 --- a/server/src/main/java/org/elasticsearch/cli/KeyStoreAwareCommand.java +++ b/server/src/main/java/org/elasticsearch/common/cli/KeyStoreAwareCommand.java @@ -6,10 +6,13 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.cli; +package org.elasticsearch.common.cli; import joptsimple.OptionSet; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.env.Environment; @@ -19,7 +22,7 @@ import java.util.Arrays; /** - * An {@link org.elasticsearch.cli.EnvironmentAwareCommand} that needs to access the elasticsearch keystore, possibly + * An {@link EnvironmentAwareCommand} that needs to access the elasticsearch keystore, possibly * decrypting it if it is password protected. */ public abstract class KeyStoreAwareCommand extends EnvironmentAwareCommand { diff --git a/server/src/main/java/org/elasticsearch/cli/LoggingAwareCommand.java b/server/src/main/java/org/elasticsearch/common/cli/LoggingAwareCommand.java similarity index 93% rename from server/src/main/java/org/elasticsearch/cli/LoggingAwareCommand.java rename to server/src/main/java/org/elasticsearch/common/cli/LoggingAwareCommand.java index fa9c20d61f607..9682a5680eb05 100644 --- a/server/src/main/java/org/elasticsearch/cli/LoggingAwareCommand.java +++ b/server/src/main/java/org/elasticsearch/common/cli/LoggingAwareCommand.java @@ -6,7 +6,9 @@ * Side Public License, v 1. */ -package org.elasticsearch.cli; +package org.elasticsearch.common.cli; + +import org.elasticsearch.cli.Command; /** * A command that is aware of logging. 
This class should be preferred over the base {@link Command} class for any CLI tools that depend on diff --git a/server/src/main/java/org/elasticsearch/cli/LoggingAwareMultiCommand.java b/server/src/main/java/org/elasticsearch/common/cli/LoggingAwareMultiCommand.java similarity index 93% rename from server/src/main/java/org/elasticsearch/cli/LoggingAwareMultiCommand.java rename to server/src/main/java/org/elasticsearch/common/cli/LoggingAwareMultiCommand.java index fe005522735f4..d3996d815d1da 100644 --- a/server/src/main/java/org/elasticsearch/cli/LoggingAwareMultiCommand.java +++ b/server/src/main/java/org/elasticsearch/common/cli/LoggingAwareMultiCommand.java @@ -6,7 +6,9 @@ * Side Public License, v 1. */ -package org.elasticsearch.cli; +package org.elasticsearch.common.cli; + +import org.elasticsearch.cli.MultiCommand; /** * A multi-command that is aware of logging. This class should be preferred over the base {@link MultiCommand} class for any CLI tools that diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardToolCli.java b/server/src/main/java/org/elasticsearch/index/shard/ShardToolCli.java index c45cac1c081c4..306db0025c6e0 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardToolCli.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardToolCli.java @@ -7,8 +7,8 @@ */ package org.elasticsearch.index.shard; -import org.elasticsearch.cli.LoggingAwareMultiCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.cli.LoggingAwareMultiCommand; /** * Class encapsulating and dispatching commands from the {@code elasticsearch-shard} command line tool diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java index a4e69f0c1ab87..6cfb3184e73be 100644 --- 
a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java +++ b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java @@ -10,9 +10,9 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.LoggingAwareCommand; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseGeneratorTool.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseGeneratorTool.java index aa1f9bb58471c..4dc1b17b2f9f6 100644 --- a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseGeneratorTool.java +++ b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseGeneratorTool.java @@ -10,11 +10,11 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.cli.LoggingAwareCommand; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.license.License; diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java index 616ff9dff9ee9..87aefeefd6aae 100644 --- a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java +++ 
b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java @@ -10,11 +10,11 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.cli.LoggingAwareCommand; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.license.License; diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java index e3053eaa00378..bea70937d1876 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java @@ -16,7 +16,6 @@ import org.bouncycastle.asn1.x509.GeneralNames; import org.bouncycastle.openssl.jcajce.JcaPEMWriter; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; @@ -25,6 +24,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkUtils; diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java 
b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java index 8a84cbdcb7025..d51d2032617f6 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java @@ -19,11 +19,11 @@ import org.bouncycastle.openssl.jcajce.JcePEMEncryptorBuilder; import org.bouncycastle.pkcs.PKCS10CertificationRequest; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.ssl.PemUtils; import org.elasticsearch.common.util.set.Sets; diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java index c4a9b1119d55e..fea09bf252063 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java @@ -21,13 +21,13 @@ import org.bouncycastle.openssl.jcajce.JcePEMEncryptorBuilder; import org.bouncycastle.pkcs.PKCS10CertificationRequest; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareMultiCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.Terminal.Verbosity; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; +import 
org.elasticsearch.common.cli.EnvironmentAwareCommand; +import org.elasticsearch.common.cli.LoggingAwareMultiCommand; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.ssl.PemUtils; import org.elasticsearch.common.util.set.Sets; diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java index 5cbdb67e53658..5a1d7c270c1d7 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java @@ -21,12 +21,12 @@ import org.bouncycastle.util.io.pem.PemObjectGenerator; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.SuppressForbidden; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.ssl.PemUtils; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java index 5346e2b436902..a1b14bf8db899 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java @@ -12,13 +12,13 @@ import org.elasticsearch.ExceptionsHelper; import 
org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.KeyStoreAwareCommand; -import org.elasticsearch.cli.LoggingAwareMultiCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.Terminal.Verbosity; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cli.KeyStoreAwareCommand; +import org.elasticsearch.common.cli.LoggingAwareMultiCommand; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java index b77e3b334332d..416f71911f0dc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java @@ -10,12 +10,12 @@ import joptsimple.OptionSpec; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareMultiCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; +import org.elasticsearch.common.cli.LoggingAwareMultiCommand; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java index cd74a39e7bc42..15ed84168ae96 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java @@ -14,11 +14,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.KeyStoreAwareCommand; import org.elasticsearch.cli.SuppressForbidden; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cli.KeyStoreAwareCommand; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/FileTokensTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/FileTokensTool.java index b8b4a0f634e90..79e5f74c39b78 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/FileTokensTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/FileTokensTool.java @@ -10,12 +10,12 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareMultiCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; +import org.elasticsearch.common.cli.LoggingAwareMultiCommand; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.XPackSettings; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/crypto/tool/SystemKeyTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/crypto/tool/SystemKeyTool.java index 098157b2d26c1..0c0b151f8cbb4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/crypto/tool/SystemKeyTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/crypto/tool/SystemKeyTool.java @@ -10,10 +10,10 @@ import joptsimple.OptionSpec; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/AutoConfigGenerateElasticPasswordHash.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/AutoConfigGenerateElasticPasswordHash.java index b762f7648775b..cfe1262057883 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/AutoConfigGenerateElasticPasswordHash.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/AutoConfigGenerateElasticPasswordHash.java @@ -10,9 +10,9 @@ import joptsimple.OptionSet; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.KeyStoreAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.KeyStoreAwareCommand; import org.elasticsearch.common.settings.KeyStoreWrapper; import 
org.elasticsearch.common.settings.SecureString; import org.elasticsearch.env.Environment; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java index 22f01d57ee91d..6909da4df03bb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java @@ -12,9 +12,9 @@ import org.elasticsearch.Version; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.KeyStoreAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.KeyStoreAwareCommand; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java index d60e83bc4b536..c87ce9ae2ddcf 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java @@ -10,9 +10,9 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.LoggingAwareCommand; import org.elasticsearch.xpack.sql.cli.command.ClearScreenCliCommand; import org.elasticsearch.xpack.sql.cli.command.CliCommand; import org.elasticsearch.xpack.sql.cli.command.CliCommands; diff --git 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java index de1596412daff..45832d418ec94 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java @@ -10,9 +10,9 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.LoggingAwareCommand; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.xpack.core.scheduler.Cron; From accff0607af5421e10447dbc73a1036b4dd89dc3 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Thu, 25 Nov 2021 16:26:51 +0000 Subject: [PATCH 23/55] Remove superfluous lucene PostingFormat service configuration (#81049) This PR simply removes the org.apache.lucene.codecs.PostingsFormat service configuration file from elasticsearch server. The service implementation is part of lucene, and already configured by Lucene itself. 
--- .../META-INF/services/org.apache.lucene.codecs.PostingsFormat | 1 - 1 file changed, 1 deletion(-) delete mode 100644 server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat deleted file mode 100644 index 2c92f0ecd3f51..0000000000000 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat +++ /dev/null @@ -1 +0,0 @@ -org.apache.lucene.search.suggest.document.Completion50PostingsFormat From 8ab03d021c4bc2d6c0c2e182bf8e424e04850794 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 25 Nov 2021 08:40:50 -0800 Subject: [PATCH 24/55] [DOCS] Edits reset transforms API (#81027) --- docs/reference/rest-api/common-parms.asciidoc | 4 ++-- .../transform/apis/reset-transform.asciidoc | 18 +++++++++++++----- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index c0fe9c9d244f8..8122ad6bd7032 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -1108,8 +1108,8 @@ end::timeoutparms[] tag::transform-id[] Identifier for the {transform}. This identifier can contain lowercase -alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start -and end with alphanumeric characters. +alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 +character limit and must start and end with alphanumeric characters. 
end::transform-id[] tag::transform-id-wildcard[] diff --git a/docs/reference/transform/apis/reset-transform.asciidoc b/docs/reference/transform/apis/reset-transform.asciidoc index 26be42bd0b371..e4d142970e828 100644 --- a/docs/reference/transform/apis/reset-transform.asciidoc +++ b/docs/reference/transform/apis/reset-transform.asciidoc @@ -8,7 +8,7 @@ Reset {transform} ++++ -Resets an existing {transform}. +Resets a {transform}. [[reset-transform-request]] == {api-request-title} @@ -20,7 +20,14 @@ Resets an existing {transform}. * Requires the `manage_transform` cluster privilege. This privilege is included in the `transform_admin` built-in role. -* Before you can reset the {transform}, you must stop it; alternatively, use the `force` query parameter. + +[reset-transform-desc]] +== {api-description-title} + +Before you can reset the {transform}, you must stop it; alternatively, use the +`force` query parameter. + +If the destination index was created by the transform, it is deleted. [[reset-transform-path-parms]] == {api-path-parms-title} @@ -33,9 +40,10 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-id] == {api-query-parms-title} `force`:: -(Optional, Boolean) When `true`, the {transform} is reset regardless of its -current state. The default value is `false`, meaning that the {transform} must be -`stopped` before it can be reset. +(Optional, Boolean) +If this value is `true`, the {transform} is reset regardless of its current +state. If it's false, the {transform} must be `stopped` before it can be reset. 
+The default value is `false` [[reset-transform-examples]] == {api-examples-title} From 8da1236bca03f38b694e4e83a97fb91aca68b1dd Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 25 Nov 2021 09:08:46 -0800 Subject: [PATCH 25/55] [DOCS] Clarify impact of force stop trained model deployment (#81026) --- .../df-analytics/apis/stop-trained-model-deployment.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc b/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc index a486ee37bb239..c3a17da0c5322 100644 --- a/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc +++ b/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc @@ -43,7 +43,8 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-deployments] `force`:: (Optional, Boolean) If true, the deployment is stopped even if it is referenced -by ingest pipelines. +by ingest pipelines. You can't use these pipelines until you restart the model +deployment. //// [role="child_attributes"] From e5de9d8ad7693691739ef1b3fc43ff9943afb737 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 25 Nov 2021 10:06:52 -0800 Subject: [PATCH 26/55] [DOCS] Add actual and typical values in ML alerting docs (#80571) --- .../ml/anomaly-detection/ml-configuring-alerts.asciidoc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc index 6dd13006f4601..3844d5fcd7aed 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc @@ -228,6 +228,9 @@ The list of top records. .Properties of `context.topRecords` [%collapsible%open] ==== +`actual`::: +The actual value for the bucket. + `by_field_value`::: The value of the by field. 
@@ -248,6 +251,9 @@ The field used to segment the analysis. `score`::: A normalized score between 0-100, which is based on the probability of the anomalousness of this record. + +`typical`::: +The typical value for the bucket, according to analytical modeling. ==== [[anomaly-jobs-health-action-variables]] From d51678562e0b79a87f2c18bfae207385997cf311 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 25 Nov 2021 19:48:12 +0000 Subject: [PATCH 27/55] [ML] Expected snapshot min version can now be 8 (#81054) Followup to elastic/ml-cpp#2139 Fixes #81055 Fixes #81070 --- build.gradle | 4 ++-- .../elasticsearch/common/compress/CompressedXContent.java | 4 ++-- .../elasticsearch/upgrades/MlJobSnapshotUpgradeIT.java | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/build.gradle b/build.gradle index 9d5ab644667cc..59fc13e9e1ac8 100644 --- a/build.gradle +++ b/build.gradle @@ -131,9 +131,9 @@ tasks.register("verifyVersions") { * after the backport of the backcompat code is complete. */ -boolean bwc_tests_enabled = false +boolean bwc_tests_enabled = true // place a PR link here when committing bwc changes: -String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/80348" +String bwc_tests_disabled_issue = "" /* * FIPS 140-2 behavior was fixed in 7.11.0. Before that there is no way to run elasticsearch in a * JVM that is properly configured to be in fips mode with BCFIPS. 
For now we need to disable diff --git a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java index d43c78792938a..fa1a4421651ca 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java +++ b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java @@ -169,7 +169,7 @@ public String getSha256() { public static CompressedXContent readCompressedString(StreamInput in) throws IOException { final String sha256; final byte[] compressedData; - if (in.getVersion().onOrAfter(Version.V_8_1_0)) { + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { sha256 = in.readString(); compressedData = in.readByteArray(); } else { @@ -181,7 +181,7 @@ public static CompressedXContent readCompressedString(StreamInput in) throws IOE } public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { out.writeString(sha256); } else { int crc32 = crc32FromCompressed(bytes); diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlJobSnapshotUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlJobSnapshotUpgradeIT.java index 70210a8ffae8f..b65411d82cb10 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlJobSnapshotUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlJobSnapshotUpgradeIT.java @@ -153,8 +153,8 @@ private void testSnapshotUpgrade() throws Exception { GetModelSnapshotsResponse modelSnapshots = getModelSnapshots(job.getId()); assertThat(modelSnapshots.snapshots(), hasSize(2)); - assertThat(modelSnapshots.snapshots().get(0).getMinVersion().major, equalTo((byte) 7)); - assertThat(modelSnapshots.snapshots().get(1).getMinVersion().major, equalTo((byte) 7)); + 
assertThat(modelSnapshots.snapshots().get(0).getMinVersion().major, equalTo(UPGRADE_FROM_VERSION.major)); + assertThat(modelSnapshots.snapshots().get(1).getMinVersion().major, equalTo(UPGRADE_FROM_VERSION.major)); ModelSnapshot snapshot = modelSnapshots.snapshots() .stream() @@ -237,8 +237,8 @@ private void createJobAndSnapshots() throws Exception { GetModelSnapshotsResponse modelSnapshots = getModelSnapshots(job.getId()); assertThat(modelSnapshots.snapshots(), hasSize(2)); - assertThat(modelSnapshots.snapshots().get(0).getMinVersion().major, equalTo((byte) 7)); - assertThat(modelSnapshots.snapshots().get(1).getMinVersion().major, equalTo((byte) 7)); + assertThat(modelSnapshots.snapshots().get(0).getMinVersion().major, equalTo(UPGRADE_FROM_VERSION.major)); + assertThat(modelSnapshots.snapshots().get(1).getMinVersion().major, equalTo(UPGRADE_FROM_VERSION.major)); } private PutJobResponse buildAndPutJob(String jobId, TimeValue bucketSpan) throws Exception { From d29da0270d080e39c8d02df7c831a5e0fe8b377c Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 25 Nov 2021 21:35:35 +0100 Subject: [PATCH 28/55] Make Circuit Breaker Lookup in BigArrays Faster (#81033) I noticed that when benchmarking translog writes (which currently involve copying a lot of bytes across 2 pooled buffers backed by `BigArrays`), a non-trivial amount of time is spent on looking up the circuit breaker. This PR makes that lookup faster in general by using a more efficient map, but also just caches the breaker instance on `BigArrays` itself to not have to do the lookup every 16k during a multi MB write in the first place. 
--- .../java/org/elasticsearch/common/util/BigArrays.java | 11 +++++++++-- .../breaker/HierarchyCircuitBreakerService.java | 3 +-- .../breaker/HierarchyCircuitBreakerServiceTests.java | 6 +++--- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java index e4739879073e1..9b8b20d4cae55 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -391,7 +391,10 @@ public T set(long index, T value) { } final PageCacheRecycler recycler; + @Nullable private final CircuitBreakerService breakerService; + @Nullable + private final CircuitBreaker breaker; private final boolean checkBreaker; private final BigArrays circuitBreakingInstance; private final String breakerName; @@ -410,6 +413,11 @@ protected BigArrays( this.checkBreaker = checkBreaker; this.recycler = recycler; this.breakerService = breakerService; + if (breakerService != null) { + breaker = breakerService.getBreaker(breakerName); + } else { + breaker = null; + } this.breakerName = breakerName; if (checkBreaker) { this.circuitBreakingInstance = this; @@ -427,8 +435,7 @@ protected BigArrays( * we do not add the delta to the breaker if it trips. 
*/ void adjustBreaker(final long delta, final boolean isDataAlreadyCreated) { - if (this.breakerService != null) { - CircuitBreaker breaker = this.breakerService.getBreaker(breakerName); + if (this.breaker != null) { if (this.checkBreaker) { // checking breaker means potentially tripping, but it doesn't // have to if the delta is negative diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 28c5bf3a9a985..8df5e9e8834a1 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -31,7 +31,6 @@ import java.lang.management.ManagementFactory; import java.lang.management.MemoryMXBean; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -204,7 +203,7 @@ public HierarchyCircuitBreakerService(Settings settings, List c } childCircuitBreakers.put(breakerSettings.getName(), validateAndCreateBreaker(breakerSettings)); } - this.breakers = Collections.unmodifiableMap(childCircuitBreakers); + this.breakers = Map.copyOf(childCircuitBreakers); this.parentSettings = new BreakerSettings( CircuitBreaker.PARENT, TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java index 66b7c47b8eedd..487a0e3a34720 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java @@ -243,7 +243,7 @@ public void testBorrowingSiblingBreakerMemory() { 
assertThat(exception.getMessage(), containsString("which is larger than the limit of [209715200/200mb]")); assertThat( exception.getMessage(), - containsString("usages [request=157286400/150mb, fielddata=54001664/51.5mb, inflight_requests=0/0b]") + containsString("usages [fielddata=54001664/51.5mb, request=157286400/150mb, inflight_requests=0/0b]") ); assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT)); } @@ -305,11 +305,11 @@ long currentMemoryUsage() { assertThat( exception.getMessage(), containsString( - "usages [request=" + "usages [fielddata=0/0b, request=" + requestCircuitBreakerUsed + "/" + new ByteSizeValue(requestCircuitBreakerUsed) - + ", fielddata=0/0b, inflight_requests=0/0b]" + + ", inflight_requests=0/0b]" ) ); assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT)); From 7ce8054f1f0212acaceaa0b9712b5ed049c46fec Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 26 Nov 2021 06:41:24 +0100 Subject: [PATCH 29/55] Fix Failures in HierarchyCircuitBreakerServiceTests (#81073) In #81033 the type of map that is iterated over to compute the various usage values was changed. This changed the order of the various breakers in the exception string and made it less deterministic (although it was always just deterministic by accident in these tests). 
=> we shouldn't assume any order in the assertions --- .../HierarchyCircuitBreakerServiceTests.java | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java index 487a0e3a34720..819ded625d1b9 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java @@ -241,10 +241,10 @@ public void testBorrowingSiblingBreakerMemory() { ); assertThat(exception.getMessage(), containsString("[parent] Data too large, data for [should break] would be")); assertThat(exception.getMessage(), containsString("which is larger than the limit of [209715200/200mb]")); - assertThat( - exception.getMessage(), - containsString("usages [fielddata=54001664/51.5mb, request=157286400/150mb, inflight_requests=0/0b]") - ); + assertThat(exception.getMessage(), containsString("usages [")); + assertThat(exception.getMessage(), containsString("fielddata=54001664/51.5mb")); + assertThat(exception.getMessage(), containsString("inflight_requests=0/0b")); + assertThat(exception.getMessage(), containsString("request=157286400/150mb")); assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT)); } } @@ -302,16 +302,13 @@ long currentMemoryUsage() { ) ); final long requestCircuitBreakerUsed = (requestBreaker.getUsed() + reservationInBytes) * 2; + assertThat(exception.getMessage(), containsString("usages [")); + assertThat(exception.getMessage(), containsString("fielddata=0/0b")); assertThat( exception.getMessage(), - containsString( - "usages [fielddata=0/0b, request=" - + requestCircuitBreakerUsed - + "/" - + new ByteSizeValue(requestCircuitBreakerUsed) - + ", inflight_requests=0/0b]" - ) + containsString("request=" + 
requestCircuitBreakerUsed + "/" + new ByteSizeValue(requestCircuitBreakerUsed)) ); + assertThat(exception.getMessage(), containsString("inflight_requests=0/0b")); assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT)); assertEquals(0, requestBreaker.getTrippedCount()); assertEquals(1, service.stats().getStats(CircuitBreaker.PARENT).getTrippedCount()); From f3b5299a2077c24ac98fd478fd3bcdc561acd13b Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 29 Nov 2021 09:38:58 +0100 Subject: [PATCH 30/55] Move TransportGetLicenseAction to SAME Threadpool (#80993) This is motivated by a number of recent SDHs that had these transport actions queue up on the manangement pool. These were not the reason for the blockage on the managment queue, but they are often sent at a high rate by Beats in the same scenarios that see a high rate of stats requests from Beats. Moving them off of the management pool at least makes sure that we don't get Beats retrying them over and over on slowness and generally saves some resources by avoiding ctx switches and having these requests live for longer than necessary. There's no point in running this on the management pool. It should have already been fast enough for SAME with the exception of reading the public key from disk maybe. Made it so the public key is just a constant and doesn't have to be read+deserialized over and over and also cached the verified property for a `License` instance so it should never have to be computed in practice anyway. 
--- .../tools/LicenseVerificationTool.java | 3 ++- .../licensor/LicenseVerificationTests.java | 11 +++++++--- .../org/elasticsearch/license/License.java | 18 ++++++++++++++++ .../elasticsearch/license/LicenseService.java | 14 ++++++------- .../license/LicenseVerifier.java | 21 ++++++++++++------- .../license/LicensesMetadata.java | 2 +- .../license/TransportDeleteLicenseAction.java | 1 - .../license/TransportGetLicenseAction.java | 8 ++----- .../license/LicensesManagerServiceTests.java | 3 +-- 9 files changed, 51 insertions(+), 30 deletions(-) diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java index 87aefeefd6aae..1059b100fc396 100644 --- a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java +++ b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.cli.LoggingAwareCommand; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.license.CryptUtils; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseVerifier; import org.elasticsearch.xcontent.ToXContent; @@ -70,7 +71,7 @@ protected void execute(Terminal terminal, OptionSet options) throws Exception { } // verify - if (LicenseVerifier.verifyLicense(licenseSpec, Files.readAllBytes(publicKeyPath)) == false) { + if (LicenseVerifier.verifyLicense(licenseSpec, CryptUtils.readPublicKey(Files.readAllBytes(publicKeyPath))) == false) { throw new UserException(ExitCodes.DATA_ERROR, "Invalid License!"); } XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); diff --git a/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/LicenseVerificationTests.java 
b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/LicenseVerificationTests.java index bed6e471c1c53..57d0a74ebc0b9 100644 --- a/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/LicenseVerificationTests.java +++ b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/LicenseVerificationTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.license.licensor; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.license.CryptUtils; import org.elasticsearch.license.DateUtils; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseVerifier; @@ -16,28 +17,32 @@ import java.nio.file.Files; import java.nio.file.Path; +import java.security.PublicKey; public class LicenseVerificationTests extends ESTestCase { protected Path pubKeyPath = null; + protected PublicKey publicKey; protected Path priKeyPath = null; @Before public void setup() throws Exception { pubKeyPath = getDataPath("/public.key"); + publicKey = CryptUtils.readPublicKey(Files.readAllBytes(pubKeyPath)); priKeyPath = getDataPath("/private.key"); } @After public void cleanUp() { pubKeyPath = null; + publicKey = null; priKeyPath = null; } public void testGeneratedLicenses() throws Exception { final TimeValue fortyEightHours = TimeValue.timeValueHours(2 * 24); final License license = TestUtils.generateSignedLicense(fortyEightHours, pubKeyPath, priKeyPath); - assertTrue(LicenseVerifier.verifyLicense(license, Files.readAllBytes(pubKeyPath))); + assertTrue(LicenseVerifier.verifyLicense(license, publicKey)); } public void testLicenseTampering() throws Exception { @@ -50,7 +55,7 @@ public void testLicenseTampering() throws Exception { .validate() .build(); - assertFalse(LicenseVerifier.verifyLicense(tamperedLicense, Files.readAllBytes(pubKeyPath))); + assertFalse(LicenseVerifier.verifyLicense(tamperedLicense, publicKey)); } public void testRandomLicenseVerification() throws Exception { @@ -58,7 +63,7 @@ public void 
testRandomLicenseVerification() throws Exception { randomIntBetween(License.VERSION_START, License.VERSION_CURRENT) ); License generatedLicense = generateSignedLicense(licenseSpec, pubKeyPath, priKeyPath); - assertTrue(LicenseVerifier.verifyLicense(generatedLicense, Files.readAllBytes(pubKeyPath))); + assertTrue(LicenseVerifier.verifyLicense(generatedLicense, publicKey)); } private static License generateSignedLicense(TestUtils.LicenseSpec spec, Path pubKeyPath, Path priKeyPath) throws Exception { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java index b62afdd39c818..6026512c712dc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java @@ -471,6 +471,24 @@ private static void validateLimits(String type, int maxNodes, int maxResourceUni } } + private Boolean isVerified; + + public boolean verified() { + final Boolean v = isVerified; + if (v != null) { + return v; + } + final boolean verified = doVerify(); + this.isVerified = verified; + return verified; + } + + private boolean doVerify() { + boolean autoGeneratedLicense = License.isAutoGeneratedLicense(signature()); + return (autoGeneratedLicense && SelfGeneratedLicense.verify(this)) + || (autoGeneratedLicense == false && LicenseVerifier.verifyLicense(this)); + } + public static License readLicense(StreamInput in) throws IOException { int version = in.readVInt(); // Version for future extensibility if (version > VERSION_CURRENT) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index 343520657ebab..d294370979f2d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -23,11 +23,11 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; import org.elasticsearch.protocol.xpack.license.LicensesStatus; import org.elasticsearch.protocol.xpack.license.PutLicenseResponse; import org.elasticsearch.threadpool.ThreadPool; @@ -110,7 +110,7 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste * Currently active license */ private final AtomicReference currentLicense = new AtomicReference<>(); - private SchedulerEngine scheduler; + private final SchedulerEngine scheduler; private final Clock clock; /** @@ -121,7 +121,7 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste /** * Callbacks to notify relative to license expiry */ - private List expirationCallbacks = new ArrayList<>(); + private final List expirationCallbacks = new ArrayList<>(); /** * Which license types are permitted to be uploaded to the cluster @@ -362,7 +362,7 @@ public void triggered(SchedulerEngine.Event event) { /** * Remove license from the cluster state metadata */ - public void removeLicense(final DeleteLicenseRequest request, final ActionListener listener) { + public void removeLicense(final ActionListener listener) { final PostStartBasicRequest startBasicRequest = new PostStartBasicRequest().acknowledge(true); clusterService.submitStateUpdateTask( "delete license", @@ -609,15 +609,13 @@ public static License getLicense(final Metadata metadata) { return getLicense(licensesMetadata); } - static License getLicense(final 
LicensesMetadata metadata) { + static License getLicense(@Nullable final LicensesMetadata metadata) { if (metadata != null) { License license = metadata.getLicense(); if (license == LicensesMetadata.LICENSE_TOMBSTONE) { return license; } else if (license != null) { - boolean autoGeneratedLicense = License.isAutoGeneratedLicense(license.signature()); - if ((autoGeneratedLicense && SelfGeneratedLicense.verify(license)) - || (autoGeneratedLicense == false && LicenseVerifier.verifyLicense(license))) { + if (license.verified()) { return license; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java index f31c7096bae68..0daf6811959ec 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java @@ -21,6 +21,7 @@ import java.nio.ByteBuffer; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; +import java.security.PublicKey; import java.security.Signature; import java.security.SignatureException; import java.util.Arrays; @@ -38,7 +39,7 @@ public class LicenseVerifier { * @param license to verify * @return true if valid, false otherwise */ - public static boolean verifyLicense(final License license, byte[] publicKeyData) { + public static boolean verifyLicense(final License license, PublicKey publicKey) { byte[] signedContent = null; byte[] publicKeyFingerprint = null; try { @@ -58,7 +59,7 @@ public static boolean verifyLicense(final License license, byte[] publicKeyData) XContentBuilder contentBuilder = XContentFactory.contentBuilder(XContentType.JSON); license.toXContent(contentBuilder, new ToXContent.MapParams(Collections.singletonMap(License.LICENSE_SPEC_VIEW_MODE, "true"))); Signature rsa = Signature.getInstance("SHA512withRSA"); - rsa.initVerify(CryptUtils.readPublicKey(publicKeyData)); + 
rsa.initVerify(publicKey); BytesRefIterator iterator = BytesReference.bytes(contentBuilder).iterator(); BytesRef ref; while ((ref = iterator.next()) != null) { @@ -74,15 +75,19 @@ public static boolean verifyLicense(final License license, byte[] publicKeyData) } } - public static boolean verifyLicense(final License license) { - final byte[] publicKeyBytes; + private static final PublicKey PUBLIC_KEY; + + static { try (InputStream is = LicenseVerifier.class.getResourceAsStream("/public.key")) { ByteArrayOutputStream out = new ByteArrayOutputStream(); Streams.copy(is, out); - publicKeyBytes = out.toByteArray(); - } catch (IOException ex) { - throw new IllegalStateException(ex); + PUBLIC_KEY = CryptUtils.readPublicKey(out.toByteArray()); + } catch (IOException e) { + throw new AssertionError("key file is part of the source and must deserialize correctly", e); } - return verifyLicense(license, publicKeyBytes); + } + + public static boolean verifyLicense(final License license) { + return verifyLicense(license, PUBLIC_KEY); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java index 10d11553ddfd7..0ff68b6b562f0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java @@ -52,7 +52,7 @@ public class LicensesMetadata extends AbstractNamedDiffable .expiryDate(0) .build(); - private License license; + private final License license; // This field describes the version of x-pack for which this cluster has exercised a trial. If the field // is null, then no trial has been exercised. 
We keep the version to leave open the possibility that we diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java index ec1c075f888d9..f715592fc5f6c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java @@ -61,7 +61,6 @@ protected void masterOperation( final ActionListener listener ) throws ElasticsearchException { licenseService.removeLicense( - request, listener.delegateFailure( (l, postStartBasicResponse) -> l.onResponse(AcknowledgedResponse.of(postStartBasicResponse.isAcknowledged())) ) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java index 75ecf85968283..988b77595e3f1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java @@ -23,13 +23,10 @@ public class TransportGetLicenseAction extends TransportMasterNodeReadAction { - private final LicenseService licenseService; - @Inject public TransportGetLicenseAction( TransportService transportService, ClusterService clusterService, - LicenseService licenseService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver @@ -43,9 +40,8 @@ public TransportGetLicenseAction( GetLicenseRequest::new, indexNameExpressionResolver, GetLicenseResponse::new, - ThreadPool.Names.MANAGEMENT + ThreadPool.Names.SAME ); - this.licenseService = licenseService; } @Override @@ -60,6 +56,6 @@ protected void masterOperation( ClusterState state, final ActionListener listener ) throws ElasticsearchException { - 
listener.onResponse(new GetLicenseResponse(licenseService.getLicense())); + listener.onResponse(new GetLicenseResponse(LicenseService.getLicense(state.metadata()))); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java index 7b6162211189d..4595694d99f1d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; import org.elasticsearch.protocol.xpack.license.LicensesStatus; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; @@ -125,7 +124,7 @@ public void testRemoveLicenses() throws Exception { private void removeAndAckSignedLicenses(final LicenseService licenseService) { final CountDownLatch latch = new CountDownLatch(1); final AtomicBoolean success = new AtomicBoolean(false); - licenseService.removeLicense(new DeleteLicenseRequest(), new ActionListener() { + licenseService.removeLicense(new ActionListener() { @Override public void onResponse(PostStartBasicResponse postStartBasicResponse) { if (postStartBasicResponse.isAcknowledged()) { From 5cb5d921b68f41b0c4a559f2360dcb78f666ac61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Francisco=20Fern=C3=A1ndez=20Casta=C3=B1o?= Date: Mon, 29 Nov 2021 11:12:14 +0100 Subject: [PATCH 31/55] Fix race condition in SnapshotBasedIndexRecoveryIT (#79404) If we don't cancel the re-location of the index to the same target node, it is possible that the recovery is retried, meaning that it's possible that the available permit is granted to 
indexRecoveredFromSnapshot1 instead of to indexRecoveredFromSnapshot2. Relates #79316 Closes #79420 --- .../recovery/PeerRecoveryTargetService.java | 7 +++++- .../SnapshotBasedIndexRecoveryIT.java | 23 ++++++++++++++++--- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 632e495e15a71..968555e0628b3 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -169,7 +169,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh } public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourceNode, final RecoveryListener listener) { - final Releasable snapshotFileDownloadsPermit = recoverySettings.tryAcquireSnapshotDownloadPermits(); + final Releasable snapshotFileDownloadsPermit = tryAcquireSnapshotDownloadPermits(); // create a new recovery status, and process... final long recoveryId = onGoingRecoveries.startRecovery( indexShard, @@ -258,6 +258,11 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi ); } + // Visible for testing + public Releasable tryAcquireSnapshotDownloadPermits() { + return recoverySettings.tryAcquireSnapshotDownloadPermits(); + } + /** * Prepare the start recovery request. 
* diff --git a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java index a76cc2c6018c7..40bc86fbf77b9 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.CheckedRunnable; +import org.elasticsearch.core.Releasable; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.MergePolicyConfig; @@ -85,6 +86,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS; import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE; @@ -914,7 +916,6 @@ public void testRecoveryUsingSnapshotsIsThrottledPerNode() throws Exception { ); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/79420") public void testRecoveryUsingSnapshotsPermitIsReturnedAfterFailureOrCancellation() throws Exception { executeRecoveryWithSnapshotFileDownloadThrottled( ( @@ -930,7 
+931,12 @@ public void testRecoveryUsingSnapshotsPermitIsReturnedAfterFailureOrCancellation client().admin() .indices() .prepareUpdateSettings(indexRecoveredFromSnapshot1) - .setSettings(Settings.builder().put("index.routing.allocation.require._name", targetNode)) + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.routing.allocation.require._name", (String) null) + .put("index.routing.allocation.include._name", sourceNode + "," + targetNode) + ) .get() ); @@ -963,6 +969,16 @@ public void testRecoveryUsingSnapshotsPermitIsReturnedAfterFailureOrCancellation targetMockTransportService.clearAllRules(); channelRef.get().sendResponse(new IOException("unable to clean files")); + PeerRecoveryTargetService peerRecoveryTargetService = internalCluster().getInstance( + PeerRecoveryTargetService.class, + targetNode + ); + assertBusy(() -> { + // Wait until the current RecoveryTarget releases the snapshot download permit + try (Releasable snapshotDownloadPermit = peerRecoveryTargetService.tryAcquireSnapshotDownloadPermits()) { + assertThat(snapshotDownloadPermit, is(notNullValue())); + } + }); } String indexRecoveredFromSnapshot2 = indices.get(1); @@ -1140,10 +1156,11 @@ private void executeRecoveryWithSnapshotFileDownloadThrottled(SnapshotBasedRecov indexName, Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") .put("index.routing.allocation.require._name", dataNodes.get(0)) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), 0) .build() ); indices.add(indexName); From c0b4b6080d721991ad7c5d666536b80561ad84bf Mon Sep 17 00:00:00 2001 From: Christos Soulios <1561376+csoulios@users.noreply.github.com> Date: Mon, 29 Nov 2021 12:44:28 +0200 Subject: [PATCH 32/55] TSDB: Add `_tsid` field to 
time_series indices (#80276) This PR adds support for a field named _tsid that uniquely identifies the time series a document belongs to. When a document is indexed in a time series index (IndexMode.TIME_SERIES), _tsid field is generated from the values of all dimension fields. --- .../upgrades/FullClusterRestartIT.java | 4 +- .../elasticsearch/upgrades/IndexingIT.java | 18 +- ...dimension_and_metric_in_non_tsdb_index.yml | 36 ++ .../rest-api-spec/test/tsdb/10_settings.yml | 2 + .../rest-api-spec/test/tsdb/20_mapping.yml | 5 +- .../rest-api-spec/test/tsdb/30_snapshot.yml | 9 +- .../rest-api-spec/test/tsdb/40_search.yml | 82 ++- .../rest-api-spec/test/tsdb/50_alias.yml | 54 +- .../test/tsdb/60_add_dimensions.yml | 40 +- .../test/tsdb/70_dimension_types.yml | 84 ++- .../test/tsdb/80_index_resize.yml | 22 +- .../org/elasticsearch/index/IndexMode.java | 21 + .../elasticsearch/index/IndexSortConfig.java | 18 +- .../index/mapper/IpFieldMapper.java | 21 +- .../index/mapper/KeywordFieldMapper.java | 34 +- .../index/mapper/LuceneDocument.java | 31 + .../index/mapper/MapperService.java | 9 +- .../index/mapper/NumberFieldMapper.java | 27 +- .../index/mapper/TimeSeriesIdFieldMapper.java | 226 +++++++ .../elasticsearch/indices/IndicesModule.java | 2 + .../elasticsearch/search/DocValueFormat.java | 29 + .../elasticsearch/search/SearchModule.java | 1 + .../bucket/terms/StringTerms.java | 3 + .../index/IndexSortSettingsTests.java | 88 ++- .../mapper/FieldFilterMapperPluginTests.java | 14 +- .../index/mapper/KeywordFieldMapperTests.java | 2 +- .../index/mapper/MappingParserTests.java | 9 +- .../mapper/TimeSeriesIdFieldMapperTests.java | 573 ++++++++++++++++++ .../indices/IndicesModuleTests.java | 2 + .../index/mapper/MapperServiceTestCase.java | 4 + 30 files changed, 1308 insertions(+), 162 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java create mode 100644 
server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 745276d3c4145..4f7082e86781c 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -225,7 +225,7 @@ public void testNewReplicas() throws Exception { } public void testSearchTimeSeriesMode() throws Exception { - assumeTrue("time series mode introduced in 8.0.0", getOldClusterVersion().onOrAfter(Version.V_8_0_0)); + assumeTrue("time series index sort by _tsid introduced in 8.1.0", getOldClusterVersion().onOrAfter(Version.V_8_1_0)); int numDocs; if (isRunningAgainstOldCluster()) { numDocs = createTimeSeriesModeIndex(1); @@ -267,7 +267,7 @@ public void testSearchTimeSeriesMode() throws Exception { } public void testNewReplicasTimeSeriesMode() throws Exception { - assumeTrue("time series mode introduced in 8.0.0", getOldClusterVersion().onOrAfter(Version.V_8_0_0)); + assumeTrue("time series index sort by _tsid introduced in 8.1.0", getOldClusterVersion().onOrAfter(Version.V_8_1_0)); if (isRunningAgainstOldCluster()) { createTimeSeriesModeIndex(0); } else { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index 21367aba17978..52bbd2b41bf9b 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -258,7 +258,7 @@ private void bulk(String index, String valueSuffix, int count) throws IOExceptio } public void testTsdb() throws IOException { - assumeTrue("tsdb added in 8.0.0", 
UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_0_0)); + assumeTrue("sort by _tsid added in 8.1.0", UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_1_0)); StringBuilder bulk = new StringBuilder(); switch (CLUSTER_TYPE) { @@ -343,20 +343,9 @@ private void assertTsdbAgg(Matcher... expected) throws IOException { Request request = new Request("POST", "/tsdb/_search"); request.addParameter("size", "0"); XContentBuilder body = JsonXContent.contentBuilder().startObject(); - // TODO replace tsid runtime field with real tsid - body.startObject("runtime_mappings"); - { - body.startObject("tsid"); - { - body.field("type", "keyword"); - body.field("script", "emit('dim:' + doc['dim'].value)"); - } - body.endObject(); - } - body.endObject(); body.startObject("aggs").startObject("tsids"); { - body.startObject("terms").field("field", "tsid").endObject(); + body.startObject("terms").field("field", "_tsid").endObject(); body.startObject("aggs").startObject("avg"); { body.startObject("avg").field("field", "value").endObject(); @@ -367,8 +356,7 @@ private void assertTsdbAgg(Matcher... 
expected) throws IOException { request.setJsonEntity(Strings.toString(body.endObject())); ListMatcher tsidsExpected = matchesList(); for (int d = 0; d < expected.length; d++) { - // Object key = Map.of("dim", TSDB_DIMS.get(d)); TODO use this once tsid is real - Object key = "dim:" + TSDB_DIMS.get(d); + Object key = Map.of("dim", TSDB_DIMS.get(d)); tsidsExpected = tsidsExpected.item(matchesMap().extraOk().entry("key", key).entry("avg", Map.of("value", expected[d]))); } assertMap( diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml index ec67748212a5c..7b1b848240d1e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml @@ -178,3 +178,39 @@ can't shadow metrics: runtime_mappings: deep.deeper.deepest: type: keyword + +--- +# Test that _tsid field is not added if an index is not a time-series index +no _tsid in standard indices: + - skip: + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 + + - do: + indices.create: + index: test + body: + settings: + index: + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + + - do: + field_caps: + index: test + fields: [metricset, _tsid] + + - match: {fields.metricset.keyword.searchable: true} + - match: {fields.metricset.keyword.aggregatable: true} + - match: {fields.metricset.keyword.time_series_dimension: true} + - is_false: fields.metricset.keyword.indices + - is_false: fields.metricset.keyword.non_searchable_indices + - is_false: fields.metricset.keyword.non_aggregatable_indices + - is_false: fields._tsid # _tsid metadata field must not 
exist in non-time-series indices diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml index 709f633e74820..eed1ccb7247cf 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml @@ -186,6 +186,8 @@ set start_time and end_time: end_time: 1632625792000 mappings: properties: + "@timestamp": + type: date metricset: type: keyword time_series_dimension: true diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml index c7b8b97b32ff4..1d11cde944d45 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml @@ -204,6 +204,9 @@ runtime field matching routing path: properties: "@timestamp": type: date + dim_kw: + type: "keyword" + time_series_dimension: true dim: type: object dynamic: runtime @@ -214,7 +217,7 @@ runtime field matching routing path: index: test body: - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:04.467Z", "dim": {"foo": "a"}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "dim_kw": "dim", "dim": {"foo": "a"}}' - match: {items.0.index.error.reason: "All fields that match routing_path must be keywords with [time_series_dimension: true] and without the [script] parameter. 
[dim.foo] was a runtime [keyword]."} --- diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml index e606e4dd82ca2..cb4c0ce663536 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml @@ -17,8 +17,8 @@ teardown: --- "Create a snapshot and then restore it": - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 features: ["allowed_warnings"] # Create index @@ -134,10 +134,13 @@ teardown: search: index: test_index body: + fields: + - field: _tsid query: query_string: query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' - match: {hits.total.value: 1} - match: {hits.hits.0._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507} - # TODO assert the _tsid once we generate it + - match: {hits.hits.0.fields._tsid: [ { k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod } ] } + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml index 223d87ab96a09..ca8b32fb0c89f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml @@ -90,7 +90,20 @@ query a metric: - match: {hits.total.value: 1} -# TODO add test showing that quering _tsid fails +--- +"query tsid fails": + - skip: + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 + + - do: + catch: /\[_tsid\] is not searchable/ + search: + index: test + body: + query: + term: + _tsid: wont't work --- fetch a dimension: @@ -151,7 +164,24 @@ fetch a tag: - match: {hits.hits.0.fields.k8s\.pod\.ip: ['10.10.55.2']} - is_false: 
hits.hits.0.fields._tsid # tsid isn't fetched by default -# TODO add test to fetch the tsid +--- +"fetch the tsid": + - skip: + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 + + - do: + search: + index: test + body: + fields: + - field: _tsid + query: + query_string: + query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} --- aggregate a dimension: @@ -229,23 +259,44 @@ aggregate a tag: - match: {aggregations.ips.buckets.2.key: 10.10.55.3} - match: {aggregations.ips.buckets.2.doc_count: 4} +--- +"aggregate the tsid": + - skip: + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 + + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc -# TODO add a test aggregating the _tsid + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - match: {aggregations.tsids.buckets.1.key: {k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9, metricset: pod}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} --- field capabilities: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: field_caps: index: test - fields: [k8s.pod.uid, k8s.pod.network.rx, k8s.pod.ip, _tsid] + fields: [k8s.pod.uid, k8s.pod.network.rx, k8s.pod.ip, metricset, _tsid] - # TODO assert time_series_metric and time_series_dimension - - match: {fields.k8s\.pod\.uid.keyword.searchable: true} - - match: {fields.k8s\.pod\.uid.keyword.aggregatable: true} + - match: {fields.k8s\.pod\.uid.keyword.searchable: true} + - match: {fields.k8s\.pod\.uid.keyword.aggregatable: true} + - match: {fields.k8s\.pod\.uid.keyword.time_series_dimension: true} - 
is_false: fields.k8s\.pod\.uid.keyword.indices - is_false: fields.k8s\.pod\.uid.keyword.non_searchable_indices - is_false: fields.k8s\.pod\.uid.keyword.non_aggregatable_indices @@ -259,4 +310,15 @@ field capabilities: - is_false: fields.k8s\.pod\.ip.ip.indices - is_false: fields.k8s\.pod\.ip.ip.non_searchable_indices - is_false: fields.k8s\.pod\.ip.ip.non_aggregatable_indices - # TODO assert tsid once we build it: + - match: {fields.metricset.keyword.searchable: true} + - match: {fields.metricset.keyword.aggregatable: true} + - match: {fields.metricset.keyword.time_series_dimension: true} + - is_false: fields.metricset.keyword.indices + - is_false: fields.metricset.keyword.non_searchable_indices + - is_false: fields.metricset.keyword.non_aggregatable_indices + - match: {fields._tsid._tsid.metadata_field: true} + - match: {fields._tsid._tsid.searchable: false} + - match: {fields._tsid._tsid.aggregatable: true} + - is_false: fields._tsid._tsid.indices + - is_false: fields._tsid._tsid.non_searchable_indices + - is_false: fields._tsid._tsid.non_aggregatable_indices diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_alias.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_alias.yml index 5a187ce0b6430..f404213bb5113 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_alias.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_alias.yml @@ -57,13 +57,40 @@ setup: - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' -# TODO search on _tsid in an alias +--- +search an alias: + - skip: + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 + + - do: + indices.put_alias: + index: test + name: test_alias + + - do: + search: + index: test_alias + body: + size: 0 + aggs: + tsids: + 
terms: + field: _tsid + order: + _key: asc + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - match: {aggregations.tsids.buckets.1.key: {k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9, metricset: pod}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} --- index into alias: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.put_alias: @@ -85,4 +112,23 @@ index into alias: - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39", "ip": "10.10.55.4", "network": {"tx": 1434595272, "rx": 530605511}}}}' - match: {errors: false} - # TODO search on tsid once we generate it + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + + - match: {hits.total.value: 12} + - match: {aggregations.tsids.buckets.0.key: {k8s.pod.uid: 1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - match: {aggregations.tsids.buckets.1.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - match: {aggregations.tsids.buckets.2.key: {k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9, metricset: pod}} + - match: {aggregations.tsids.buckets.2.doc_count: 4} + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_add_dimensions.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_add_dimensions.yml index ca4aa52e15a13..b17b1303b4245 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_add_dimensions.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_add_dimensions.yml @@ 
-1,8 +1,8 @@ --- add dimensions with put_mapping: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -41,18 +41,18 @@ add dimensions with put_mapping: index: test body: fields: - # TODO fetch the tsid + - field: _tsid - field: "@timestamp" - match: {hits.total.value: 1} - # TODO Fetch the tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- add dimensions to no dims with dynamic_template over index: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -87,17 +87,17 @@ add dimensions to no dims with dynamic_template over index: index: test body: fields: - # TODO fetch the tsid + - field: _tsid - field: "@timestamp" - match: {hits.total.value: 1} - # TODO fetch the tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- add dimensions to no dims with dynamic_template over bulk: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -132,17 +132,17 @@ add dimensions to no dims with dynamic_template over bulk: index: test body: fields: - # TODO fetch tsid + - field: _tsid - field: "@timestamp" - match: {hits.total.value: 1} - # TODO fetch tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- add dimensions to some dims with dynamic_template over index: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -181,17 +181,17 @@ add dimensions to some dims with dynamic_template over index: index: test body: 
fields: - # TODO fetch tsid + - field: _tsid - field: "@timestamp" - match: {hits.total.value: 1} - # TODO fetch tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat, other_dim: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- add dimensions to some dims with dynamic_template over bulk: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -229,8 +229,8 @@ add dimensions to some dims with dynamic_template over bulk: index: test body: fields: - # TODO fetch tsid + - field: _tsid - field: "@timestamp" - match: {hits.total.value: 1} - # TODO fetch tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat, other_dim: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_dimension_types.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_dimension_types.yml index 06eb087567238..aed895a97980c 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_dimension_types.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_dimension_types.yml @@ -1,8 +1,8 @@ keyword dimension: - skip: features: close_to - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -43,14 +43,36 @@ keyword dimension: - '{"@timestamp": "2021-04-28T18:35:54.467Z", "uid": "df3145b3-0563-4d3b-a0f7-897eb2876ea9", "voltage": 3.3}' - is_false: errors - # TODO aggregate on tsid + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + aggs: + voltage: + avg: + field: voltage + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {uid: 947e4ced-1786-4e53-9e0c-5c447e959507}} + - match: 
{aggregations.tsids.buckets.0.doc_count: 4} + - close_to: {aggregations.tsids.buckets.0.voltage.value: { value: 7.3, error: 0.01 }} + - match: {aggregations.tsids.buckets.1.key: {uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - close_to: {aggregations.tsids.buckets.1.voltage.value: { value: 3.3, error: 0.01 }} --- long dimension: - skip: features: close_to - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -93,14 +115,36 @@ long dimension: - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:35:54.467Z", "metricset": "aa", "id": 2, "voltage": 3.3}' - # TODO aggregate on tsid + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + aggs: + voltage: + avg: + field: voltage + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {id: 1, metricset: aa}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - close_to: {aggregations.tsids.buckets.0.voltage.value: { value: 7.3, error: 0.01 }} + - match: {aggregations.tsids.buckets.1.key: {id: 2, metricset: aa }} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - close_to: {aggregations.tsids.buckets.1.voltage.value: { value: 3.3, error: 0.01 }} --- ip dimension: - skip: features: close_to - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -143,4 +187,26 @@ ip dimension: - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:35:54.467Z", "metricset": "aa", "ip": "2001:0db8:85a3::8a2e:0370:7334", "voltage": 3.3}' - # TODO aggregate on tsid + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + aggs: + voltage: + avg: + field: voltage + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: { ip: "10.10.1.1", 
metricset: aa}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - close_to: {aggregations.tsids.buckets.0.voltage.value: { value: 7.3, error: 0.01 }} + - match: {aggregations.tsids.buckets.1.key: { ip: "2001:db8:85a3::8a2e:370:7334", metricset: aa }} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - close_to: {aggregations.tsids.buckets.1.voltage.value: { value: 3.3, error: 0.01 }} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml index 8dd33551912a4..2584fdf4dd2db 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml @@ -100,19 +100,19 @@ split: index: test_split body: fields: - # TODO fetch tsid + - field: _tsid query: query_string: query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' - match: {hits.total.value: 1} - # TODO test fetching tsid + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} --- shrink: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.shrink: @@ -126,19 +126,20 @@ shrink: search: index: test_shrink body: - # TODO test fetching tsid + fields: + - field: _tsid query: query_string: query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' - match: {hits.total.value: 1} - # TODO test fetching tsid + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} --- clone: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.clone: @@ -149,10 +150,11 @@ clone: search: index: test_clone body: - # TODO test fetching tsid + fields: + - field: _tsid query: 
query_string: query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' - match: {hits.total.value: 1} - # TODO test fetching tsid + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 0107a1ad817d8..ec0e689dfb238 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -17,7 +17,9 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import java.io.IOException; import java.util.Collections; @@ -67,6 +69,13 @@ public void validateTimestampFieldMapping(boolean isDataStream, MappingLookup ma public Map getDefaultMapping() { return Collections.emptyMap(); } + + @Override + public MetadataFieldMapper buildTimeSeriesIdFieldMapper() { + // non time-series indices must not have a TimeSeriesIdFieldMapper + return null; + } + }, TIME_SERIES { @Override @@ -124,6 +133,11 @@ private String routingRequiredBad() { private String tsdbMode() { return "[" + IndexSettings.MODE.getKey() + "=time_series]"; } + + @Override + public MetadataFieldMapper buildTimeSeriesIdFieldMapper() { + return TimeSeriesIdFieldMapper.INSTANCE; + } }; public static final Map DEFAULT_TIME_SERIES_TIMESTAMP_MAPPING = Map.of( @@ -177,4 +191,11 @@ private String tsdbMode() { * @return */ public abstract Map getDefaultMapping(); + + /** + * Return an instance of the {@link TimeSeriesIdFieldMapper} that generates + * the _tsid field. The field mapper will be added to the list of the metadata + * field mappers for the index. 
+ */ + public abstract MetadataFieldMapper buildTimeSeriesIdFieldMapper(); } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java index a9591dd76279f..a95b63321171f 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java @@ -18,7 +18,9 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.sort.SortOrder; @@ -127,11 +129,21 @@ private static MultiValueMode parseMultiValueMode(String value) { final FieldSortSpec[] sortSpecs; private final Version indexCreatedVersion; private final String indexName; + private final IndexMode indexMode; public IndexSortConfig(IndexSettings indexSettings) { final Settings settings = indexSettings.getSettings(); this.indexCreatedVersion = indexSettings.getIndexVersionCreated(); this.indexName = indexSettings.getIndex().getName(); + this.indexMode = indexSettings.getMode(); + + if (this.indexMode == IndexMode.TIME_SERIES) { + this.sortSpecs = new FieldSortSpec[] { + new FieldSortSpec(TimeSeriesIdFieldMapper.NAME), + new FieldSortSpec(DataStreamTimestampFieldMapper.DEFAULT_PATH) }; + return; + } + List fields = INDEX_SORT_FIELD_SETTING.get(settings); this.sortSpecs = fields.stream().map((name) -> new FieldSortSpec(name)).toArray(FieldSortSpec[]::new); @@ -198,7 +210,11 @@ public Sort buildIndexSort( FieldSortSpec sortSpec = sortSpecs[i]; final MappedFieldType ft = fieldTypeLookup.apply(sortSpec.field); if (ft == null) { - throw new 
IllegalArgumentException("unknown index sort field:[" + sortSpec.field + "]"); + String err = "unknown index sort field:[" + sortSpec.field + "]"; + if (this.indexMode == IndexMode.TIME_SERIES) { + err += " required by [" + IndexSettings.MODE.getKey() + "=time_series]"; + } + throw new IllegalArgumentException(err); } if (Objects.equals(ft.name(), sortSpec.field) == false) { if (this.indexCreatedVersion.onOrAfter(Version.V_7_13_0)) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 1b30adbae695d..b1497c8e988dc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -19,9 +19,11 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -497,18 +499,17 @@ private static InetAddress value(XContentParser parser, InetAddress nullValue) t } private void indexValue(DocumentParserContext context, InetAddress address) { + if (dimension) { + // Encode the tsid part of the dimension field if the _tsid field is enabled. + // If the _tsid field is not enabled, we can skip the encoding part. + BytesReference bytes = context.getMetadataMapper(TimeSeriesIdFieldMapper.NAME) != null + ? 
TimeSeriesIdFieldMapper.encodeTsidValue(NetworkAddress.format(address)) + : null; + context.doc().addDimensionBytes(fieldType().name(), bytes); + } if (indexed) { Field field = new InetAddressPoint(fieldType().name(), address); - if (dimension) { - // Add dimension field with key so that we ensure it is single-valued. - // Dimension fields are always indexed. - if (context.doc().getByKey(fieldType().name()) != null) { - throw new IllegalArgumentException("Dimension field [" + fieldType().name() + "] cannot be a multi-valued field."); - } - context.doc().addWithKey(fieldType().name(), field); - } else { - context.doc().add(field); - } + context.doc().add(field); } if (hasDocValues) { context.doc().add(new SortedSetDocValuesField(fieldType().name(), new BytesRef(InetAddressPoint.encode(address)))); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index e946e4fc800f4..dd5c412dfabe5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -29,6 +29,7 @@ import org.apache.lucene.util.automaton.CompiledAutomaton.AUTOMATON_TYPE; import org.apache.lucene.util.automaton.MinimizationOperations; import org.apache.lucene.util.automaton.Operations; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.AutomatonQueries; import org.elasticsearch.index.analysis.IndexAnalyzers; @@ -69,6 +70,8 @@ public static class Defaults { FIELD_TYPE.setIndexOptions(IndexOptions.DOCS); FIELD_TYPE.freeze(); } + + public static final int IGNORE_ABOVE = Integer.MAX_VALUE; } public static class KeywordField extends Field { @@ -102,7 +105,7 @@ public static class Builder extends FieldMapper.Builder { "ignore_above", true, m -> toType(m).ignoreAbove, - Integer.MAX_VALUE + 
Defaults.IGNORE_ABOVE ); private final Parameter indexOptions = Parameter.restrictedStringParam( @@ -503,9 +506,6 @@ public void validateMatchedRoutingPath() { } } - /** The maximum keyword length allowed for a dimension field */ - private static final int DIMENSION_MAX_BYTES = 1024; - private final boolean indexed; private final boolean hasDocValues; private final String nullValue; @@ -587,7 +587,6 @@ protected void indexScriptValues( } private void indexValue(DocumentParserContext context, String value) { - if (value == null) { return; } @@ -598,27 +597,20 @@ private void indexValue(DocumentParserContext context, String value) { } value = normalizeValue(fieldType().normalizer(), name(), value); + if (dimension) { + // Encode the tsid part of the dimension field. Although, it would seem reasonable + // to skip the encode part if we don't generate a _tsid field (as we do with number + // and ip fields), we keep this test because we must ensure that the value of this + // dimension field is not larger than TimeSeriesIdFieldMapper.DIMENSION_VALUE_LIMIT + BytesReference bytes = TimeSeriesIdFieldMapper.encodeTsidValue(value); + context.doc().addDimensionBytes(fieldType().name(), bytes); + } // convert to utf8 only once before feeding postings/dv/stored fields final BytesRef binaryValue = new BytesRef(value); - if (dimension && binaryValue.length > DIMENSION_MAX_BYTES) { - throw new IllegalArgumentException( - "Dimension field [" + fieldType().name() + "] cannot be more than [" + DIMENSION_MAX_BYTES + "] bytes long." 
- ); - } if (fieldType.indexOptions() != IndexOptions.NONE || fieldType.stored()) { Field field = new KeywordField(fieldType().name(), binaryValue, fieldType); - if (dimension) { - // Check that a dimension field is single-valued and not an array - if (context.doc().getByKey(fieldType().name()) != null) { - throw new IllegalArgumentException("Dimension field [" + fieldType().name() + "] cannot be a multi-valued field."); - } - // Add dimension field with key so that we ensure it is single-valued. - // Dimension fields are always indexed. - context.doc().addWithKey(fieldType().name(), field); - } else { - context.doc().add(field); - } + context.doc().add(field); if (fieldType().hasDocValues() == false && fieldType.omitNorms()) { context.addToFieldNames(fieldType().name()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LuceneDocument.java b/server/src/main/java/org/elasticsearch/index/mapper/LuceneDocument.java index 22b5d8bfc8ffa..3cb2b030ebeff 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/LuceneDocument.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/LuceneDocument.java @@ -10,12 +10,16 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; /** * Fork of {@link org.apache.lucene.document.Document} with additional functionality. @@ -27,6 +31,12 @@ public class LuceneDocument implements Iterable { private final String prefix; private final List fields; private Map keyedFields; + /** + * A sorted map of the serialized values of dimension fields that will be used + * for generating the _tsid field. The map will be used by {@link TimeSeriesIdFieldMapper} + * to build the _tsid field for the document. 
+ */ + private SortedMap dimensionBytes; LuceneDocument(String path, LuceneDocument parent) { fields = new ArrayList<>(); @@ -99,6 +109,27 @@ public IndexableField getByKey(Object key) { return keyedFields == null ? null : keyedFields.get(key); } + /** + * Add the serialized byte reference for a dimension field. This will be used by {@link TimeSeriesIdFieldMapper} + * to build the _tsid field for the document. + */ + public void addDimensionBytes(String fieldName, BytesReference tsidBytes) { + if (dimensionBytes == null) { + // It is a {@link TreeMap} so that it is order by field name. + dimensionBytes = new TreeMap<>(); + } else if (dimensionBytes.containsKey(fieldName)) { + throw new IllegalArgumentException("Dimension field [" + fieldName + "] cannot be a multi-valued field."); + } + dimensionBytes.put(fieldName, tsidBytes); + } + + public SortedMap getDimensionBytes() { + if (dimensionBytes == null) { + return Collections.emptySortedMap(); + } + return dimensionBytes; + } + public IndexableField[] getFields(String name) { List f = new ArrayList<>(); for (IndexableField field : fields) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index e46193f9b237e..760ed2427cb44 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -206,9 +206,14 @@ Map, MetadataFieldMapper> getMetadataMapper if (existingMapper == null) { for (MetadataFieldMapper.TypeParser parser : metadataMapperParsers.values()) { MetadataFieldMapper metadataFieldMapper = parser.getDefault(parserContext()); - metadataMappers.put(metadataFieldMapper.getClass(), metadataFieldMapper); + // A MetadataFieldMapper may choose to not be added to the metadata mappers + // of an index (eg TimeSeriesIdFieldMapper is only added to time series indices) + // In this case its TypeParser will return null 
instead of the MetadataFieldMapper + // instance. + if (metadataFieldMapper != null) { + metadataMappers.put(metadataFieldMapper.getClass(), metadataFieldMapper); + } } - } else { metadataMappers.putAll(existingMapper.mapping().getMetadataMappersMap()); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index a4bfe18814b7e..b8253c76ceac2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Numbers; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -1354,7 +1355,7 @@ protected String contentType() { protected void parseCreateField(DocumentParserContext context) throws IOException { Number value; try { - value = value(context.parser(), type, nullValue, coerce.value()); + value = value(context.parser(), type, nullValue, coerce()); } catch (InputCoercionException | IllegalArgumentException | JsonParseException e) { if (ignoreMalformed.value() && context.parser().currentToken().isValue()) { context.addIgnoredField(mappedFieldType.name()); @@ -1391,20 +1392,18 @@ private static Number value(XContentParser parser, NumberType numberType, Number } private void indexValue(DocumentParserContext context, Number numericValue) { - List fields = fieldType().type.createFields(fieldType().name(), numericValue, indexed, hasDocValues, stored); - if (dimension) { - // Check that a dimension field is single-valued and not an array - if (context.doc().getByKey(fieldType().name()) != null) { - throw new IllegalArgumentException("Dimension field [" + 
fieldType().name() + "] cannot be a multi-valued field."); - } - if (fields.size() > 0) { - // Add the first field by key so that we can validate if it has been added - context.doc().addWithKey(fieldType().name(), fields.get(0)); - context.doc().addAll(fields.subList(1, fields.size())); - } - } else { - context.doc().addAll(fields); + if (dimension && numericValue != null) { + // Dimension can only be one of byte, short, int, long. So, we encode the tsid + // part of the dimension field by using the long value. + // Also, there is no point in encoding the tsid value if we do not generate + // the _tsid field. + BytesReference bytes = context.getMetadataMapper(TimeSeriesIdFieldMapper.NAME) != null + ? TimeSeriesIdFieldMapper.encodeTsidValue(numericValue.longValue()) + : null; + context.doc().addDimensionBytes(fieldType().name(), bytes); } + List fields = fieldType().type.createFields(fieldType().name(), numericValue, indexed, hasDocValues, stored); + context.doc().addAll(fields); if (hasDocValues == false && (stored || indexed)) { context.addToFieldNames(fieldType().name()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java new file mode 100644 index 0000000000000..876b43d3cffc0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java @@ -0,0 +1,226 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.ByteBlockPool;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.index.IndexMode;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData;
+import org.elasticsearch.index.query.SearchExecutionContext;
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
+import org.elasticsearch.search.lookup.SearchLookup;
+
+import java.io.IOException;
+import java.time.ZoneId;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.function.Supplier;
+
+/**
+ * Mapper for the {@code _tsid} field, generated when the index is
+ * {@link IndexMode#TIME_SERIES organized into time series}.
+ */
+public class TimeSeriesIdFieldMapper extends MetadataFieldMapper {
+
+    public static final String NAME = "_tsid";
+    public static final String CONTENT_TYPE = "_tsid";
+    public static final TimeSeriesIdFieldType FIELD_TYPE = new TimeSeriesIdFieldType();
+    public static final TimeSeriesIdFieldMapper INSTANCE = new TimeSeriesIdFieldMapper();
+
+    /**
+     * The maximum length of the tsid. The value itself comes from a range check in
+     * Lucene's writer for utf-8 doc values.
+     */
+    private static final int LIMIT = ByteBlockPool.BYTE_BLOCK_SIZE - 2;
+    /**
+     * Maximum length of the name of dimension. We picked this so that we could
+     * comfortably fit 16 dimensions inside {@link #LIMIT}. 
+ */ + private static final int DIMENSION_NAME_LIMIT = 512; + /** + * The maximum length of any single dimension. We picked this so that we could + * comfortable fit 16 dimensions inside {@link #LIMIT}. This should be quite + * comfortable given that dimensions are typically going to be less than a + * hundred bytes each, but we're being paranoid here. + */ + private static final int DIMENSION_VALUE_LIMIT = 1024; + + @Override + public FieldMapper.Builder getMergeBuilder() { + return new Builder().init(this); + } + + public static class Builder extends MetadataFieldMapper.Builder { + protected Builder() { + super(NAME); + } + + @Override + protected List> getParameters() { + return List.of(); + } + + @Override + public TimeSeriesIdFieldMapper build() { + return INSTANCE; + } + } + + public static final TypeParser PARSER = new FixedTypeParser(c -> c.getIndexSettings().getMode().buildTimeSeriesIdFieldMapper()); + + public static final class TimeSeriesIdFieldType extends MappedFieldType { + private TimeSeriesIdFieldType() { + super(NAME, false, false, true, TextSearchInfo.NONE, Collections.emptyMap()); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + return new DocValueFetcher(docValueFormat(format, null), context.getForField(this)); + } + + @Override + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { + if (format != null) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); + } + return DocValueFormat.TIME_SERIES_ID; + } + + @Override + public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { + failIfNoDocValues(); + // TODO don't leak the TSID's binary format into the script + return new SortedSetOrdinalsIndexFieldData.Builder(name(), CoreValuesSourceType.KEYWORD); + } + + @Override + public Query 
termQuery(Object value, SearchExecutionContext context) { + throw new IllegalArgumentException("[" + NAME + "] is not searchable"); + } + } + + private TimeSeriesIdFieldMapper() { + super(FIELD_TYPE); + } + + @Override + public void postParse(DocumentParserContext context) throws IOException { + assert fieldType().isSearchable() == false; + + // SortedMap is expected to be sorted by key (field name) + SortedMap dimensionFields = context.doc().getDimensionBytes(); + if (dimensionFields.isEmpty()) { + throw new IllegalArgumentException("Dimension fields are missing."); + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(dimensionFields.size()); + for (Map.Entry entry : dimensionFields.entrySet()) { + String fieldName = entry.getKey(); + BytesRef fieldNameBytes = new BytesRef(fieldName); + int len = fieldNameBytes.length; + if (len > DIMENSION_NAME_LIMIT) { + throw new IllegalArgumentException( + "Dimension name must be less than [" + DIMENSION_NAME_LIMIT + "] bytes but [" + fieldName + "] was [" + len + "]." + ); + } + // Write field name in utf-8 instead of writeString's utf-16-ish thing + out.writeBytesRef(fieldNameBytes); + entry.getValue().writeTo(out); + } + + BytesReference timeSeriesId = out.bytes(); + if (timeSeriesId.length() > LIMIT) { + throw new IllegalArgumentException(NAME + " longer than [" + LIMIT + "] bytes [" + timeSeriesId.length() + "]."); + } + assert timeSeriesId != null : "In time series mode _tsid cannot be null"; + context.doc().add(new SortedSetDocValuesField(fieldType().name(), timeSeriesId.toBytesRef())); + } + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + /** + * Decode the {@code _tsid} into a human readable map. 
+ */ + public static Map decodeTsid(StreamInput in) { + try { + int size = in.readVInt(); + Map result = new LinkedHashMap(size); + + for (int i = 0; i < size; i++) { + String name = in.readString(); + + int type = in.read(); + switch (type) { + case (byte) 's': + result.put(name, in.readBytesRef().utf8ToString()); + break; + case (byte) 'l': + result.put(name, in.readLong()); + break; + default: + throw new IllegalArgumentException("Cannot parse [" + name + "]: Unknown type [" + type + "]"); + } + } + return result; + } catch (IOException | IllegalArgumentException e) { + throw new IllegalArgumentException("Error formatting " + NAME + ": " + e.getMessage(), e); + } + } + + static BytesReference encodeTsidValue(String value) { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.write((byte) 's'); + /* + * Write in utf8 instead of StreamOutput#writeString which is utf-16-ish + * so it's easier for folks to reason about the space taken up. Mostly + * it'll be smaller too. + */ + BytesRef bytes = new BytesRef(value); + if (bytes.length > DIMENSION_VALUE_LIMIT) { + throw new IllegalArgumentException( + "Dimension fields must be less than [" + DIMENSION_VALUE_LIMIT + "] bytes but was [" + bytes.length + "]." 
+ ); + } + out.writeBytesRef(bytes); + return out.bytes(); + } catch (IOException e) { + throw new IllegalArgumentException("Dimension field cannot be serialized.", e); + } + } + + static BytesReference encodeTsidValue(long value) { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.write((byte) 'l'); + out.writeLong(value); + return out.bytes(); + } catch (IOException e) { + throw new IllegalArgumentException("Dimension field cannot be serialized.", e); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index d4393b2a5e563..917f261c219ea 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -51,6 +51,7 @@ import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.mapper.flattened.FlattenedFieldMapper; import org.elasticsearch.index.seqno.RetentionLeaseBackgroundSyncAction; @@ -194,6 +195,7 @@ private static Map initBuiltInMetadataMa // (so will benefit from "fields: []" early termination builtInMetadataMappers.put(IdFieldMapper.NAME, IdFieldMapper.PARSER); builtInMetadataMappers.put(RoutingFieldMapper.NAME, RoutingFieldMapper.PARSER); + builtInMetadataMappers.put(TimeSeriesIdFieldMapper.NAME, TimeSeriesIdFieldMapper.PARSER); builtInMetadataMappers.put(IndexFieldMapper.NAME, IndexFieldMapper.PARSER); builtInMetadataMappers.put(SourceFieldMapper.NAME, SourceFieldMapper.PARSER); builtInMetadataMappers.put(NestedPathFieldMapper.NAME, NestedPathFieldMapper.PARSER); diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java 
b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index 0afb6c48752d0..dfad44a37e3d7 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -11,6 +11,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -20,6 +21,7 @@ import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.geometry.utils.Geohash; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils; import java.io.IOException; @@ -668,4 +670,31 @@ public double parseDouble(String value, boolean roundUp, LongSupplier now) { return Double.parseDouble(value); } }; + + DocValueFormat TIME_SERIES_ID = new TimeSeriesIdDocValueFormat(); + + /** + * DocValues format for time series id. 
+ */ + class TimeSeriesIdDocValueFormat implements DocValueFormat { + private TimeSeriesIdDocValueFormat() {} + + @Override + public String getWriteableName() { + return "tsid"; + } + + @Override + public void writeTo(StreamOutput out) {} + + @Override + public String toString() { + return "tsid"; + } + + @Override + public Object format(BytesRef value) { + return TimeSeriesIdFieldMapper.decodeTsid(new BytesArray(value).streamInput()); + } + }; } diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index f7858e536edf9..22e5214785ba5 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -972,6 +972,7 @@ private void registerValueFormats() { registerValueFormat(DocValueFormat.RAW.getWriteableName(), in -> DocValueFormat.RAW); registerValueFormat(DocValueFormat.BINARY.getWriteableName(), in -> DocValueFormat.BINARY); registerValueFormat(DocValueFormat.UNSIGNED_LONG_SHIFTED.getWriteableName(), in -> DocValueFormat.UNSIGNED_LONG_SHIFTED); + registerValueFormat(DocValueFormat.TIME_SERIES_ID.getWriteableName(), in -> DocValueFormat.TIME_SERIES_ID); } /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java index b50aa2f7dc596..fe27738fe7589 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java @@ -85,6 +85,9 @@ public int compareKey(Bucket other) { @Override protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { + if (format == DocValueFormat.TIME_SERIES_ID) { + return builder.field(CommonFields.KEY.getPreferredName(), format.format(termBytes)); + } return 
builder.field(CommonFields.KEY.getPreferredName(), getKeyAsString()); } diff --git a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java index 9cd0087b511f9..3bd46a0f1ac07 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java @@ -9,13 +9,17 @@ package org.elasticsearch.index; import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataService; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.TextSearchInfo; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -26,14 +30,18 @@ import org.elasticsearch.test.ESTestCase; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.function.Supplier; import static org.elasticsearch.index.IndexSettingsTests.newIndexMeta; +import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; public class IndexSortSettingsTests extends ESTestCase { + private static IndexSettings indexSettings(Settings settings) { return new IndexSettings(newIndexMeta("test", settings), Settings.EMPTY); } @@ -115,13 +123,8 @@ public void testInvalidMissing() { 
assertThat(exc.getMessage(), containsString("Illegal missing value:[default]," + " must be one of [_last, _first]")); } - public void testIndexSorting() { + public void testIndexSortingNoDocValues() { IndexSettings indexSettings = indexSettings(Settings.builder().put("index.sort.field", "field").build()); - IndexSortConfig config = indexSettings.getIndexSortConfig(); - assertTrue(config.hasIndexSort()); - IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null); - NoneCircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); - final IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, cache, circuitBreakerService); MappedFieldType fieldType = new MappedFieldType("field", false, false, false, TextSearchInfo.NONE, Collections.emptyMap()) { @Override public String typeName() { @@ -144,13 +147,7 @@ public Query termQuery(Object value, SearchExecutionContext context) { throw new UnsupportedOperationException(); } }; - IllegalArgumentException iae = expectThrows( - IllegalArgumentException.class, - () -> config.buildIndexSort( - field -> fieldType, - (ft, searchLookupSupplier) -> indexFieldDataService.getForField(ft, "index", searchLookupSupplier) - ) - ); + Exception iae = expectThrows(IllegalArgumentException.class, () -> buildIndexSort(indexSettings, fieldType)); assertEquals("docvalues not found for index sort field:[field]", iae.getMessage()); assertThat(iae.getCause(), instanceOf(UnsupportedOperationException.class)); assertEquals("index sorting not supported on runtime field [field]", iae.getCause().getMessage()); @@ -158,16 +155,8 @@ public Query termQuery(Object value, SearchExecutionContext context) { public void testSortingAgainstAliases() { IndexSettings indexSettings = indexSettings(Settings.builder().put("index.sort.field", "field").build()); - IndexSortConfig config = indexSettings.getIndexSortConfig(); - assertTrue(config.hasIndexSort()); - IndicesFieldDataCache cache = new 
IndicesFieldDataCache(Settings.EMPTY, null); - NoneCircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); - final IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, cache, circuitBreakerService); - MappedFieldType mft = new KeywordFieldMapper.KeywordFieldType("aliased"); - Exception e = expectThrows( - IllegalArgumentException.class, - () -> config.buildIndexSort(field -> mft, (ft, s) -> indexFieldDataService.getForField(ft, "index", s)) - ); + MappedFieldType aliased = new KeywordFieldMapper.KeywordFieldType("aliased"); + Exception e = expectThrows(IllegalArgumentException.class, () -> buildIndexSort(indexSettings, Map.of("field", aliased))); assertEquals("Cannot use alias [field] as an index sort field", e.getMessage()); } @@ -175,17 +164,54 @@ public void testSortingAgainstAliasesPre713() { IndexSettings indexSettings = indexSettings( Settings.builder().put("index.version.created", Version.V_7_12_0).put("index.sort.field", "field").build() ); - IndexSortConfig config = indexSettings.getIndexSortConfig(); - assertTrue(config.hasIndexSort()); - IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null); - NoneCircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); - final IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, cache, circuitBreakerService); - MappedFieldType mft = new KeywordFieldMapper.KeywordFieldType("aliased"); - config.buildIndexSort(field -> mft, (ft, s) -> indexFieldDataService.getForField(ft, "index", s)); - + MappedFieldType aliased = new KeywordFieldMapper.KeywordFieldType("aliased"); + Sort sort = buildIndexSort(indexSettings, Map.of("field", aliased)); + assertThat(sort.getSort(), arrayWithSize(1)); + assertThat(sort.getSort()[0].getField(), equalTo("aliased")); assertWarnings( "Index sort for index [test] defined on field [field] which resolves to field [aliased]. 
" + "You will not be able to define an index sort over aliased fields in new indexes" ); } + + public void testTimeSeriesMode() { + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "some_dimension") + .build() + ); + Sort sort = buildIndexSort(indexSettings, TimeSeriesIdFieldMapper.FIELD_TYPE, new DateFieldMapper.DateFieldType("@timestamp")); + assertThat(sort.getSort(), arrayWithSize(2)); + assertThat(sort.getSort()[0].getField(), equalTo("_tsid")); + assertThat(sort.getSort()[1].getField(), equalTo("@timestamp")); + } + + public void testTimeSeriesModeNoTimestamp() { + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "some_dimension") + .build() + ); + Exception e = expectThrows(IllegalArgumentException.class, () -> buildIndexSort(indexSettings, TimeSeriesIdFieldMapper.FIELD_TYPE)); + assertThat(e.getMessage(), equalTo("unknown index sort field:[@timestamp] required by [index.mode=time_series]")); + } + + private Sort buildIndexSort(IndexSettings indexSettings, MappedFieldType... 
mfts) { + Map lookup = new HashMap<>(mfts.length); + for (MappedFieldType mft : mfts) { + assertNull(lookup.put(mft.name(), mft)); + } + return buildIndexSort(indexSettings, lookup); + } + + private Sort buildIndexSort(IndexSettings indexSettings, Map lookup) { + IndexSortConfig config = indexSettings.getIndexSortConfig(); + assertTrue(config.hasIndexSort()); + IndicesFieldDataCache cache = new IndicesFieldDataCache(indexSettings.getSettings(), null); + NoneCircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); + IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, cache, circuitBreakerService); + return config.buildIndexSort(lookup::get, (ft, s) -> indexFieldDataService.getForField(ft, "index", s)); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java index e05732c4042a6..4ff57354123bd 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java @@ -29,6 +29,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -113,8 +114,7 @@ public void testFieldCapabilities() { private static void assertFieldCaps(FieldCapabilitiesResponse fieldCapabilitiesResponse, Collection expectedFields) { Map> responseMap = new HashMap<>(fieldCapabilitiesResponse.get()); - Set builtInMetadataFields = IndicesModule.getBuiltInMetadataFields(); - for (String field : builtInMetadataFields) { + for (String field : builtInMetadataFields()) { Map remove = responseMap.remove(field); assertNotNull(" expected field [" + field + "] not found", remove); } @@ -125,13 +125,19 @@ private static void assertFieldCaps(FieldCapabilitiesResponse 
fieldCapabilitiesR assertEquals("Some unexpected fields were returned: " + responseMap.keySet(), 0, responseMap.size()); } + private static Set builtInMetadataFields() { + Set builtInMetadataFields = new HashSet<>(IndicesModule.getBuiltInMetadataFields()); + // Index is not a time-series index, and it will not contain a _tsid field + builtInMetadataFields.remove(TimeSeriesIdFieldMapper.NAME); + return builtInMetadataFields; + } + private static void assertFieldMappings( Map actual, Collection expectedFields ) { - Set builtInMetadataFields = IndicesModule.getBuiltInMetadataFields(); Map fields = new HashMap<>(actual); - for (String field : builtInMetadataFields) { + for (String field : builtInMetadataFields()) { GetFieldMappingsResponse.FieldMappingMetadata fieldMappingMetadata = fields.remove(field); assertNotNull(" expected field [" + field + "] not found", fieldMappingMetadata); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index 63e73c36b3499..63b2ceb0c7925 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -387,7 +387,7 @@ public void testDimensionExtraLongKeyword() throws IOException { MapperParsingException.class, () -> mapper.parse(source(b -> b.field("field", randomAlphaOfLengthBetween(1025, 2048)))) ); - assertThat(e.getCause().getMessage(), containsString("Dimension field [field] cannot be more than [1024] bytes long.")); + assertThat(e.getCause().getMessage(), containsString("Dimension fields must be less than [1024] bytes but was")); } public void testConfigureSimilarity() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index 1cad1c7d50ae8..32caede9f2189 
100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -50,10 +50,11 @@ private static MappingParser createMappingParser(Settings settings) { indexSettings.getIndexVersionCreated() ); Map, MetadataFieldMapper> metadataMappers = new LinkedHashMap<>(); - metadataMapperParsers.values() - .stream() - .map(parser -> parser.getDefault(parserContextSupplier.get())) - .forEach(m -> metadataMappers.put(m.getClass(), m)); + metadataMapperParsers.values().stream().map(parser -> parser.getDefault(parserContextSupplier.get())).forEach(m -> { + if (m != null) { + metadataMappers.put(m.getClass(), m); + } + }); return new MappingParser( parserContextSupplier, metadataMapperParsers, diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java new file mode 100644 index 0000000000000..81acf8e6c74ef --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java @@ -0,0 +1,573 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.io.stream.ByteArrayStreamInput; +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; + +import static io.github.nik9000.mapmatcher.MapMatcher.assertMap; +import static io.github.nik9000.mapmatcher.MapMatcher.matchesMap; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +public class TimeSeriesIdFieldMapperTests extends MetadataMapperTestCase { + + @Override + protected String fieldName() { + return TimeSeriesIdFieldMapper.NAME; + } + + @Override + protected void registerParameters(ParameterChecker checker) throws IOException { + // There aren't any parameters + } + + private DocumentMapper createDocumentMapper(String routingPath, XContentBuilder mappings) throws IOException { + return createMapperService( + getIndexSettingsBuilder().put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES.name()) + .put(MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING.getKey(), 200) // Increase dimension limit + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), routingPath) + .build(), + mappings + ).documentMapper(); + } + + private ParsedDocument parseDocument(DocumentMapper docMapper, CheckedFunction f) + throws IOException { + // Add the @timestamp field required by DataStreamTimestampFieldMapper for all time series indices + return docMapper.parse(source(b -> f.apply(b).field("@timestamp", "2021-10-01"))); + } + + public void testEnabledInTimeSeriesMode() throws Exception { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + 
b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "long").field("time_series_dimension", true).endObject(); + })); + + ParsedDocument doc = parseDocument(docMapper, b -> b.field("a", "value").field("b", 100).field("c", 500)); + assertThat( + doc.rootDoc().getBinaryValue("_tsid"), + equalTo(new BytesRef("\u0002\u0001as\u0005value\u0001bl\u0000\u0000\u0000\u0000\u0000\u0000\u0000d")) + ); + assertThat(doc.rootDoc().getField("a").binaryValue(), equalTo(new BytesRef("value"))); + assertThat(doc.rootDoc().getField("b").numericValue(), equalTo(100L)); + assertMap( + TimeSeriesIdFieldMapper.decodeTsid(new ByteArrayStreamInput(doc.rootDoc().getBinaryValue("_tsid").bytes)), + matchesMap().entry("a", "value").entry("b", 100L) + ); + } + + public void testDisabledInStandardMode() throws Exception { + DocumentMapper docMapper = createMapperService( + getIndexSettingsBuilder().put(IndexSettings.MODE.getKey(), IndexMode.STANDARD.name()).build(), + mapping(b -> {}) + ).documentMapper(); + assertThat(docMapper.metadataMapper(TimeSeriesIdFieldMapper.class), is(nullValue())); + + ParsedDocument doc = docMapper.parse(source(b -> b.field("field", "value"))); + assertThat(doc.rootDoc().getBinaryValue("_tsid"), is(nullValue())); + assertThat(doc.rootDoc().get("field"), equalTo("value")); + } + + public void testIncludeInDocumentNotAllowed() throws Exception { + DocumentMapper docMapper = createDocumentMapper( + "a", + mapping(b -> { b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); }) + ); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("_tsid", "foo"))); + + assertThat(e.getCause().getMessage(), containsString("Field [_tsid] is a metadata field and cannot be added inside a document")); + } + + /** + * Test with non-randomized string for sanity checking. 
+ */ + public void testStrings() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("o") + .startObject("properties") + .startObject("e") + .field("type", "keyword") + .field("time_series_dimension", true) + .endObject() + .endObject() + .endObject(); + })); + + ParsedDocument doc = parseDocument( + docMapper, + b -> b.field("a", "foo").field("b", "bar").field("c", "baz").startObject("o").field("e", "bort").endObject() + ); + assertMap( + TimeSeriesIdFieldMapper.decodeTsid(new ByteArrayStreamInput(doc.rootDoc().getBinaryValue("_tsid").bytes)), + matchesMap().entry("a", "foo").entry("o.e", "bort") + ); + } + + public void testKeywordTooLong() throws IOException { + DocumentMapper docMapper = createDocumentMapper( + "a", + mapping(b -> { b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); }) + ); + + Exception e = expectThrows( + MapperParsingException.class, + () -> parseDocument(docMapper, b -> b.field("a", "more_than_1024_bytes".repeat(52)).field("@timestamp", "2021-10-01")) + ); + assertThat(e.getCause().getMessage(), equalTo("Dimension fields must be less than [1024] bytes but was [1040].")); + } + + public void testKeywordTooLongUtf8() throws IOException { + DocumentMapper docMapper = createDocumentMapper( + "a", + mapping(b -> { b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); }) + ); + + String theWordLong = "長い"; + Exception e = expectThrows( + MapperParsingException.class, + () -> parseDocument(docMapper, b -> b.field("a", theWordLong.repeat(200)).field("@timestamp", "2021-10-01")) + ); + assertThat(e.getCause().getMessage(), equalTo("Dimension fields must be less than [1024] bytes but was [1200].")); + } + + public void testKeywordNull() throws IOException { + DocumentMapper docMapper = createDocumentMapper( + "a", + 
mapping(b -> { b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); }) + ); + + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", (String) null))); + assertThat(e.getCause().getMessage(), equalTo("Dimension fields are missing.")); + } + + /** + * Test with non-randomized longs for sanity checking. + */ + public void testLong() throws IOException { + DocumentMapper docMapper = createDocumentMapper("kw", mapping(b -> { + b.startObject("kw").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("a").field("type", "long").field("time_series_dimension", true).endObject(); + b.startObject("o") + .startObject("properties") + .startObject("e") + .field("type", "long") + .field("time_series_dimension", true) + .endObject() + .endObject() + .endObject(); + })); + + ParsedDocument doc = parseDocument( + docMapper, + b -> b.field("a", 1L).field("b", -1).field("c", "baz").startObject("o").field("e", 1234).endObject() + ); + assertMap( + TimeSeriesIdFieldMapper.decodeTsid(new ByteArrayStreamInput(doc.rootDoc().getBinaryValue("_tsid").bytes)), + matchesMap().entry("a", 1L).entry("o.e", 1234L) + ); + } + + public void testLongInvalidString() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "long").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", "not_a_long"))); + assertThat( + e.getMessage(), + equalTo("failed to parse field [a] of type [long] in document with id '1'. 
Preview of field's value: 'not_a_long'") + ); + } + + public void testLongNull() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "long").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", (Long) null))); + assertThat(e.getCause().getMessage(), equalTo("Dimension fields are missing.")); + } + + /** + * Test with non-randomized integers for sanity checking. + */ + public void testInteger() throws IOException { + DocumentMapper docMapper = createDocumentMapper("kw", mapping(b -> { + b.startObject("kw").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("a").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("o") + .startObject("properties") + .startObject("e") + .field("type", "integer") + .field("time_series_dimension", true) + .endObject() + .endObject() + .endObject(); + })); + + ParsedDocument doc = parseDocument( + docMapper, + b -> b.field("a", 1L).field("b", -1).field("c", "baz").startObject("o").field("e", Integer.MIN_VALUE).endObject() + ); + assertMap( + TimeSeriesIdFieldMapper.decodeTsid(new ByteArrayStreamInput(doc.rootDoc().getBinaryValue("_tsid").bytes)), + matchesMap().entry("a", 1L).entry("o.e", (long) Integer.MIN_VALUE) + ); + } + + public void testIntegerInvalidString() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", "not_an_int"))); + 
assertThat( + e.getMessage(), + equalTo("failed to parse field [a] of type [integer] in document with id '1'. Preview of field's value: 'not_an_int'") + ); + } + + public void testIntegerOutOfRange() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", Long.MAX_VALUE))); + assertThat( + e.getMessage(), + equalTo( + "failed to parse field [a] of type [integer] in document with id '1'. Preview of field's value: '" + Long.MAX_VALUE + "'" + ) + ); + } + + /** + * Test with non-randomized shorts for sanity checking. + */ + public void testShort() throws IOException { + DocumentMapper docMapper = createDocumentMapper("kw", mapping(b -> { + b.startObject("kw").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("a").field("type", "short").field("time_series_dimension", true).endObject(); + b.startObject("o") + .startObject("properties") + .startObject("e") + .field("type", "short") + .field("time_series_dimension", true) + .endObject() + .endObject() + .endObject(); + })); + + ParsedDocument doc = parseDocument( + docMapper, + b -> b.field("a", 1L).field("b", -1).field("c", "baz").startObject("o").field("e", Short.MIN_VALUE).endObject() + ); + assertMap( + TimeSeriesIdFieldMapper.decodeTsid(new ByteArrayStreamInput(doc.rootDoc().getBinaryValue("_tsid").bytes)), + matchesMap().entry("a", 1L).entry("o.e", (long) Short.MIN_VALUE) + ); + } + + public void testShortInvalidString() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "short").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", 
"keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", "not_a_short"))); + assertThat( + e.getMessage(), + equalTo("failed to parse field [a] of type [short] in document with id '1'. Preview of field's value: 'not_a_short'") + ); + } + + public void testShortOutOfRange() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "short").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", Long.MAX_VALUE))); + assertThat( + e.getMessage(), + equalTo("failed to parse field [a] of type [short] in document with id '1'. Preview of field's value: '" + Long.MAX_VALUE + "'") + ); + } + + /** + * Test with non-randomized shorts for sanity checking. 
+ */ + public void testByte() throws IOException { + DocumentMapper docMapper = createDocumentMapper("kw", mapping(b -> { + b.startObject("kw").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("a").field("type", "byte").field("time_series_dimension", true).endObject(); + b.startObject("o") + .startObject("properties") + .startObject("e") + .field("type", "byte") + .field("time_series_dimension", true) + .endObject() + .endObject() + .endObject(); + })); + + ParsedDocument doc = parseDocument( + docMapper, + b -> b.field("a", 1L).field("b", -1).field("c", "baz").startObject("o").field("e", (int) Byte.MIN_VALUE).endObject() + ); + assertMap( + TimeSeriesIdFieldMapper.decodeTsid(new ByteArrayStreamInput(doc.rootDoc().getBinaryValue("_tsid").bytes)), + matchesMap().entry("a", 1L).entry("o.e", (long) Byte.MIN_VALUE) + ); + } + + public void testByteInvalidString() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "byte").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", "not_a_byte"))); + assertThat( + e.getMessage(), + equalTo("failed to parse field [a] of type [byte] in document with id '1'. 
Preview of field's value: 'not_a_byte'") + ); + } + + public void testByteOutOfRange() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "byte").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", Long.MAX_VALUE))); + assertThat( + e.getMessage(), + equalTo("failed to parse field [a] of type [byte] in document with id '1'. Preview of field's value: '" + Long.MAX_VALUE + "'") + ); + } + + /** + * Test with non-randomized ips for sanity checking. + */ + public void testIp() throws IOException { + DocumentMapper docMapper = createDocumentMapper("kw", mapping(b -> { + b.startObject("kw").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("a").field("type", "ip").field("time_series_dimension", true).endObject(); + b.startObject("o") + .startObject("properties") + .startObject("e") + .field("type", "ip") + .field("time_series_dimension", true) + .endObject() + .endObject() + .endObject(); + })); + + ParsedDocument doc = parseDocument( + docMapper, + b -> b.field("a", "192.168.0.1").field("b", -1).field("c", "baz").startObject("o").field("e", "255.255.255.1").endObject() + ); + assertMap( + TimeSeriesIdFieldMapper.decodeTsid(new ByteArrayStreamInput(doc.rootDoc().getBinaryValue("_tsid").bytes)), + matchesMap().entry("a", "192.168.0.1").entry("o.e", "255.255.255.1") + ); + } + + public void testIpInvalidString() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "ip").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () 
-> parseDocument(docMapper, b -> b.field("a", "not_an_ip"))); + assertThat( + e.getMessage(), + equalTo("failed to parse field [a] of type [ip] in document with id '1'. Preview of field's value: 'not_an_ip'") + ); + } + + /** + * Tests when the total of the tsid is more than 32k. + */ + public void testVeryLarge() throws IOException { + // By default, only 16 dimension fields are allowed. To support 100 dimension fields + // we must increase 'index.mapping.dimension_fields.limit' + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + for (int i = 0; i < 100; i++) { + b.startObject("d" + i).field("type", "keyword").field("time_series_dimension", true).endObject(); + } + })); + + String large = "many words ".repeat(80); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> { + for (int i = 0; i < 100; i++) { + b.field("d" + i, large); + } + return b; + })); + assertThat(e.getCause().getMessage(), equalTo("_tsid longer than [32766] bytes [88691].")); + } + + /** + * Sending the same document twice produces the same value. 
+ */ + public void testSameGenConsistentForSameDoc() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("c").field("type", "long").field("time_series_dimension", true).endObject(); + })); + + String a = randomAlphaOfLength(10); + int b = between(1, 100); + int c = between(0, 2); + CheckedFunction fields = d -> d.field("a", a).field("b", b).field("c", (long) c); + ParsedDocument doc1 = parseDocument(docMapper, fields); + ParsedDocument doc2 = parseDocument(docMapper, fields); + assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, equalTo(doc2.rootDoc().getBinaryValue("_tsid").bytes)); + } + + /** + * Non-dimension fields don't influence the value of _tsid. + */ + public void testExtraFieldsDoNotMatter() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("c").field("type", "long").field("time_series_dimension", true).endObject(); + })); + + String a = randomAlphaOfLength(10); + int b = between(1, 100); + int c = between(0, 2); + ParsedDocument doc1 = parseDocument( + docMapper, + d -> d.field("a", a).field("b", b).field("c", (long) c).field("e", between(10, 100)) + ); + ParsedDocument doc2 = parseDocument( + docMapper, + d -> d.field("a", a).field("b", b).field("c", (long) c).field("e", between(50, 200)) + ); + assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, equalTo(doc2.rootDoc().getBinaryValue("_tsid").bytes)); + } + + /** + * The order that the dimensions appear in the document do not influence the value of _tsid. 
+ */ + public void testOrderDoesNotMatter() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("c").field("type", "long").field("time_series_dimension", true).endObject(); + })); + + String a = randomAlphaOfLength(10); + int b = between(1, 100); + int c = between(0, 2); + ParsedDocument doc1 = parseDocument(docMapper, d -> d.field("a", a).field("b", b).field("c", (long) c)); + ParsedDocument doc2 = parseDocument(docMapper, d -> d.field("b", b).field("a", a).field("c", (long) c)); + assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, equalTo(doc2.rootDoc().getBinaryValue("_tsid").bytes)); + } + + /** + * Dimensions that appear in the mapping but not in the document don't influence the value of _tsid. + */ + public void testUnusedExtraDimensions() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("c").field("type", "long").field("time_series_dimension", true).endObject(); + })); + + String a = randomAlphaOfLength(10); + int b = between(1, 100); + CheckedFunction fields = d -> d.field("a", a).field("b", b); + ParsedDocument doc1 = parseDocument(docMapper, fields); + ParsedDocument doc2 = parseDocument(docMapper, fields); + assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, equalTo(doc2.rootDoc().getBinaryValue("_tsid").bytes)); + } + + /** + * Different values for dimensions change the result. 
+ */ + public void testDifferentValues() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + })); + + String a = randomAlphaOfLength(10); + ParsedDocument doc1 = parseDocument(docMapper, d -> d.field("a", a).field("b", between(1, 100))); + ParsedDocument doc2 = parseDocument(docMapper, d -> d.field("a", a + 1).field("b", between(200, 300))); + assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, not(doc2.rootDoc().getBinaryValue("_tsid").bytes)); + } + + /** + * Two documents with the same *values* but different dimension keys will generate + * different {@code _tsid}s. + */ + public void testDifferentDimensions() throws IOException { + // First doc mapper has dimension fields a and b + DocumentMapper docMapper1 = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + })); + // Second doc mapper has dimension fields a and c + DocumentMapper docMapper2 = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("c").field("type", "integer").field("time_series_dimension", true).endObject(); + })); + + String a = randomAlphaOfLength(10); + int b = between(1, 100); + int c = between(5, 500); + CheckedFunction fields = d -> d.field("a", a).field("b", b).field("c", c); + ParsedDocument doc1 = parseDocument(docMapper1, fields); + ParsedDocument doc2 = parseDocument(docMapper2, fields); + assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, not(doc2.rootDoc().getBinaryValue("_tsid").bytes)); + } + + /** + * Documents with fewer dimensions have a different value. 
+ */ + public void testFewerDimensions() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("c").field("type", "integer").field("time_series_dimension", true).endObject(); + })); + + String a = randomAlphaOfLength(10); + int b = between(1, 100); + int c = between(5, 500); + ParsedDocument doc1 = parseDocument(docMapper, d -> d.field("a", a).field("b", b)); + ParsedDocument doc2 = parseDocument(docMapper, d -> d.field("a", a).field("b", b).field("c", c)); + assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, not(doc2.rootDoc().getBinaryValue("_tsid").bytes)); + } + + public void testEmpty() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("c").field("type", "integer").field("time_series_dimension", true).endObject(); + })); + + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, d -> d)); + assertThat(e.getCause().getMessage(), equalTo("Dimension fields are missing.")); + } +} diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index aa92d21591a35..7848cf942eb9d 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; +import 
org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.test.ESTestCase; @@ -73,6 +74,7 @@ public Map getMetadataMappers() { IgnoredFieldMapper.NAME, IdFieldMapper.NAME, RoutingFieldMapper.NAME, + TimeSeriesIdFieldMapper.NAME, IndexFieldMapper.NAME, SourceFieldMapper.NAME, NestedPathFieldMapper.NAME, diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index fe5fd25dc5927..5db980f7a2cc8 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -100,6 +100,10 @@ protected Settings getIndexSettings() { return SETTINGS; } + protected final Settings.Builder getIndexSettingsBuilder() { + return Settings.builder().put(getIndexSettings()); + } + protected IndexAnalyzers createIndexAnalyzers(IndexSettings indexSettings) { return createIndexAnalyzers(); } From 6d9aaf82409e0e7da7e393abf1a23a5c01b5e98f Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Mon, 29 Nov 2021 13:22:22 +0200 Subject: [PATCH 33/55] =?UTF-8?q?[ML]=20Improve=20error=20msg=20on=20start?= =?UTF-8?q?ing=20scrolling=20datafeed=20with=20no=20matchin=E2=80=A6=20(#8?= =?UTF-8?q?1069)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If a scrolling datafeed has an index pattern that matches no indices, starting the datafeed fails with a message about the time field having no mappings. This commit impvoves this by informing the user on the actual cause of the error which is that no index matches the datafeed's indices. 
Relates #81013 --- .../scroll/ScrollDataExtractorFactory.java | 10 +++++++ .../extractor/DataExtractorFactoryTests.java | 28 +++++++++++++++++++ .../test/ml/start_stop_datafeed.yml | 21 +++++++++++++- 3 files changed, 58 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java index dc117ba5ffe04..f343389afb978 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java @@ -79,6 +79,16 @@ public static void create( // Step 2. Contruct the factory and notify listener ActionListener fieldCapabilitiesHandler = ActionListener.wrap(fieldCapabilitiesResponse -> { + if (fieldCapabilitiesResponse.getIndices().length == 0) { + listener.onFailure( + ExceptionsHelper.badRequestException( + "datafeed [{}] cannot retrieve data because no index matches datafeed's indices {}", + datafeed.getId(), + datafeed.getIndices() + ) + ); + return; + } TimeBasedExtractedFields extractedFields = TimeBasedExtractedFields.build(job, datafeed, fieldCapabilitiesResponse); listener.onResponse( new ScrollDataExtractorFactory(client, datafeed, job, extractedFields, xContentRegistry, timingStatsReporter) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java index 25abb69f70a37..e68121b8767e5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java 
@@ -81,6 +81,7 @@ public void setUpTests() { when(client.threadPool()).thenReturn(threadPool); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); fieldsCapabilities = mock(FieldCapabilitiesResponse.class); + when(fieldsCapabilities.getIndices()).thenReturn(new String[] { "test_index_1" }); givenAggregatableField("time", "date"); givenAggregatableField("field", "keyword"); @@ -100,6 +101,33 @@ public void setUpTests() { }).when(client).execute(same(GetRollupIndexCapsAction.INSTANCE), any(), any()); } + public void testCreateDataExtractorFactoryGivenDefaultScrollAndNoMatchingIndices() { + when(fieldsCapabilities.getIndices()).thenReturn(new String[0]); + + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeField("time"); + Job.Builder jobBuilder = DatafeedRunnerTests.createDatafeedJob(); + jobBuilder.setDataDescription(dataDescription); + DatafeedConfig datafeedConfig = DatafeedRunnerTests.createDatafeedConfig("datafeed1", "foo").build(); + + ActionListener listener = ActionListener.wrap( + dataExtractorFactory -> fail("factory creation should have failed as there are no matching indices"), + e -> assertThat( + e.getMessage(), + equalTo("datafeed [datafeed1] cannot retrieve data because no index " + "matches datafeed's indices [myIndex]") + ) + ); + + DataExtractorFactory.create( + client, + datafeedConfig, + jobBuilder.build(new Date()), + xContentRegistry(), + timingStatsReporter, + listener + ); + } + public void testCreateDataExtractorFactoryGivenDefaultScroll() { DataDescription.Builder dataDescription = new DataDescription.Builder(); dataDescription.setTimeField("time"); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/start_stop_datafeed.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/start_stop_datafeed.yml index ed9de0e09b57c..72dc65220d240 100644 --- 
a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/start_stop_datafeed.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/start_stop_datafeed.yml @@ -177,7 +177,7 @@ setup: end: "2017-02-01T01:00:00Z" --- -"Test start given datafeed index does not exist": +"Test start datafeed given concrete index that does not exist": - do: ml.update_datafeed: datafeed_id: start-stop-datafeed-datafeed-1 @@ -195,6 +195,25 @@ setup: ml.start_datafeed: datafeed_id: "start-stop-datafeed-datafeed-1" +--- +"Test start datafeed given index pattern with no matching indices": + - do: + ml.update_datafeed: + datafeed_id: start-stop-datafeed-datafeed-1 + body: > + { + "indexes":["utopia*"] + } + + - do: + ml.open_job: + job_id: "start-stop-datafeed-job" + + - do: + catch: /datafeed \[start-stop-datafeed-datafeed-1] cannot retrieve data because no index matches datafeed's indices \[utopia\*\]/ + ml.start_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + --- "Test start given field without mappings": - do: From d9e73eb441a6a3b1c1a62a4a49b6f7ce8e7d9f4e Mon Sep 17 00:00:00 2001 From: David Roberts Date: Mon, 29 Nov 2021 11:45:26 +0000 Subject: [PATCH 34/55] [ML] Mute ml/inference_crud/Test force delete given model referenced by pipeline (#81093) Due to https://github.com/elastic/elasticsearch/issues/80703 --- .../resources/rest-api-spec/test/ml/inference_crud.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_crud.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_crud.yml index 8faf5de9df8d2..71a1ef09943e2 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_crud.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_crud.yml @@ -589,6 +589,10 @@ setup: --- "Test force delete given model referenced by pipeline": + - skip: + version: all + reason: "@AwaitsFix 
https://github.com/elastic/elasticsearch/issues/80703" + - do: ingest.put_pipeline: id: "pipeline-using-a-classification-model" From e54438004504d543673613fb6bce5ef90dca9fb1 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Mon, 29 Nov 2021 14:19:34 +0200 Subject: [PATCH 35/55] =?UTF-8?q?[ML]=20Allow=20datafeed=20start=20with=20?= =?UTF-8?q?remote=20indices=20despite=20local=20index=20pat=E2=80=A6=20(#8?= =?UTF-8?q?1074)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a datafeed is assigned we check that there are indices with fully assigned shards. However, in the scenario where the datafeed has a mix of local and remote index patterns, when the local index patterns do not match any index, results to failure to assign the datafeed. This should not be the behaviour. As the datafeed also has remote indices we should allow starting the datafeed. This commit fixes this by skipping the check that local index patterns produce matching indices when there are remote indices too. 
Closes #81013 --- .../ml/datafeed/DatafeedNodeSelector.java | 5 +++- .../datafeed/DatafeedNodeSelectorTests.java | 23 +++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java index e0785d3bda106..54bdabb64a3d8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java @@ -168,6 +168,7 @@ private AssignmentFailure checkAssignment() { @Nullable private AssignmentFailure verifyIndicesActive() { + boolean hasRemoteIndices = datafeedIndices.stream().anyMatch(RemoteClusterLicenseChecker::isRemoteIndex); String[] index = datafeedIndices.stream() // We cannot verify remote indices .filter(i -> RemoteClusterLicenseChecker.isRemoteIndex(i) == false) @@ -177,7 +178,9 @@ private AssignmentFailure verifyIndicesActive() { try { concreteIndices = resolver.concreteIndexNames(clusterState, indicesOptions, true, index); - if (concreteIndices.length == 0) { + + // If we have remote indices we cannot check those. We should not fail as they may contain data. 
+ if (hasRemoteIndices == false && concreteIndices.length == 0) { return new AssignmentFailure( "cannot start datafeed [" + datafeedId diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java index fe6bb9e34683c..c9898e50d997f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -390,6 +390,29 @@ public void testIndexPatternDoesntExist() { .checkDatafeedTaskCanBeCreated(); } + public void testLocalIndexPatternWithoutMatchingIndicesAndRemoteIndexPattern() { + Job job = createScheduledJob("job_id").build(new Date()); + DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Arrays.asList("missing-*", "remote:index-*")); + + PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); + tasks = tasksBuilder.build(); + + givenClusterState("foo", 1, 0); + + PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + clusterState, + resolver, + df.getId(), + df.getJobId(), + df.getIndices(), + SearchRequest.DEFAULT_INDICES_OPTIONS + ).selectNode(makeCandidateNodes("node_id", "other_node_id")); + assertEquals("node_id", result.getExecutorNode()); + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices(), SearchRequest.DEFAULT_INDICES_OPTIONS) + .checkDatafeedTaskCanBeCreated(); + } + public void testRemoteIndex() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("remote:foo")); From 92b6b6f1b28cc1c98ff7cc15733b4ef8bd26805d Mon Sep 17 00:00:00 2001 From: David Kyle Date: Mon, 29 Nov 2021 13:08:02 +0000 
Subject: [PATCH 36/55] [ML] Make inference timeout test more reliable (#81094) --- .../xpack/ml/integration/PyTorchModelIT.java | 11 +++++++++-- .../ml/inference/deployment/DeploymentManager.java | 2 +- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java index 573269671498b..776a94254aeb7 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java @@ -200,8 +200,15 @@ public void testEvaluateWithMinimalTimeout() throws IOException { putModelDefinition(modelId); putVocabulary(List.of("these", "are", "my", "words"), modelId); startDeployment(modelId); - ResponseException ex = expectThrows(ResponseException.class, () -> infer("my words", modelId, TimeValue.ZERO)); - assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(429)); + // There is a race between inference and timeout so that + // even with a zero timeout a valid inference response may + // be returned. 
+ // The test asserts that if an error occurs it is a timeout error + try { + infer("my words", modelId, TimeValue.ZERO); + } catch (ResponseException ex) { + assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(408)); + } stopDeployment(modelId); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java index 2d3d07d06c81d..27ecbfbe5959b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java @@ -297,7 +297,7 @@ void onTimeout() { if (notified.compareAndSet(false, true)) { processContext.getResultProcessor().ignoreResposeWithoutNotifying(String.valueOf(requestId)); listener.onFailure( - new ElasticsearchStatusException("timeout [{}] waiting for inference result", RestStatus.TOO_MANY_REQUESTS, timeout) + new ElasticsearchStatusException("timeout [{}] waiting for inference result", RestStatus.REQUEST_TIMEOUT, timeout) ); return; } From 31011184408a1c3799635c902307f10a10477191 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Mon, 29 Nov 2021 14:03:22 +0000 Subject: [PATCH 37/55] [ML] Fix incorrect logging of unexpected model size error (#81089) --- .../pytorch/process/PyTorchStateStreamer.java | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchStateStreamer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchStateStreamer.java index a60c681ca2b02..562361bffff51 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchStateStreamer.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchStateStreamer.java @@ -37,12 +37,16 @@ public class PyTorchStateStreamer { private static final Logger logger = LogManager.getLogger(PyTorchStateStreamer.class); + /** The size of the data written before the model definition */ + private static final int NUM_BYTES_IN_PRELUDE = 4; + private final OriginSettingClient client; private final ExecutorService executorService; private final NamedXContentRegistry xContentRegistry; private volatile boolean isCancelled; private volatile int modelSize = -1; - private final AtomicInteger bytesWritten = new AtomicInteger(); + // model bytes only, does not include the prelude + private final AtomicInteger modelBytesWritten = new AtomicInteger(); public PyTorchStateStreamer(Client client, ExecutorService executorService, NamedXContentRegistry xContentRegistry) { this.client = new OriginSettingClient(Objects.requireNonNull(client), ML_ORIGIN); @@ -59,7 +63,7 @@ public void cancel() { /** * First writes the size of the model so the native process can - * allocated memory then writes the chunks of binary state. + * allocate memory then writes the chunks of binary state. 
* * @param modelId The model to write * @param index The index to search for the model @@ -72,11 +76,11 @@ public void writeStateToStream(String modelId, String index, OutputStream restor restorer.setSearchSize(1); restorer.restoreModelDefinition(doc -> writeChunk(doc, restoreStream), success -> { logger.debug("model [{}] state restored in [{}] documents from index [{}]", modelId, restorer.getNumDocsWritten(), index); - if (bytesWritten.get() != modelSize) { + if (modelBytesWritten.get() != modelSize) { logger.error( "model [{}] restored state size [{}] does not equal the expected model size [{}]", modelId, - bytesWritten, + modelBytesWritten, modelSize ); } @@ -96,7 +100,7 @@ private boolean writeChunk(TrainedModelDefinitionDoc doc, OutputStream outputStr // The array backing the BytesReference may be bigger than what is // referred to so write only what is after the offset outputStream.write(doc.getBinaryData().array(), doc.getBinaryData().arrayOffset(), doc.getBinaryData().length()); - bytesWritten.addAndGet(doc.getBinaryData().length()); + modelBytesWritten.addAndGet(doc.getBinaryData().length()); return true; } @@ -139,12 +143,10 @@ private int writeModelSize(String modelId, Long modelSizeBytes, OutputStream out throw new IllegalStateException(message); } - final int NUM_BYTES = 4; - ByteBuffer lengthBuffer = ByteBuffer.allocate(NUM_BYTES); + ByteBuffer lengthBuffer = ByteBuffer.allocate(NUM_BYTES_IN_PRELUDE); lengthBuffer.putInt(modelSizeBytes.intValue()); outputStream.write(lengthBuffer.array()); - bytesWritten.addAndGet(NUM_BYTES); return modelSizeBytes.intValue(); } } From 7a04ec68aed6d02fcf205913607c8b3f0406d67f Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 29 Nov 2021 08:06:28 -0600 Subject: [PATCH 38/55] Extending the timeout waiting for snapshot to be ready (#81018) This commit extends the timeout in SnapshotLifecycleRestIT::testBasicTimeBasedRetention for waiting for a snapshot to be ready from 10 seconds to 60 seconds to avoid occasional 
failures. Closes #79549 --- .../org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java index d207c383d652c..b0c05737c5d72 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java @@ -437,7 +437,7 @@ public void testBasicTimeBasedRetention() throws Exception { } catch (ResponseException e) { fail("expected snapshot to exist but it does not: " + EntityUtils.toString(e.getResponse().getEntity())); } - }); + }, 60, TimeUnit.SECONDS); // Run retention every second ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest(); From 1abbf4b387e74559fde2e20d634fccb38f20947e Mon Sep 17 00:00:00 2001 From: David Kyle Date: Mon, 29 Nov 2021 14:33:42 +0000 Subject: [PATCH 39/55] [ML] Add logging for failing PyTorch test (#81044) For #80819 --- .../xpack/ml/integration/PyTorchModelIT.java | 12 +++++++----- .../ml/action/TransportGetDeploymentStatsAction.java | 12 +++++++----- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java index 776a94254aeb7..ab05d2a2b0527 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java @@ -241,20 +241,22 
@@ public void testDeploymentStats() throws IOException { CheckedBiConsumer assertAtLeast = (modelId, state) -> { startDeployment(modelId, state.toString()); Response response = getTrainedModelStats(modelId); - List> stats = (List>) entityAsMap(response).get("trained_model_stats"); + var responseMap = entityAsMap(response); + List> stats = (List>) responseMap.get("trained_model_stats"); assertThat(stats, hasSize(1)); String statusState = (String) XContentMapValues.extractValue("deployment_stats.allocation_status.state", stats.get(0)); - assertThat(stats.toString(), statusState, is(not(nullValue()))); + assertThat(responseMap.toString(), statusState, is(not(nullValue()))); assertThat(AllocationStatus.State.fromString(statusState), greaterThanOrEqualTo(state)); Integer byteSize = (Integer) XContentMapValues.extractValue("deployment_stats.model_size_bytes", stats.get(0)); - assertThat(byteSize, is(not(nullValue()))); + assertThat(responseMap.toString(), byteSize, is(not(nullValue()))); assertThat(byteSize, equalTo((int) RAW_MODEL_SIZE)); Response humanResponse = client().performRequest(new Request("GET", "/_ml/trained_models/" + modelId + "/_stats?human")); - stats = (List>) entityAsMap(humanResponse).get("trained_model_stats"); + var humanResponseMap = entityAsMap(humanResponse); + stats = (List>) humanResponseMap.get("trained_model_stats"); assertThat(stats, hasSize(1)); String stringBytes = (String) XContentMapValues.extractValue("deployment_stats.model_size", stats.get(0)); - assertThat(stringBytes, is(not(nullValue()))); + assertThat("stats response: " + responseMap + " human stats response" + humanResponseMap, stringBytes, is(not(nullValue()))); assertThat(stringBytes, equalTo("1.5kb")); stopDeployment(model); }; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java index 
6a995e78ed8b2..5490ded56d7ea 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.ml.action; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; @@ -54,6 +56,8 @@ public class TransportGetDeploymentStatsAction extends TransportTasksAction< GetDeploymentStatsAction.Response, AllocationStats> { + private static final Logger logger = LogManager.getLogger(TransportGetDeploymentStatsAction.class); + @Inject public TransportGetDeploymentStatsAction( TransportService transportService, @@ -129,9 +133,6 @@ protected void doExecute( } } - // check request has been satisfied - ExpandedIdsMatcher requiredIdsMatcher = new ExpandedIdsMatcher(tokenizedRequestIds, true); - requiredIdsMatcher.filterMatchedIds(matchedDeploymentIds); if (matchedDeploymentIds.isEmpty()) { listener.onResponse( new GetDeploymentStatsAction.Response(Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), 0L) @@ -154,8 +155,7 @@ protected void doExecute( .collect(Collectors.toList()); // Set the allocation state and reason if we have it for (AllocationStats stats : updatedResponse.getStats().results()) { - Optional modelAllocation = Optional.ofNullable(allocation.getModelAllocation(stats.getModelId())); - TrainedModelAllocation trainedModelAllocation = modelAllocation.orElse(null); + TrainedModelAllocation trainedModelAllocation = allocation.getModelAllocation(stats.getModelId()); if (trainedModelAllocation != null) { stats.setState(trainedModelAllocation.getAllocationState()).setReason(trainedModelAllocation.getReason().orElse(null)); if 
(trainedModelAllocation.getAllocationState().isAnyOf(AllocationState.STARTED, AllocationState.STARTING)) { @@ -274,6 +274,8 @@ static GetDeploymentStatsAction.Response addFailedRoutes( nodeStats.sort(Comparator.comparing(n -> n.getNode().getId())); + // debug logging added for https://github.com/elastic/elasticsearch/issues/80819 + logger.debug("[{}] deployment stats for non-started deployment", modelId); updatedAllocationStats.add(new AllocationStats(modelId, null, null, null, null, allocation.getStartTime(), nodeStats)); } } From 34ae3dd5b13e01878e07ecd494d0be2c92676a8f Mon Sep 17 00:00:00 2001 From: David Roberts Date: Mon, 29 Nov 2021 14:49:52 +0000 Subject: [PATCH 40/55] [ML] Fix acceptable model snapshot versions in ML deprecation checker (#81060) This is a followup to #81039. The same requirement to tolerate model snapshots back to 6.4.0 that applies to the job opening code also applies to the deprecation checker. Again, we tell the user that 7.0.0 is the model snapshot version we support, but we actually have to support versions going back to 6.4.0 because we didn't update the constant in the C++ in 7.0.0. Additionally, the wording of the ML deprecation messages is very slightly updated. The messages are different in the 7.16 branch, where they were updated by #79387. This wording is copied forward to master, but with the tiny change that "Snapshot" is changed to "Model snapshot" in one place. This should make it clearer for users that we're talking about ML model snapshots and not cluster snapshots (which are completely different things). Another reason to change the wording is that the UI is looking for the pattern /[Mm]odel snapshot/ to decide when to display the "Fix" button for upgrading ML model snapshots - see elastic/kibana#119745. 
--- .../xpack/core/ml/MachineLearningField.java | 8 ++++++++ .../xpack/deprecation/MlDeprecationIT.java | 2 +- .../deprecation/MlDeprecationChecker.java | 19 +++++++++++-------- .../ml/action/TransportOpenJobAction.java | 4 ++-- .../task/OpenJobPersistentTasksExecutor.java | 8 ++------ 5 files changed, 24 insertions(+), 17 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java index 3cc7275aafff4..ba24e7eb5a5d2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.core.ml; +import org.elasticsearch.Version; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.hash.MurmurHash3; import org.elasticsearch.common.settings.Setting; @@ -44,6 +45,13 @@ public final class MachineLearningField { License.OperationMode.PLATINUM ); + // Ideally this would be 7.0.0, but it has to be 6.4.0 because due to an oversight it's impossible + // for the Java code to distinguish the model states for versions 6.4.0 to 7.9.3 inclusive. + public static final Version MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION = Version.fromString("6.4.0"); + // We tell the user we support model snapshots newer than 7.0.0 as that's the major version + // boundary, even though behind the scenes we have to support back to 6.4.0. + public static final Version MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION = Version.V_7_0_0; + private MachineLearningField() {} public static String valuesToId(String... 
values) { diff --git a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java index b2162179363b4..3fc880adcf235 100644 --- a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java +++ b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java @@ -105,7 +105,7 @@ public void testMlDeprecationChecks() throws Exception { assertThat(response.getMlSettingsIssues(), hasSize(1)); assertThat( response.getMlSettingsIssues().get(0).getMessage(), - containsString("model snapshot [1] for job [deprecation_check_job] needs to be deleted or upgraded") + containsString("Delete model snapshot [1] or update it to 7.0.0 or greater") ); assertThat(response.getMlSettingsIssues().get(0).getMeta(), equalTo(Map.of("job_id", jobId, "snapshot_id", "1"))); } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java index f04aa8c582367..36092a820844f 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.deprecation; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentElasticsearchExtension; @@ -27,6 +26,9 @@ import java.util.Map; import java.util.Optional; +import static org.elasticsearch.xpack.core.ml.MachineLearningField.MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION; +import static 
org.elasticsearch.xpack.core.ml.MachineLearningField.MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION; + public class MlDeprecationChecker implements DeprecationChecker { static Optional checkDataFeedQuery(DatafeedConfig datafeedConfig, NamedXContentRegistry xContentRegistry) { @@ -67,22 +69,23 @@ static Optional checkDataFeedAggregations(DatafeedConfig dataf } static Optional checkModelSnapshot(ModelSnapshot modelSnapshot) { - if (modelSnapshot.getMinVersion().before(Version.V_7_0_0)) { + if (modelSnapshot.getMinVersion().before(MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION)) { StringBuilder details = new StringBuilder( String.format( Locale.ROOT, - "model snapshot [%s] for job [%s] supports minimum version [%s] and needs to be at least [%s].", + // Important: the Kibana upgrade assistant expects this to match the pattern /[Mm]odel snapshot/ + // and if it doesn't then the expected "Fix" button won't appear for this deprecation. + "Model snapshot [%s] for job [%s] has an obsolete minimum version [%s].", modelSnapshot.getSnapshotId(), modelSnapshot.getJobId(), - modelSnapshot.getMinVersion(), - Version.V_7_0_0 + modelSnapshot.getMinVersion() ) ); if (modelSnapshot.getLatestRecordTimeStamp() != null) { details.append( String.format( Locale.ROOT, - " The model snapshot's latest record timestamp is [%s]", + " The model snapshot's latest record timestamp is [%s].", XContentElasticsearchExtension.DEFAULT_FORMATTER.format(modelSnapshot.getLatestRecordTimeStamp().toInstant()) ) ); @@ -92,9 +95,9 @@ static Optional checkModelSnapshot(ModelSnapshot modelSnapshot DeprecationIssue.Level.CRITICAL, String.format( Locale.ROOT, - "model snapshot [%s] for job [%s] needs to be deleted or upgraded", + "Delete model snapshot [%s] or update it to %s or greater.", modelSnapshot.getSnapshotId(), - modelSnapshot.getJobId() + MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION ), "https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-upgrade-job-model-snapshot.html", details.toString(), diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 3cc30e813ccdd..4a58924651e35 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -54,8 +54,8 @@ import java.util.function.Predicate; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION; -import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION; +import static org.elasticsearch.xpack.core.ml.MachineLearningField.MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION; +import static org.elasticsearch.xpack.core.ml.MachineLearningField.MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION; import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.checkAssignmentState; /* diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java index 6c8ea314dd153..b182ead4ff869 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java @@ -67,6 +67,8 @@ import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.core.ml.MachineLearningField.MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION; +import static org.elasticsearch.xpack.core.ml.MachineLearningField.MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION; import static 
org.elasticsearch.xpack.core.ml.MlTasks.AWAITING_UPGRADE; import static org.elasticsearch.xpack.core.ml.MlTasks.PERSISTENT_TASK_MASTER_NODE_TIMEOUT; import static org.elasticsearch.xpack.ml.job.JobNodeSelector.AWAITING_LAZY_ASSIGNMENT; @@ -74,12 +76,6 @@ public class OpenJobPersistentTasksExecutor extends AbstractJobPersistentTasksExecutor { private static final Logger logger = LogManager.getLogger(OpenJobPersistentTasksExecutor.class); - // Ideally this would be 7.0.0, but it has to be 6.4.0 because due to an oversight it's impossible - // for the Java code to distinguish the model states for versions 6.4.0 to 7.9.3 inclusive. - public static final Version MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION = Version.fromString("6.4.0"); - // We tell the user we support model snapshots newer than 7.0.0 as that's the major version - // boundary, even though behind the scenes we have to support back to 6.4.0. - public static final Version MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION = Version.V_7_0_0; // Resuming a job with a running datafeed from its current snapshot was added in 7.11 and // can only be done if the master node is on or after that version. From 3d0c9efb97041ce116233b7151eab5d1c153ad09 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Mon, 29 Nov 2021 17:13:28 +0200 Subject: [PATCH 41/55] [ML] Fix datafeed preview with remote indices (#81099) In #77109 a bug was fixed with regard to `date_nanos` time fields and the preview datafeed API. However, that fix introduces a new bug. As we are calling the field caps API to find out whether the time field is `date_nanos`, we are not setting the datafeed indices on the request. This may result in erroneous behaviour on local indices and it certainly will result in an error if the datafeed's indices are remote. This commit fixes that problem by setting the datafeed's indices on the field caps request. 
--- .../TransportPreviewDatafeedAction.java | 11 +++++--- .../TransportPreviewDatafeedActionTests.java | 27 +++++++------------ 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java index c7d40b14e51a9..70f6e18d0dc19 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java @@ -119,7 +119,7 @@ private void previewDatafeed(DatafeedConfig datafeedConfig, Job job, ActionListe new DatafeedTimingStatsReporter(new DatafeedTimingStats(datafeedConfig.getJobId()), (ts, refreshPolicy) -> {}), listener.delegateFailure((l, dataExtractorFactory) -> { isDateNanos( - previewDatafeedConfig.getHeaders(), + previewDatafeedConfig, job.getDataDescription().getTimeField(), listener.delegateFailure((l2, isDateNanos) -> { DataExtractor dataExtractor = dataExtractorFactory.newExtractor( @@ -151,13 +151,16 @@ static DatafeedConfig.Builder buildPreviewDatafeed(DatafeedConfig datafeed) { return previewDatafeed; } - private void isDateNanos(Map headers, String timeField, ActionListener listener) { + private void isDateNanos(DatafeedConfig datafeed, String timeField, ActionListener listener) { + FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest(); + fieldCapabilitiesRequest.indices(datafeed.getIndices().toArray(new String[0])).indicesOptions(datafeed.getIndicesOptions()); + fieldCapabilitiesRequest.fields(timeField); executeWithHeadersAsync( - headers, + datafeed.getHeaders(), ML_ORIGIN, client, FieldCapabilitiesAction.INSTANCE, - new FieldCapabilitiesRequest().fields(timeField), + fieldCapabilitiesRequest, ActionListener.wrap(fieldCapsResponse -> { Map timeFieldCaps = 
fieldCapsResponse.getField(timeField); listener.onResponse(timeFieldCaps.keySet().contains(DateFieldMapper.DATE_NANOS_CONTENT_TYPE)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java index a28bdf214b43a..efc48fe5d279d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; import org.junit.Before; -import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import java.io.ByteArrayInputStream; @@ -51,21 +50,15 @@ public void setUpTests() { dataExtractor = mock(DataExtractor.class); actionListener = mock(ActionListener.class); - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocationOnMock) { - PreviewDatafeedAction.Response response = (PreviewDatafeedAction.Response) invocationOnMock.getArguments()[0]; - capturedResponse = response.toString(); - return null; - } + doAnswer((Answer) invocationOnMock -> { + PreviewDatafeedAction.Response response = (PreviewDatafeedAction.Response) invocationOnMock.getArguments()[0]; + capturedResponse = response.toString(); + return null; }).when(actionListener).onResponse(any()); - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocationOnMock) { - capturedFailure = (Exception) invocationOnMock.getArguments()[0]; - return null; - } + doAnswer((Answer) invocationOnMock -> { + capturedFailure = (Exception) invocationOnMock.getArguments()[0]; + return null; }).when(actionListener).onFailure(any()); } @@ -95,7 +88,7 @@ public void 
testBuildPreviewDatafeed_GivenAggregations() { assertThat(previewDatafeed.getChunkingConfig(), equalTo(datafeed.build().getChunkingConfig())); } - public void testPreviewDatafed_GivenEmptyStream() throws IOException { + public void testPreviewDatafeed_GivenEmptyStream() throws IOException { when(dataExtractor.next()).thenReturn(Optional.empty()); TransportPreviewDatafeedAction.previewDatafeed(dataExtractor, actionListener); @@ -105,7 +98,7 @@ public void testPreviewDatafed_GivenEmptyStream() throws IOException { verify(dataExtractor).cancel(); } - public void testPreviewDatafed_GivenNonEmptyStream() throws IOException { + public void testPreviewDatafeed_GivenNonEmptyStream() throws IOException { String streamAsString = "{\"a\":1, \"b\":2} {\"c\":3, \"d\":4}\n{\"e\":5, \"f\":6}"; InputStream stream = new ByteArrayInputStream(streamAsString.getBytes(StandardCharsets.UTF_8)); when(dataExtractor.next()).thenReturn(Optional.of(stream)); @@ -117,7 +110,7 @@ public void testPreviewDatafed_GivenNonEmptyStream() throws IOException { verify(dataExtractor).cancel(); } - public void testPreviewDatafed_GivenFailure() throws IOException { + public void testPreviewDatafeed_GivenFailure() throws IOException { doThrow(new RuntimeException("failed")).when(dataExtractor).next(); TransportPreviewDatafeedAction.previewDatafeed(dataExtractor, actionListener); From 54e0370b3e29b010a0dc1e0029f9571f1bedff0f Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 29 Nov 2021 15:41:33 +0000 Subject: [PATCH 42/55] Track histogram of transport handling times (#80581) Adds to the transport node stats a record of the distribution of the times for which a transport thread was handling a message, represented as a histogram. 
Closes #80428 --- docs/reference/cluster/nodes-stats.asciidoc | 48 ++++++++++ .../test/nodes.stats/60_transport_stats.yml | 45 +++++++++ .../common/network/HandlingTimeTracker.java | 65 +++++++++++++ .../common/network/NetworkService.java | 5 + .../http/AbstractHttpServerTransport.java | 5 +- .../transport/InboundHandler.java | 11 ++- .../transport/OutboundHandler.java | 17 +++- .../elasticsearch/transport/TcpTransport.java | 11 ++- .../transport/TransportStats.java | 91 ++++++++++++++++++- .../cluster/node/stats/NodeStatsTests.java | 14 ++- .../network/HandlingTimeTrackerTests.java | 83 +++++++++++++++++ .../transport/InboundHandlerTests.java | 7 +- .../transport/OutboundHandlerTests.java | 3 +- .../transport/TcpTransportTests.java | 4 +- .../transport/TestTransportChannels.java | 3 +- 15 files changed, 393 insertions(+), 19 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/common/network/HandlingTimeTracker.java create mode 100644 server/src/test/java/org/elasticsearch/common/network/HandlingTimeTrackerTests.java diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index 253890cd2a175..a909335fd30ee 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -1899,6 +1899,54 @@ Size of TX packets sent by the node during internal cluster communication. (integer) Size, in bytes, of TX packets sent by the node during internal cluster communication. + +`inbound_handling_time_histogram`:: +(array) +The distribution of the time spent handling each inbound message on a transport +thread, represented as a histogram. ++ +.Properties of `inbound_handling_time_histogram` +[%collapsible] +======= +`ge_millis`:: +(integer) +The inclusive lower bound of the bucket in milliseconds. Omitted on the first +bucket since this bucket has no lower bound. + +`lt_millis`:: +(integer) +The exclusive upper bound of the bucket in milliseconds. 
Omitted on the last +bucket since this bucket has no upper bound. + +`count`:: +(integer) +The number of times a transport thread took a period of time within the bounds +of this bucket to handle an inbound message. +======= + +`outbound_handling_time_histogram`:: +(array) +The distribution of the time spent sending each outbound transport message on a +transport thread, represented as a histogram. ++ +.Properties of `outbound_handling_time_histogram` +[%collapsible] +======= +`ge_millis`:: +(integer) +The inclusive lower bound of the bucket in milliseconds. Omitted on the first +bucket since this bucket has no lower bound. + +`lt_millis`:: +(integer) +The exclusive upper bound of the bucket in milliseconds. Omitted on the last +bucket since this bucket has no upper bound. + +`count`:: +(integer) +The number of times a transport thread took a period of time within the bounds +of this bucket to send a transport message. +======= ====== [[cluster-nodes-stats-api-response-body-http]] diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/60_transport_stats.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/60_transport_stats.yml index 4f4b97bbcd521..3c3b4e6dacdf5 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/60_transport_stats.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/60_transport_stats.yml @@ -20,3 +20,48 @@ - gte: { nodes.$node_id.transport.tx_count: 0 } - gte: { nodes.$node_id.transport.rx_size_in_bytes: 0 } - gte: { nodes.$node_id.transport.tx_size_in_bytes: 0 } + +--- +"Transport handling time histogram": + - skip: + version: " - 8.0.99" + reason: "handling_time_histograms were added in 8.1" + features: [arbitrary_key] + + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id + + - do: + nodes.stats: + metric: [ transport ] + + - length: { nodes.$node_id.transport.inbound_handling_time_histogram: 18 } + + - gte: { 
nodes.$node_id.transport.inbound_handling_time_histogram.0.count: 0 } + - is_false: nodes.$node_id.transport.inbound_handling_time_histogram.0.ge_millis + - match: { nodes.$node_id.transport.inbound_handling_time_histogram.0.lt_millis: 1 } + + - gte: { nodes.$node_id.transport.inbound_handling_time_histogram.1.count: 0 } + - match: { nodes.$node_id.transport.inbound_handling_time_histogram.1.ge_millis: 1 } + - match: { nodes.$node_id.transport.inbound_handling_time_histogram.1.lt_millis: 2 } + + - gte: { nodes.$node_id.transport.inbound_handling_time_histogram.17.count: 0 } + - match: { nodes.$node_id.transport.inbound_handling_time_histogram.17.ge_millis: 65536 } + - is_false: nodes.$node_id.transport.inbound_handling_time_histogram.17.lt_millis + + + - length: { nodes.$node_id.transport.outbound_handling_time_histogram: 18 } + + - gte: { nodes.$node_id.transport.outbound_handling_time_histogram.0.count: 0 } + - is_false: nodes.$node_id.transport.outbound_handling_time_histogram.0.ge_millis + - match: { nodes.$node_id.transport.outbound_handling_time_histogram.0.lt_millis: 1 } + + - gte: { nodes.$node_id.transport.outbound_handling_time_histogram.1.count: 0 } + - match: { nodes.$node_id.transport.outbound_handling_time_histogram.1.ge_millis: 1 } + - match: { nodes.$node_id.transport.outbound_handling_time_histogram.1.lt_millis: 2 } + + - gte: { nodes.$node_id.transport.outbound_handling_time_histogram.17.count: 0 } + - match: { nodes.$node_id.transport.outbound_handling_time_histogram.17.ge_millis: 65536 } + - is_false: nodes.$node_id.transport.outbound_handling_time_histogram.17.lt_millis diff --git a/server/src/main/java/org/elasticsearch/common/network/HandlingTimeTracker.java b/server/src/main/java/org/elasticsearch/common/network/HandlingTimeTracker.java new file mode 100644 index 0000000000000..a2787cb2d5332 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/network/HandlingTimeTracker.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.network; + +import java.util.concurrent.atomic.LongAdder; + +/** + * Tracks how long message handling takes on a transport thread as a histogram with fixed buckets. + */ +public class HandlingTimeTracker { + + public static int[] getBucketUpperBounds() { + int[] bounds = new int[17]; + for (int i = 0; i < bounds.length; i++) { + bounds[i] = 1 << i; + } + return bounds; + } + + private static int getBucket(long handlingTimeMillis) { + if (handlingTimeMillis <= 0) { + return 0; + } else if (LAST_BUCKET_LOWER_BOUND <= handlingTimeMillis) { + return BUCKET_COUNT - 1; + } else { + return Long.SIZE - Long.numberOfLeadingZeros(handlingTimeMillis); + } + } + + public static final int BUCKET_COUNT = getBucketUpperBounds().length + 1; + + private static final long LAST_BUCKET_LOWER_BOUND = getBucketUpperBounds()[BUCKET_COUNT - 2]; + + private final LongAdder[] buckets; + + public HandlingTimeTracker() { + buckets = new LongAdder[BUCKET_COUNT]; + for (int i = 0; i < BUCKET_COUNT; i++) { + buckets[i] = new LongAdder(); + } + } + + public void addHandlingTime(long handlingTimeMillis) { + buckets[getBucket(handlingTimeMillis)].increment(); + } + + /** + * @return An array of frequencies of handling times in buckets with upper bounds as returned by {@link #getBucketUpperBounds()}, plus + * an extra bucket for handling times longer than the longest upper bound. 
+ */ + public long[] getHistogram() { + final long[] histogram = new long[BUCKET_COUNT]; + for (int i = 0; i < BUCKET_COUNT; i++) { + histogram[i] = buckets[i].longValue(); + } + return histogram; + } + +} diff --git a/server/src/main/java/org/elasticsearch/common/network/NetworkService.java b/server/src/main/java/org/elasticsearch/common/network/NetworkService.java index f816d9446ae9b..25c6aeea4e2db 100644 --- a/server/src/main/java/org/elasticsearch/common/network/NetworkService.java +++ b/server/src/main/java/org/elasticsearch/common/network/NetworkService.java @@ -90,11 +90,16 @@ public interface CustomNameResolver { } private final List customNameResolvers; + private final HandlingTimeTracker handlingTimeTracker = new HandlingTimeTracker(); public NetworkService(List customNameResolvers) { this.customNameResolvers = Objects.requireNonNull(customNameResolvers, "customNameResolvers must be non null"); } + public HandlingTimeTracker getHandlingTimeTracker() { + return handlingTimeTracker; + } + /** * Resolves {@code bindHosts} to a list of internet addresses. The list will * not contain duplicate addresses. 
diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index 5dcffec12bc90..125feb2c9fc77 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -355,11 +355,12 @@ protected void serverAcceptedChannel(HttpChannel httpChannel) { */ public void incomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel) { httpClientStatsTracker.updateClientStats(httpRequest, httpChannel); - final long startTime = threadPool.relativeTimeInMillis(); + final long startTime = threadPool.rawRelativeTimeInMillis(); try { handleIncomingRequest(httpRequest, httpChannel, httpRequest.getInboundException()); } finally { - final long took = threadPool.relativeTimeInMillis() - startTime; + final long took = threadPool.rawRelativeTimeInMillis() - startTime; + networkService.getHandlingTimeTracker().addHandlingTime(took); final long logThreshold = slowLogThresholdMs; if (logThreshold > 0 && took > logThreshold) { logger.warn( diff --git a/server/src/main/java/org/elasticsearch/transport/InboundHandler.java b/server/src/main/java/org/elasticsearch/transport/InboundHandler.java index 78ec5eb377326..ad2e3a9e38a3c 100644 --- a/server/src/main/java/org/elasticsearch/transport/InboundHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/InboundHandler.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; @@ -40,6 +41,7 @@ public class InboundHandler { 
private final TransportHandshaker handshaker; private final TransportKeepAlive keepAlive; private final Transport.ResponseHandlers responseHandlers; + private final HandlingTimeTracker handlingTimeTracker; private final Transport.RequestHandlers requestHandlers; private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; @@ -53,7 +55,8 @@ public class InboundHandler { TransportHandshaker handshaker, TransportKeepAlive keepAlive, Transport.RequestHandlers requestHandlers, - Transport.ResponseHandlers responseHandlers + Transport.ResponseHandlers responseHandlers, + HandlingTimeTracker handlingTimeTracker ) { this.threadPool = threadPool; this.outboundHandler = outboundHandler; @@ -62,6 +65,7 @@ public class InboundHandler { this.keepAlive = keepAlive; this.requestHandlers = requestHandlers; this.responseHandlers = responseHandlers; + this.handlingTimeTracker = handlingTimeTracker; } void setMessageListener(TransportMessageListener listener) { @@ -77,7 +81,7 @@ void setSlowLogThreshold(TimeValue slowLogThreshold) { } void inboundMessage(TcpChannel channel, InboundMessage message) throws Exception { - final long startTime = threadPool.relativeTimeInMillis(); + final long startTime = threadPool.rawRelativeTimeInMillis(); channel.getChannelStats().markAccessed(startTime); TransportLogger.logInboundMessage(channel, message); @@ -155,7 +159,8 @@ private void messageReceived(TcpChannel channel, InboundMessage message, long st } } } finally { - final long took = threadPool.relativeTimeInMillis() - startTime; + final long took = threadPool.rawRelativeTimeInMillis() - startTime; + handlingTimeTracker.addHandlingTime(took); final long logThreshold = slowLogThresholdMs; if (logThreshold > 0 && took > logThreshold) { if (isRequest) { diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java index de46d631ac73b..18aeb12b81645 100644 --- 
a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; import org.elasticsearch.common.network.CloseableChannel; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.transport.NetworkExceptionHelper; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -37,17 +38,26 @@ final class OutboundHandler { private final StatsTracker statsTracker; private final ThreadPool threadPool; private final Recycler recycler; + private final HandlingTimeTracker handlingTimeTracker; private volatile long slowLogThresholdMs = Long.MAX_VALUE; private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; - OutboundHandler(String nodeName, Version version, StatsTracker statsTracker, ThreadPool threadPool, Recycler recycler) { + OutboundHandler( + String nodeName, + Version version, + StatsTracker statsTracker, + ThreadPool threadPool, + Recycler recycler, + HandlingTimeTracker handlingTimeTracker + ) { this.nodeName = nodeName; this.version = version; this.statsTracker = statsTracker; this.threadPool = threadPool; this.recycler = recycler; + this.handlingTimeTracker = handlingTimeTracker; } void setSlowLogThreshold(TimeValue slowLogThreshold) { @@ -168,7 +178,7 @@ private void internalSend( @Nullable OutboundMessage message, ActionListener listener ) { - final long startTime = threadPool.relativeTimeInMillis(); + final long startTime = threadPool.rawRelativeTimeInMillis(); channel.getChannelStats().markAccessed(startTime); final long messageSize = reference.length(); TransportLogger.logOutboundMessage(channel, reference); @@ -196,7 +206,8 @@ public void onFailure(Exception e) { private void maybeLogSlowMessage(boolean 
success) { final long logThreshold = slowLogThresholdMs; if (logThreshold > 0) { - final long took = threadPool.relativeTimeInMillis() - startTime; + final long took = threadPool.rawRelativeTimeInMillis() - startTime; + handlingTimeTracker.addHandlingTime(took); if (took > logThreshold) { logger.warn( "sending transport message [{}] of size [{}] on [{}] took [{}ms] which is above the warn " diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 50361bdda2b7b..6462701265383 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.network.CloseableChannel; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkUtils; @@ -116,6 +117,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private final TransportHandshaker handshaker; private final TransportKeepAlive keepAlive; + private final HandlingTimeTracker outboundHandlingTimeTracker = new HandlingTimeTracker(); private final OutboundHandler outboundHandler; private final InboundHandler inboundHandler; private final ResponseHandlers responseHandlers = new ResponseHandlers(); @@ -141,7 +143,7 @@ public TcpTransport( String nodeName = Node.NODE_NAME_SETTING.get(settings); this.recycler = createRecycler(settings, pageCacheRecycler); - this.outboundHandler = new OutboundHandler(nodeName, version, statsTracker, threadPool, recycler); + this.outboundHandler = new OutboundHandler(nodeName, version, statsTracker, threadPool, recycler, outboundHandlingTimeTracker); 
this.handshaker = new TransportHandshaker( version, threadPool, @@ -165,7 +167,8 @@ public TcpTransport( handshaker, keepAlive, requestHandlers, - responseHandlers + responseHandlers, + networkService.getHandlingTimeTracker() ); } @@ -918,7 +921,9 @@ public final TransportStats getStats() { messagesReceived, bytesRead, messagesSent, - bytesWritten + bytesWritten, + networkService.getHandlingTimeTracker().getHistogram(), + outboundHandlingTimeTracker.getHistogram() ); } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportStats.java b/server/src/main/java/org/elasticsearch/transport/TransportStats.java index 7caf3c241615c..d578c8437da97 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportStats.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportStats.java @@ -8,14 +8,17 @@ package org.elasticsearch.transport; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Arrays; public class TransportStats implements Writeable, ToXContentFragment { @@ -25,14 +28,28 @@ public class TransportStats implements Writeable, ToXContentFragment { private final long rxSize; private final long txCount; private final long txSize; - - public TransportStats(long serverOpen, long totalOutboundConnections, long rxCount, long rxSize, long txCount, long txSize) { + private final long[] inboundHandlingTimeBucketFrequencies; + private final long[] outboundHandlingTimeBucketFrequencies; + + public TransportStats( + long serverOpen, + long totalOutboundConnections, + long rxCount, + long rxSize, + long txCount, + long txSize, + long[] 
inboundHandlingTimeBucketFrequencies, + long[] outboundHandlingTimeBucketFrequencies + ) { this.serverOpen = serverOpen; this.totalOutboundConnections = totalOutboundConnections; this.rxCount = rxCount; this.rxSize = rxSize; this.txCount = txCount; this.txSize = txSize; + this.inboundHandlingTimeBucketFrequencies = inboundHandlingTimeBucketFrequencies; + this.outboundHandlingTimeBucketFrequencies = outboundHandlingTimeBucketFrequencies; + assert assertHistogramsConsistent(); } public TransportStats(StreamInput in) throws IOException { @@ -42,6 +59,20 @@ public TransportStats(StreamInput in) throws IOException { rxSize = in.readVLong(); txCount = in.readVLong(); txSize = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_8_1_0) && in.readBoolean()) { + inboundHandlingTimeBucketFrequencies = new long[HandlingTimeTracker.BUCKET_COUNT]; + for (int i = 0; i < inboundHandlingTimeBucketFrequencies.length; i++) { + inboundHandlingTimeBucketFrequencies[i] = in.readVLong(); + } + outboundHandlingTimeBucketFrequencies = new long[HandlingTimeTracker.BUCKET_COUNT]; + for (int i = 0; i < inboundHandlingTimeBucketFrequencies.length; i++) { + outboundHandlingTimeBucketFrequencies[i] = in.readVLong(); + } + } else { + inboundHandlingTimeBucketFrequencies = new long[0]; + outboundHandlingTimeBucketFrequencies = new long[0]; + } + assert assertHistogramsConsistent(); } @Override @@ -52,6 +83,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(rxSize); out.writeVLong(txCount); out.writeVLong(txSize); + if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + assert (inboundHandlingTimeBucketFrequencies.length > 0) == (outboundHandlingTimeBucketFrequencies.length > 0); + out.writeBoolean(inboundHandlingTimeBucketFrequencies.length > 0); + for (long handlingTimeBucketFrequency : inboundHandlingTimeBucketFrequencies) { + out.writeVLong(handlingTimeBucketFrequency); + } + for (long handlingTimeBucketFrequency : outboundHandlingTimeBucketFrequencies) { + 
out.writeVLong(handlingTimeBucketFrequency); + } + } } public long serverOpen() { @@ -94,6 +135,25 @@ public ByteSizeValue getTxSize() { return txSize(); } + public long[] getInboundHandlingTimeBucketFrequencies() { + return Arrays.copyOf(inboundHandlingTimeBucketFrequencies, inboundHandlingTimeBucketFrequencies.length); + } + + public long[] getOutboundHandlingTimeBucketFrequencies() { + return Arrays.copyOf(outboundHandlingTimeBucketFrequencies, outboundHandlingTimeBucketFrequencies.length); + } + + private boolean assertHistogramsConsistent() { + assert inboundHandlingTimeBucketFrequencies.length == outboundHandlingTimeBucketFrequencies.length; + if (inboundHandlingTimeBucketFrequencies.length == 0) { + // Stats came from before v8.1 + assert Version.CURRENT.major == Version.V_8_0_0.major; + } else { + assert inboundHandlingTimeBucketFrequencies.length == HandlingTimeTracker.BUCKET_COUNT; + } + return true; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.TRANSPORT); @@ -103,10 +163,35 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.RX_SIZE_IN_BYTES, Fields.RX_SIZE, new ByteSizeValue(rxSize)); builder.field(Fields.TX_COUNT, txCount); builder.humanReadableField(Fields.TX_SIZE_IN_BYTES, Fields.TX_SIZE, new ByteSizeValue(txSize)); + if (inboundHandlingTimeBucketFrequencies.length > 0) { + histogramToXContent(builder, inboundHandlingTimeBucketFrequencies, Fields.INBOUND_HANDLING_TIME_HISTOGRAM); + histogramToXContent(builder, outboundHandlingTimeBucketFrequencies, Fields.OUTBOUND_HANDLING_TIME_HISTOGRAM); + } else { + // Stats came from before v8.1 + assert Version.CURRENT.major == Version.V_8_0_0.major; + } builder.endObject(); return builder; } + private void histogramToXContent(XContentBuilder builder, long[] bucketFrequencies, String fieldName) throws IOException { + final int[] bucketBounds = 
HandlingTimeTracker.getBucketUpperBounds(); + assert bucketFrequencies.length == bucketBounds.length + 1; + builder.startArray(fieldName); + for (int i = 0; i < bucketFrequencies.length; i++) { + builder.startObject(); + if (i > 0 && i <= bucketBounds.length) { + builder.field("ge_millis", bucketBounds[i - 1]); + } + if (i < bucketBounds.length) { + builder.field("lt_millis", bucketBounds[i]); + } + builder.field("count", bucketFrequencies[i]); + builder.endObject(); + } + builder.endArray(); + } + static final class Fields { static final String TRANSPORT = "transport"; static final String SERVER_OPEN = "server_open"; @@ -117,5 +202,7 @@ static final class Fields { static final String TX_COUNT = "tx_count"; static final String TX_SIZE = "tx_size"; static final String TX_SIZE_IN_BYTES = "tx_size_in_bytes"; + static final String INBOUND_HANDLING_TIME_HISTOGRAM = "inbound_handling_time_histogram"; + static final String OUTBOUND_HANDLING_TIME_HISTOGRAM = "outbound_handling_time_histogram"; } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index 144c9f0843441..2467aded6292f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.service.ClusterStateUpdateStats; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.core.Tuple; import org.elasticsearch.discovery.DiscoveryStats; import org.elasticsearch.http.HttpStats; @@ -47,6 +48,7 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import java.util.stream.IntStream; import static 
java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -238,6 +240,14 @@ public void testSerialization() throws IOException { assertEquals(nodeStats.getTransport().getServerOpen(), deserializedNodeStats.getTransport().getServerOpen()); assertEquals(nodeStats.getTransport().getTxCount(), deserializedNodeStats.getTransport().getTxCount()); assertEquals(nodeStats.getTransport().getTxSize(), deserializedNodeStats.getTransport().getTxSize()); + assertArrayEquals( + nodeStats.getTransport().getInboundHandlingTimeBucketFrequencies(), + deserializedNodeStats.getTransport().getInboundHandlingTimeBucketFrequencies() + ); + assertArrayEquals( + nodeStats.getTransport().getOutboundHandlingTimeBucketFrequencies(), + deserializedNodeStats.getTransport().getOutboundHandlingTimeBucketFrequencies() + ); } if (nodeStats.getHttp() == null) { assertNull(deserializedNodeStats.getHttp()); @@ -672,7 +682,9 @@ public static NodeStats createNodeStats() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), - randomNonNegativeLong() + randomNonNegativeLong(), + IntStream.range(0, HandlingTimeTracker.BUCKET_COUNT).mapToLong(i -> randomNonNegativeLong()).toArray(), + IntStream.range(0, HandlingTimeTracker.BUCKET_COUNT).mapToLong(i -> randomNonNegativeLong()).toArray() ) : null; HttpStats httpStats = null; diff --git a/server/src/test/java/org/elasticsearch/common/network/HandlingTimeTrackerTests.java b/server/src/test/java/org/elasticsearch/common/network/HandlingTimeTrackerTests.java new file mode 100644 index 0000000000000..b999cf8ff4875 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/network/HandlingTimeTrackerTests.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.network; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.greaterThan; + +public class HandlingTimeTrackerTests extends ESTestCase { + + public void testHistogram() { + final HandlingTimeTracker handlingTimeTracker = new HandlingTimeTracker(); + + assertArrayEquals(new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(0L); + assertArrayEquals(new long[] { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(1L); + assertArrayEquals(new long[] { 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(2L); + assertArrayEquals(new long[] { 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(3L); + assertArrayEquals(new long[] { 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(4L); + assertArrayEquals(new long[] { 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(127L); + assertArrayEquals(new long[] { 1, 1, 2, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(128L); + assertArrayEquals(new long[] { 1, 1, 2, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(65535L); + assertArrayEquals(new long[] { 1, 1, 2, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 
0, 0, 1, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(65536L); + assertArrayEquals(new long[] { 1, 1, 2, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(Long.MAX_VALUE); + assertArrayEquals(new long[] { 1, 1, 2, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(randomLongBetween(65536L, Long.MAX_VALUE)); + assertArrayEquals(new long[] { 1, 1, 2, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 3 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(randomLongBetween(Long.MIN_VALUE, 0L)); + assertArrayEquals(new long[] { 2, 1, 2, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 3 }, handlingTimeTracker.getHistogram()); + } + + public void testHistogramRandom() { + final int[] upperBounds = HandlingTimeTracker.getBucketUpperBounds(); + final long[] expectedCounts = new long[upperBounds.length + 1]; + final HandlingTimeTracker handlingTimeTracker = new HandlingTimeTracker(); + for (int i = between(0, 1000); i > 0; i--) { + final int bucket = between(0, expectedCounts.length - 1); + expectedCounts[bucket] += 1; + + final int lowerBound = bucket == 0 ? 0 : upperBounds[bucket - 1]; + final int upperBound = bucket == upperBounds.length ? randomBoolean() ? 
100000 : Integer.MAX_VALUE : upperBounds[bucket] - 1; + handlingTimeTracker.addHandlingTime(between(lowerBound, upperBound)); + } + + assertArrayEquals(expectedCounts, handlingTimeTracker.getHistogram()); + } + + public void testBoundsConsistency() { + final int[] upperBounds = HandlingTimeTracker.getBucketUpperBounds(); + assertThat(upperBounds[0], greaterThan(0)); + for (int i = 1; i < upperBounds.length; i++) { + assertThat(upperBounds[i], greaterThan(upperBounds[i - 1])); + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java index a482a6bd713eb..fda0090125ab8 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.core.TimeValue; @@ -69,7 +70,8 @@ public void setUp() throws Exception { version, new StatsTracker(), threadPool, - new BytesRefRecycler(PageCacheRecycler.NON_RECYCLING_INSTANCE) + new BytesRefRecycler(PageCacheRecycler.NON_RECYCLING_INSTANCE), + new HandlingTimeTracker() ); requestHandlers = new Transport.RequestHandlers(); responseHandlers = new Transport.ResponseHandlers(); @@ -80,7 +82,8 @@ public void setUp() throws Exception { handshaker, keepAlive, requestHandlers, - responseHandlers + responseHandlers, + new HandlingTimeTracker() ); } diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java index 7808831447e6b..4a85ab868d890 100644 
--- a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.PageCacheRecycler; @@ -71,7 +72,7 @@ public void setUp() throws Exception { node = new DiscoveryNode("", transportAddress, Version.CURRENT); StatsTracker statsTracker = new StatsTracker(); compressionScheme = randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4); - handler = new OutboundHandler("node", Version.CURRENT, statsTracker, threadPool, recycler); + handler = new OutboundHandler("node", Version.CURRENT, statsTracker, threadPool, recycler, new HandlingTimeTracker()); final LongSupplier millisSupplier = () -> TimeValue.nsecToMSec(System.nanoTime()); final InboundDecoder decoder = new InboundDecoder(Version.CURRENT, this.recycler); diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index 47252ceb8a124..814afba514217 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; @@ -541,7 
+542,8 @@ private void testExceptionHandling( Version.CURRENT, new StatsTracker(), testThreadPool, - new BytesRefRecycler(new MockPageCacheRecycler(Settings.EMPTY)) + new BytesRefRecycler(new MockPageCacheRecycler(Settings.EMPTY)), + new HandlingTimeTracker() ) ); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java b/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java index 85a45cd7a691c..15b24b0a77e6f 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java @@ -9,6 +9,7 @@ package org.elasticsearch.transport; import org.elasticsearch.Version; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.threadpool.ThreadPool; @@ -24,7 +25,7 @@ public static TcpTransportChannel newFakeTcpTransportChannel( ) { BytesRefRecycler recycler = new BytesRefRecycler(PageCacheRecycler.NON_RECYCLING_INSTANCE); return new TcpTransportChannel( - new OutboundHandler(nodeName, version, new StatsTracker(), threadPool, recycler), + new OutboundHandler(nodeName, version, new StatsTracker(), threadPool, recycler, new HandlingTimeTracker()), channel, action, requestId, From 498f581bfc747e8442ea2f3dfc20513ad6b0c95d Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 29 Nov 2021 08:13:22 -0800 Subject: [PATCH 43/55] [ML] Updates visiblity of validate API (#81061) --- .../src/main/resources/rest-api-spec/api/ml.validate.json | 2 +- .../main/resources/rest-api-spec/api/ml.validate_detector.json | 2 +- .../resources/rest-api-spec/api/xpack-ml.validate.json | 2 +- .../resources/rest-api-spec/api/xpack-ml.validate_detector.json | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate.json index 5db5f91ddc527..b57f1bb69ffa1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate.json @@ -5,7 +5,7 @@ "description":"Validates an anomaly detection job." }, "stability":"stable", - "visibility":"public", + "visibility":"private", "headers":{ "accept": [ "application/json"], "content_type": ["application/json"] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate_detector.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate_detector.json index 30a24b1c6074a..1400da1ccee09 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate_detector.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate_detector.json @@ -5,7 +5,7 @@ "description":"Validates an anomaly detection detector." }, "stability":"stable", - "visibility":"public", + "visibility":"private", "headers":{ "accept": [ "application/json"], "content_type": ["application/json"] diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate.json index 7c9bbf70f4469..ad337c3c1ad82 100644 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate.json +++ b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate.json @@ -5,7 +5,7 @@ "description":"Validates an anomaly detection job." 
}, "stability":"stable", - "visibility":"public", + "visibility":"private", "headers":{ "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], "content_type": ["application/json"] diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate_detector.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate_detector.json index fe5fdd7a7b7a1..5a06df8977dfc 100644 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate_detector.json +++ b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate_detector.json @@ -5,7 +5,7 @@ "description":"Validates an anomaly detection detector." }, "stability":"stable", - "visibility":"public", + "visibility":"private", "headers":{ "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], "content_type": ["application/json"] From 4d19702221d57522669f7b6a34fc291c0b184037 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Mon, 29 Nov 2021 11:52:02 -0500 Subject: [PATCH 44/55] [DOCS] Update xrefs for snapshot restore docs (#81023) Changes: * Removes a leading slash from the restore snapshot API's prerequisites. * Updates several xrefs that point to redirected pages. --- docs/reference/settings/snapshot-settings.asciidoc | 4 ++-- .../snapshot-restore/apis/restore-snapshot-api.asciidoc | 2 +- docs/reference/snapshot-restore/index.asciidoc | 6 +++--- docs/reference/snapshot-restore/restore-snapshot.asciidoc | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/reference/settings/snapshot-settings.asciidoc b/docs/reference/settings/snapshot-settings.asciidoc index 5af62143ffdd6..1f1acc3653d0d 100644 --- a/docs/reference/settings/snapshot-settings.asciidoc +++ b/docs/reference/settings/snapshot-settings.asciidoc @@ -15,7 +15,7 @@ limit. 
==== {slm-init} settings -The following cluster settings configure <>. [[slm-history-index-enabled]] @@ -27,7 +27,7 @@ to the `slm-history-*` indices. Defaults to `true`. [[slm-retention-schedule]] `slm.retention_schedule`:: (<>, <>) -Controls when the <> runs. +Controls when the <> runs. Can be a periodic or absolute time schedule. Supports all values supported by the <>. Defaults to daily at 1:30am UTC: `0 30 1 * * ?`. diff --git a/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc index 3ae765a83cb39..23d7168a5e384 100644 --- a/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc @@ -92,7 +92,7 @@ the <>: + [source,console] ---- -GET /_index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream +GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream ---- + If no such template exists, you can <> or diff --git a/docs/reference/snapshot-restore/index.asciidoc b/docs/reference/snapshot-restore/index.asciidoc index 52bc5b23d66f4..66e93cb213903 100644 --- a/docs/reference/snapshot-restore/index.asciidoc +++ b/docs/reference/snapshot-restore/index.asciidoc @@ -23,9 +23,9 @@ repository. Before you can take or restore snapshots, you must * Microsoft Azure After you register a snapshot repository, you can use -<> to automatically take and -manage snapshots. You can then <> -to recover or transfer its data. +<> to automatically take and manage +snapshots. You can then <> to +recover or transfer its data. 
[discrete] [[snapshot-contents]] diff --git a/docs/reference/snapshot-restore/restore-snapshot.asciidoc b/docs/reference/snapshot-restore/restore-snapshot.asciidoc index 7f320de5ebca6..0a12f5966617d 100644 --- a/docs/reference/snapshot-restore/restore-snapshot.asciidoc +++ b/docs/reference/snapshot-restore/restore-snapshot.asciidoc @@ -303,7 +303,7 @@ specific indices or data streams instead. If you're restoring to a different cluster, see <> before you start. -. If you <>, you can restore them to each node. This step is optional and requires a <>. + From 5c8e7c686e96bef5193a4b8d7f756e93a3b30a7e Mon Sep 17 00:00:00 2001 From: David Roberts Date: Mon, 29 Nov 2021 17:06:44 +0000 Subject: [PATCH 45/55] [ML] Switch message and detail for model snapshot deprecations (#81108) This is a fix to the fix of #81060. In the original fix where I tried to port the changes from #79387 to master I didn't notice that the text of the message and detail of the deprecation had been largely switched around. My tweaks to the wording in #81060 did not make this major switch. This PR switches the two strings we generate. This is only for 8.0 and 8.1. For 7.16 the discrepancy became obvious in the backport of #81060 to that branch, so it's already correct there. 
--- .../xpack/deprecation/MlDeprecationIT.java | 2 +- .../xpack/deprecation/MlDeprecationChecker.java | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java index 3fc880adcf235..dad4d25afe62b 100644 --- a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java +++ b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java @@ -105,7 +105,7 @@ public void testMlDeprecationChecks() throws Exception { assertThat(response.getMlSettingsIssues(), hasSize(1)); assertThat( response.getMlSettingsIssues().get(0).getMessage(), - containsString("Delete model snapshot [1] or update it to 7.0.0 or greater") + containsString("Model snapshot [1] for job [deprecation_check_job] has an obsolete minimum version") ); assertThat(response.getMlSettingsIssues().get(0).getMeta(), equalTo(Map.of("job_id", jobId, "snapshot_id", "1"))); } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java index 36092a820844f..2988af7a2dab6 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java @@ -73,12 +73,9 @@ static Optional checkModelSnapshot(ModelSnapshot modelSnapshot StringBuilder details = new StringBuilder( String.format( Locale.ROOT, - // Important: the Kibana upgrade assistant expects this to match the pattern /[Mm]odel snapshot/ - // and if it doesn't then the expected "Fix" button won't appear for this 
deprecation. - "Model snapshot [%s] for job [%s] has an obsolete minimum version [%s].", + "Delete model snapshot [%s] or update it to %s or greater.", modelSnapshot.getSnapshotId(), - modelSnapshot.getJobId(), - modelSnapshot.getMinVersion() + MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION ) ); if (modelSnapshot.getLatestRecordTimeStamp() != null) { @@ -95,9 +92,12 @@ static Optional checkModelSnapshot(ModelSnapshot modelSnapshot DeprecationIssue.Level.CRITICAL, String.format( Locale.ROOT, - "Delete model snapshot [%s] or update it to %s or greater.", + // Important: the Kibana upgrade assistant expects this to match the pattern /[Mm]odel snapshot/ + // and if it doesn't then the expected "Fix" button won't appear for this deprecation. + "Model snapshot [%s] for job [%s] has an obsolete minimum version [%s].", modelSnapshot.getSnapshotId(), - MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION + modelSnapshot.getJobId(), + modelSnapshot.getMinVersion() ), "https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-upgrade-job-model-snapshot.html", details.toString(), From 1adb59c041c000f8005d8af2eb6f06f87fc82f2a Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 29 Nov 2021 09:41:03 -0800 Subject: [PATCH 46/55] Split off the values supplier for ScriptDocValues (#80635) This change makes all ScriptDocValues purely a wrapper around a supplier. (Similar to what FieldValues was.) However, there are some important differences: * This is meant to be transitory. As more DocValuesFields are completed, more of the simple suppliers (ones that aren't DocValuesFields) can be removed. * ScriptDocValues is the wrapper rather than the supplier. DocValuesFields are eventually the target suppliers which makes it really easy to remove the simple suppliers once they are no longer necessary. * ScriptDocValues can be easily deprecated and removed without having to move their code to DocValuesFields. Once ScriptDocValues is removed we can remove the supplier code from DocValuesFields. 
* DelegateDocValuesField ensures that any ScriptDocValues field are not supplied by another DocValuesField with an assert statement. This helps us to identify bugs during testing. * ScriptDocValues no longer have setNextDocId. This helps us identify bugs during compilation. * Conversions will not share/wrap suppliers since the suppliers are transitory. --- .../mapper/extras/ScaledFloatFieldMapper.java | 3 +- .../mapper/murmur3/Murmur3FieldMapper.java | 6 +- .../index/fielddata/IpScriptFieldData.java | 15 +- .../index/fielddata/ScriptDocValues.java | 289 ++++++++++++------ .../fielddata/StringScriptFieldData.java | 3 +- .../plain/AbstractLeafGeoPointFieldData.java | 3 +- .../plain/AbstractLeafOrdinalsFieldData.java | 2 +- .../plain/BinaryDVLeafFieldData.java | 2 +- .../plain/StringBinaryDVLeafFieldData.java | 2 +- .../index/mapper/DateFieldMapper.java | 9 +- .../index/mapper/DoubleScriptFieldType.java | 3 +- .../index/mapper/IdFieldMapper.java | 2 +- .../index/mapper/IpFieldMapper.java | 72 +++-- .../index/mapper/LongScriptFieldType.java | 3 +- .../index/mapper/NumberFieldMapper.java | 16 +- .../index/mapper/SeqNoFieldMapper.java | 3 +- .../index/mapper/VersionFieldMapper.java | 3 +- .../script/ScoreScriptUtils.java | 2 +- .../script/field/BinaryDocValuesField.java | 5 +- .../script/field/BooleanDocValuesField.java | 64 ++-- .../script/field/DelegateDocValuesField.java | 5 +- .../ScriptDocValuesGeoPointsTests.java | 19 +- .../fielddata/ScriptDocValuesLongsTests.java | 7 +- .../plain/HalfFloatFielddataTests.java | 5 +- .../index/mapper/DateFieldTypeTests.java | 3 +- .../index/mapper/IpScriptFieldTypeTests.java | 4 +- .../query/SearchExecutionContextTests.java | 16 +- .../sampler/DiversifiedSamplerTests.java | 3 +- .../search/lookup/LeafDocLookupTests.java | 12 +- .../AggregateDoubleMetricFieldMapper.java | 6 +- .../UnsignedLongDocValuesField.java | 35 ++- .../UnsignedLongScriptDocValues.java | 23 +- .../org.elasticsearch.xpack.unsignedlong.txt | 7 +- 
.../versionfield/VersionScriptDocValues.java | 57 ++-- .../VersionStringFieldMapper.java | 7 +- .../AbstractAtomicGeoShapeShapeFieldData.java | 55 +++- .../BinaryDenseVectorScriptDocValues.java | 70 +++-- .../query/DenseVectorScriptDocValues.java | 25 +- .../query/KnnDenseVectorScriptDocValues.java | 73 +++-- .../xpack/vectors/query/ScoreScriptUtils.java | 2 +- .../vectors/query/VectorDVLeafFieldData.java | 29 +- ...BinaryDenseVectorScriptDocValuesTests.java | 25 +- .../query/DenseVectorFunctionTests.java | 7 +- .../KnnDenseVectorScriptDocValuesTests.java | 21 +- 44 files changed, 666 insertions(+), 357 deletions(-) diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index 0b2040c10e3ed..7589d196fd5a1 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.fielddata.LeafNumericFieldData; import org.elasticsearch.index.fielddata.NumericDoubleValues; import org.elasticsearch.index.fielddata.ScriptDocValues.Doubles; +import org.elasticsearch.index.fielddata.ScriptDocValues.DoublesSupplier; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; @@ -269,7 +270,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S return new ScaledFloatIndexFieldData( scaledValues, scalingFactor, - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ); }; } diff --git 
a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index bdbb251e1e478..dafc303dae601 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -19,6 +19,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.ScriptDocValues.Longs; +import org.elasticsearch.index.fielddata.ScriptDocValues.LongsSupplier; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; @@ -83,7 +84,10 @@ public Murmur3FieldMapper build(MapperBuilderContext context) { // this only exists so a check can be done to match the field type to using murmur3 hashing... 
public static class Murmur3FieldType extends MappedFieldType { - public static final ToScriptField TO_SCRIPT_FIELD = (dv, n) -> new DelegateDocValuesField(new Longs(dv), n); + public static final ToScriptField TO_SCRIPT_FIELD = (dv, n) -> new DelegateDocValuesField( + new Longs(new LongsSupplier(dv)), + n + ); private Murmur3FieldType(String name, boolean isStored, Map meta) { super(name, false, isStored, true, TextSearchInfo.NONE, meta); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/IpScriptFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/IpScriptFieldData.java index 6075d5db84106..a0a34ed610288 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/IpScriptFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/IpScriptFieldData.java @@ -14,6 +14,8 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.index.fielddata.ScriptDocValues.Strings; +import org.elasticsearch.index.fielddata.ScriptDocValues.StringsSupplier; import org.elasticsearch.index.mapper.IpFieldMapper; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.script.IpFieldScript; @@ -53,7 +55,7 @@ public BinaryScriptLeafFieldData loadDirect(LeafReaderContext context) throws Ex return new BinaryScriptLeafFieldData() { @Override public DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new IpScriptDocValues(getBytesValues()), name); + return new DelegateDocValuesField(new Strings(new IpSupplier(getBytesValues())), name); } @Override @@ -69,18 +71,19 @@ public ValuesSourceType getValuesSourceType() { } /** - * Doc values implementation for ips. We can't share + * Doc values supplier implementation for ips. 
We can't share * {@link IpFieldMapper.IpFieldType.IpScriptDocValues} because it is based * on global ordinals and we don't have those. */ - public static class IpScriptDocValues extends ScriptDocValues.Strings { - public IpScriptDocValues(SortedBinaryDocValues in) { + public static class IpSupplier extends StringsSupplier { + + public IpSupplier(SortedBinaryDocValues in) { super(in); } @Override - protected String bytesToString(BytesRef bytes) { - InetAddress addr = InetAddressPoint.decode(BytesReference.toBytes(new BytesArray(bytes))); + protected String bytesToString(BytesRef bytesRef) { + InetAddress addr = InetAddressPoint.decode(BytesReference.toBytes(new BytesArray(bytesRef))); return InetAddresses.toAddrString(addr); } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java index 9003a32db09f0..b0a769800825c 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java @@ -17,8 +17,7 @@ import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.geometry.utils.Geohash; -import org.elasticsearch.script.field.BinaryDocValuesField; -import org.elasticsearch.script.field.BooleanDocValuesField; +import org.elasticsearch.script.field.DocValuesField; import java.io.IOException; import java.time.Instant; @@ -39,9 +38,30 @@ public abstract class ScriptDocValues extends AbstractList { /** - * Set the current doc ID. + * Supplies values to different ScriptDocValues as we + * convert them to wrappers around {@link DocValuesField}. 
+ * This allows for different {@link DocValuesField} to implement + * this supplier class in many-to-one relationship since + * {@link DocValuesField} are more specific where + * ({byte, short, int, long, _version, murmur3, etc.} -> {long}) */ - public abstract void setNextDocId(int docId) throws IOException; + public interface Supplier { + void setNextDocId(int docId) throws IOException; + + T getInternal(int index); + + int size(); + } + + protected final Supplier supplier; + + public ScriptDocValues(Supplier supplier) { + this.supplier = supplier; + } + + public Supplier getSupplier() { + return supplier; + } // Throw meaningful exceptions if someone tries to modify the ScriptDocValues. @Override @@ -77,15 +97,13 @@ protected void throwIfEmpty() { } } - public static final class Longs extends ScriptDocValues { + public static class LongsSupplier implements Supplier { + private final SortedNumericDocValues in; private long[] values = new long[0]; private int count; - /** - * Standard constructor. - */ - public Longs(SortedNumericDocValues in) { + public LongsSupplier(SortedNumericDocValues in) { this.in = in; } @@ -105,11 +123,28 @@ public void setNextDocId(int docId) throws IOException { * Set the {@link #size()} and ensure that the {@link #values} array can * store at least that many entries. 
*/ - protected void resize(int newSize) { + private void resize(int newSize) { count = newSize; values = ArrayUtil.grow(values, count); } + @Override + public Long getInternal(int index) { + return values[index]; + } + + @Override + public int size() { + return count; + } + } + + public static class Longs extends ScriptDocValues { + + public Longs(Supplier supplier) { + super(supplier); + } + public long getValue() { return get(0); } @@ -117,16 +152,16 @@ public long getValue() { @Override public Long get(int index) { throwIfEmpty(); - return values[index]; + return supplier.getInternal(index); } @Override public int size() { - return count; + return supplier.size(); } } - public static final class Dates extends ScriptDocValues { + public static class DatesSupplier implements Supplier { private final SortedNumericDocValues in; private final boolean isNanos; @@ -137,32 +172,13 @@ public static final class Dates extends ScriptDocValues { private ZonedDateTime[] dates; private int count; - public Dates(SortedNumericDocValues in, boolean isNanos) { + public DatesSupplier(SortedNumericDocValues in, boolean isNanos) { this.in = in; this.isNanos = isNanos; } - /** - * Fetch the first field value or 0 millis after epoch if there are no - * in. - */ - public ZonedDateTime getValue() { - return get(0); - } - @Override - public ZonedDateTime get(int index) { - if (count == 0) { - throw new IllegalStateException( - "A document doesn't have a value for a field! " - + "Use doc[].size()==0 to check if a document is missing a field!" - ); - } - if (index >= count) { - throw new IndexOutOfBoundsException( - "attempted to fetch the [" + index + "] date when there are only [" + count + "] dates." - ); - } + public ZonedDateTime getInternal(int index) { return dates[index]; } @@ -184,7 +200,7 @@ public void setNextDocId(int docId) throws IOException { /** * Refresh the backing array. Package private so it can be called when {@link Longs} loads dates. 
*/ - void refreshArray() throws IOException { + private void refreshArray() throws IOException { if (count == 0) { return; } @@ -202,13 +218,49 @@ void refreshArray() throws IOException { } } - public static final class Doubles extends ScriptDocValues { + public static class Dates extends ScriptDocValues { + + public Dates(Supplier supplier) { + super(supplier); + } + + /** + * Fetch the first field value or 0 millis after epoch if there are no + * in. + */ + public ZonedDateTime getValue() { + return get(0); + } + + @Override + public ZonedDateTime get(int index) { + if (supplier.size() == 0) { + throw new IllegalStateException( + "A document doesn't have a value for a field! " + + "Use doc[].size()==0 to check if a document is missing a field!" + ); + } + if (index >= supplier.size()) { + throw new IndexOutOfBoundsException( + "attempted to fetch the [" + index + "] date when there are only [" + supplier.size() + "] dates." + ); + } + return supplier.getInternal(index); + } + + @Override + public int size() { + return supplier.size(); + } + } + + public static class DoublesSupplier implements Supplier { private final SortedNumericDoubleValues in; private double[] values = new double[0]; private int count; - public Doubles(SortedNumericDoubleValues in) { + public DoublesSupplier(SortedNumericDoubleValues in) { this.in = in; } @@ -228,13 +280,26 @@ public void setNextDocId(int docId) throws IOException { * Set the {@link #size()} and ensure that the {@link #values} array can * store at least that many entries. 
*/ - protected void resize(int newSize) { + private void resize(int newSize) { count = newSize; values = ArrayUtil.grow(values, count); } - public SortedNumericDoubleValues getInternalValues() { - return this.in; + @Override + public Double getInternal(int index) { + return values[index]; + } + + @Override + public int size() { + return count; + } + } + + public static class Doubles extends ScriptDocValues { + + public Doubles(Supplier supplier) { + super(supplier); } public double getValue() { @@ -243,22 +308,27 @@ public double getValue() { @Override public Double get(int index) { - if (count == 0) { + if (supplier.size() == 0) { throw new IllegalStateException( "A document doesn't have a value for a field! " + "Use doc[].size()==0 to check if a document is missing a field!" ); } - return values[index]; + return supplier.getInternal(index); } @Override public int size() { - return count; + return supplier.size(); } } public abstract static class Geometry extends ScriptDocValues { + + public Geometry(Supplier supplier) { + super(supplier); + } + /** Returns the dimensional type of this geometry */ public abstract int getDimensionalType(); @@ -275,7 +345,14 @@ public abstract static class Geometry extends ScriptDocValues { public abstract double getMercatorHeight(); } - public static final class GeoPoints extends Geometry { + public interface GeometrySupplier extends Supplier { + + GeoPoint getCentroid(); + + GeoBoundingBox getBoundingBox(); + } + + public static class GeoPointsSupplier implements GeometrySupplier { private final MultiGeoPointValues in; private GeoPoint[] values = new GeoPoint[0]; @@ -283,7 +360,7 @@ public static final class GeoPoints extends Geometry { private final GeoBoundingBox boundingBox = new GeoBoundingBox(new GeoPoint(), new GeoPoint()); private int count; - public GeoPoints(MultiGeoPointValues in) { + public GeoPointsSupplier(MultiGeoPointValues in) { this.in = in; } @@ -335,7 +412,7 @@ private void setMultiValue() throws IOException { * 
Set the {@link #size()} and ensure that the {@link #values} array can * store at least that many entries. */ - protected void resize(int newSize) { + private void resize(int newSize) { count = newSize; if (newSize > values.length) { int oldLength = values.length; @@ -346,6 +423,36 @@ protected void resize(int newSize) { } } + @Override + public GeoPoint getInternal(int index) { + return values[index]; + } + + @Override + public GeoPoint getCentroid() { + return centroid; + } + + @Override + public GeoBoundingBox getBoundingBox() { + return boundingBox; + } + + @Override + public int size() { + return count; + } + } + + public static class GeoPoints extends Geometry { + + private final GeometrySupplier geometrySupplier; + + public GeoPoints(GeometrySupplier supplier) { + super(supplier); + geometrySupplier = supplier; + } + public GeoPoint getValue() { return get(0); } @@ -376,19 +483,19 @@ public double getLon() { @Override public GeoPoint get(int index) { - if (count == 0) { + if (supplier.size() == 0) { throw new IllegalStateException( "A document doesn't have a value for a field! " + "Use doc[].size()==0 to check if a document is missing a field!" ); } - final GeoPoint point = values[index]; + final GeoPoint point = supplier.getInternal(index); return new GeoPoint(point.lat(), point.lon()); } @Override public int size() { - return count; + return supplier.size(); } public double arcDistance(double lat, double lon) { @@ -434,7 +541,7 @@ public int getDimensionalType() { @Override public GeoPoint getCentroid() { - return size() == 0 ? null : centroid; + return size() == 0 ? null : geometrySupplier.getCentroid(); } @Override @@ -449,21 +556,14 @@ public double getMercatorHeight() { @Override public GeoBoundingBox getBoundingBox() { - return size() == 0 ? null : boundingBox; + return size() == 0 ? 
null : geometrySupplier.getBoundingBox(); } } - public static final class Booleans extends ScriptDocValues { - - private final BooleanDocValuesField booleanDocValuesField; + public static class Booleans extends ScriptDocValues { - public Booleans(BooleanDocValuesField booleanDocValuesField) { - this.booleanDocValuesField = booleanDocValuesField; - } - - @Override - public void setNextDocId(int docId) throws IOException { - throw new UnsupportedOperationException(); + public Booleans(Supplier supplier) { + super(supplier); } public boolean getValue() { @@ -474,22 +574,22 @@ public boolean getValue() { @Override public Boolean get(int index) { throwIfEmpty(); - return booleanDocValuesField.getInternal(index); + return supplier.getInternal(index); } @Override public int size() { - return booleanDocValuesField.size(); + return supplier.size(); } } - abstract static class BinaryScriptDocValues extends ScriptDocValues { + public static class StringsSupplier implements Supplier { private final SortedBinaryDocValues in; - protected BytesRefBuilder[] values = new BytesRefBuilder[0]; - protected int count; + private BytesRefBuilder[] values = new BytesRefBuilder[0]; + private int count; - BinaryScriptDocValues(SortedBinaryDocValues in) { + public StringsSupplier(SortedBinaryDocValues in) { this.in = in; } @@ -512,7 +612,7 @@ public void setNextDocId(int docId) throws IOException { * Set the {@link #size()} and ensure that the {@link #values} array can * store at least that many entries. 
*/ - protected void resize(int newSize) { + private void resize(int newSize) { count = newSize; if (newSize > values.length) { final int oldLength = values.length; @@ -523,51 +623,52 @@ protected void resize(int newSize) { } } + protected String bytesToString(BytesRef bytesRef) { + return bytesRef.utf8ToString(); + } + + @Override + public String getInternal(int index) { + return bytesToString(values[index].toBytesRef()); + } + @Override public int size() { return count; } } - public static class Strings extends BinaryScriptDocValues { - public Strings(SortedBinaryDocValues in) { - super(in); + public static class Strings extends ScriptDocValues { + + public Strings(Supplier supplier) { + super(supplier); + } + + public String getValue() { + return get(0); } @Override - public final String get(int index) { - if (count == 0) { + public String get(int index) { + if (supplier.size() == 0) { throw new IllegalStateException( "A document doesn't have a value for a field! " + "Use doc[].size()==0 to check if a document is missing a field!" ); } - return bytesToString(values[index].get()); - } - - /** - * Convert the stored bytes to a String. 
- */ - protected String bytesToString(BytesRef bytes) { - return bytes.utf8ToString(); + return supplier.getInternal(index); } - public final String getValue() { - return get(0); + @Override + public int size() { + return supplier.size(); } } public static final class BytesRefs extends ScriptDocValues { - private final BinaryDocValuesField binaryDocValuesField; - - public BytesRefs(BinaryDocValuesField binaryDocValuesField) { - this.binaryDocValuesField = binaryDocValuesField; - } - - @Override - public void setNextDocId(int docId) throws IOException { - throw new UnsupportedOperationException(); + public BytesRefs(Supplier supplier) { + super(supplier); } public BytesRef getValue() { @@ -578,12 +679,12 @@ public BytesRef getValue() { @Override public BytesRef get(int index) { throwIfEmpty(); - return binaryDocValuesField.getInternal(index); + return supplier.getInternal(index); } @Override public int size() { - return binaryDocValuesField.size(); + return supplier.size(); } } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/StringScriptFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/StringScriptFieldData.java index 641801ad32a5e..5b7486352a7f9 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/StringScriptFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/StringScriptFieldData.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.fielddata; import org.apache.lucene.index.LeafReaderContext; +import org.elasticsearch.index.fielddata.ScriptDocValues.StringsSupplier; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.script.StringFieldScript; import org.elasticsearch.script.field.DelegateDocValuesField; @@ -45,7 +46,7 @@ public BinaryScriptLeafFieldData loadDirect(LeafReaderContext context) throws Ex return new BinaryScriptLeafFieldData() { @Override public DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new 
ScriptDocValues.Strings(getBytesValues()), name); + return new DelegateDocValuesField(new ScriptDocValues.Strings(new StringsSupplier(getBytesValues())), name); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafGeoPointFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafGeoPointFieldData.java index fd5018dff3bae..b115c57cd191d 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafGeoPointFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafGeoPointFieldData.java @@ -12,6 +12,7 @@ import org.elasticsearch.index.fielddata.LeafGeoPointFieldData; import org.elasticsearch.index.fielddata.MultiGeoPointValues; import org.elasticsearch.index.fielddata.ScriptDocValues; +import org.elasticsearch.index.fielddata.ScriptDocValues.GeoPointsSupplier; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.script.field.DelegateDocValuesField; import org.elasticsearch.script.field.DocValuesField; @@ -28,7 +29,7 @@ public final SortedBinaryDocValues getBytesValues() { @Override public final DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new ScriptDocValues.GeoPoints(getGeoPointValues()), name); + return new DelegateDocValuesField(new ScriptDocValues.GeoPoints(new GeoPointsSupplier(getGeoPointValues())), name); } public static LeafGeoPointFieldData empty(final int maxDoc) { diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafOrdinalsFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafOrdinalsFieldData.java index ff479cde06669..0303db063455c 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafOrdinalsFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafOrdinalsFieldData.java @@ -26,7 +26,7 @@ public abstract class 
AbstractLeafOrdinalsFieldData implements LeafOrdinalsField public static final Function> DEFAULT_SCRIPT_FUNCTION = ((Function< SortedSetDocValues, - SortedBinaryDocValues>) FieldData::toString).andThen(ScriptDocValues.Strings::new); + SortedBinaryDocValues>) FieldData::toString).andThen(ScriptDocValues.StringsSupplier::new).andThen(ScriptDocValues.Strings::new); private final Function> scriptFunction; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVLeafFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVLeafFieldData.java index b7db7ba8ee54a..5e245a5de6c8f 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVLeafFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVLeafFieldData.java @@ -46,7 +46,7 @@ public SortedBinaryDocValues getBytesValues() { @Override public DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new ScriptDocValues.Strings(getBytesValues()), name); + return new DelegateDocValuesField(new ScriptDocValues.Strings(new ScriptDocValues.StringsSupplier(getBytesValues())), name); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/StringBinaryDVLeafFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/StringBinaryDVLeafFieldData.java index d1861a23c16e7..bbd5dd486e034 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/StringBinaryDVLeafFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/StringBinaryDVLeafFieldData.java @@ -20,6 +20,6 @@ final class StringBinaryDVLeafFieldData extends AbstractBinaryDVLeafFieldData { @Override public DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new ScriptDocValues.Strings(getBytesValues()), name); + return new DelegateDocValuesField(new ScriptDocValues.Strings(new ScriptDocValues.StringsSupplier(getBytesValues())), 
name); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index fa9d406f2ad55..71fca6ccec7e0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.ScriptDocValues.Dates; +import org.elasticsearch.index.fielddata.ScriptDocValues.DatesSupplier; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.query.DateRangeIncludingNowQuery; import org.elasticsearch.index.query.QueryRewriteContext; @@ -80,7 +81,7 @@ public final class DateFieldMapper extends FieldMapper { private static final DateMathParser EPOCH_MILLIS_PARSER = DateFormatter.forPattern("epoch_millis").toDateMathParser(); public enum Resolution { - MILLISECONDS(CONTENT_TYPE, NumericType.DATE, (dv, n) -> new DelegateDocValuesField(new Dates(dv, false), n)) { + MILLISECONDS(CONTENT_TYPE, NumericType.DATE, (dv, n) -> new DelegateDocValuesField(new Dates(new DatesSupplier(dv, false)), n)) { @Override public long convert(Instant instant) { return instant.toEpochMilli(); @@ -111,7 +112,11 @@ protected Query distanceFeatureQuery(String field, float boost, long origin, Tim return LongPoint.newDistanceFeatureQuery(field, boost, origin, pivot.getMillis()); } }, - NANOSECONDS(DATE_NANOS_CONTENT_TYPE, NumericType.DATE_NANOSECONDS, (dv, n) -> new DelegateDocValuesField(new Dates(dv, true), n)) { + NANOSECONDS( + DATE_NANOS_CONTENT_TYPE, + NumericType.DATE_NANOSECONDS, + (dv, n) -> new DelegateDocValuesField(new Dates(new DatesSupplier(dv, true)), n) + ) { @Override public long convert(Instant instant) { return toLong(instant); diff --git 
a/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java index c28a4c4de9727..9979124c2d7e5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.index.fielddata.DoubleScriptFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues.Doubles; +import org.elasticsearch.index.fielddata.ScriptDocValues.DoublesSupplier; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.script.CompositeFieldScript; @@ -103,7 +104,7 @@ public DoubleScriptFieldData.Builder fielddataBuilder(String fullyQualifiedIndex return new DoubleScriptFieldData.Builder( name(), leafFactory(searchLookup.get()), - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java index df0c2ae158a13..13f672407ef67 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java @@ -220,7 +220,7 @@ public long ramBytesUsed() { @Override public DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new ScriptDocValues.Strings(getBytesValues()), name); + return new DelegateDocValuesField(new ScriptDocValues.Strings(new ScriptDocValues.StringsSupplier(getBytesValues())), name); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java 
b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index b1497c8e988dc..f52baf430783d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -29,6 +29,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; +import org.elasticsearch.index.mapper.IpFieldMapper.IpFieldType.IpScriptDocValues.IpSupplier; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.script.IpFieldScript; import org.elasticsearch.script.Script; @@ -351,27 +352,52 @@ public static Query rangeQuery( public static final class IpScriptDocValues extends ScriptDocValues { - private final SortedSetDocValues in; - private long[] ords = new long[0]; - private int count; + public static final class IpSupplier implements ScriptDocValues.Supplier { - public IpScriptDocValues(SortedSetDocValues in) { - this.in = in; - } + private final SortedSetDocValues in; + private long[] ords = new long[0]; + private int count; - @Override - public void setNextDocId(int docId) throws IOException { - count = 0; - if (in.advanceExact(docId)) { - for (long ord = in.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = in.nextOrd()) { - ords = ArrayUtil.grow(ords, count + 1); - ords[count++] = ord; + public IpSupplier(SortedSetDocValues in) { + this.in = in; + } + + @Override + public void setNextDocId(int docId) throws IOException { + count = 0; + if (in.advanceExact(docId)) { + for (long ord = in.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = in.nextOrd()) { + ords = ArrayUtil.grow(ords, count + 1); + ords[count++] = ord; + } + } + } + + @Override + public String getInternal(int index) { + try { + BytesRef encoded = in.lookupOrd(ords[index]); + InetAddress address = InetAddressPoint.decode( + 
Arrays.copyOfRange(encoded.bytes, encoded.offset, encoded.offset + encoded.length) + ); + return InetAddresses.toAddrString(address); + } catch (IOException e) { + throw new RuntimeException(e); } } + + @Override + public int size() { + return count; + } + } + + public IpScriptDocValues(IpSupplier supplier) { + super(supplier); } public String getValue() { - if (count == 0) { + if (supplier.size() == 0) { return null; } else { return get(0); @@ -380,27 +406,23 @@ public String getValue() { @Override public String get(int index) { - try { - BytesRef encoded = in.lookupOrd(ords[index]); - InetAddress address = InetAddressPoint.decode( - Arrays.copyOfRange(encoded.bytes, encoded.offset, encoded.offset + encoded.length) - ); - return InetAddresses.toAddrString(address); - } catch (IOException e) { - throw new RuntimeException(e); - } + return supplier.getInternal(index); } @Override public int size() { - return count; + return supplier.size(); } } @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { failIfNoDocValues(); - return new SortedSetOrdinalsIndexFieldData.Builder(name(), IpScriptDocValues::new, CoreValuesSourceType.IP); + return new SortedSetOrdinalsIndexFieldData.Builder( + name(), + s -> new IpScriptDocValues(new IpSupplier(s)), + CoreValuesSourceType.IP + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java index 03abc9c7c0ae9..4791eb5398ff5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.index.fielddata.LongScriptFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues.Longs; +import 
org.elasticsearch.index.fielddata.ScriptDocValues.LongsSupplier; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.script.CompositeFieldScript; @@ -98,7 +99,7 @@ public LongScriptFieldData.Builder fielddataBuilder(String fullyQualifiedIndexNa return new LongScriptFieldData.Builder( name(), leafFactory(searchLookup.get()), - (dv, n) -> new DelegateDocValuesField(new Longs(dv), n) + (dv, n) -> new DelegateDocValuesField(new Longs(new LongsSupplier(dv)), n) ); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index b8253c76ceac2..075d475029930 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -36,7 +36,9 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.ScriptDocValues.Doubles; +import org.elasticsearch.index.fielddata.ScriptDocValues.DoublesSupplier; import org.elasticsearch.index.fielddata.ScriptDocValues.Longs; +import org.elasticsearch.index.fielddata.ScriptDocValues.LongsSupplier; import org.elasticsearch.index.fielddata.plain.SortedDoublesIndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.mapper.TimeSeriesParams.MetricType; @@ -335,7 +337,7 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedDoublesIndexFieldData.Builder( name, numericType(), - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ); } @@ -446,7 +448,7 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new 
SortedDoublesIndexFieldData.Builder( name, numericType(), - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ); } @@ -540,7 +542,7 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedDoublesIndexFieldData.Builder( name, numericType(), - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ); } @@ -621,7 +623,7 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedNumericIndexFieldData.Builder( name, numericType(), - (dv, n) -> new DelegateDocValuesField(new Longs(dv), n) + (dv, n) -> new DelegateDocValuesField(new Longs(new LongsSupplier(dv)), n) ); } }, @@ -692,7 +694,7 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedNumericIndexFieldData.Builder( name, numericType(), - (dv, n) -> new DelegateDocValuesField(new Longs(dv), n) + (dv, n) -> new DelegateDocValuesField(new Longs(new LongsSupplier(dv)), n) ); } }, @@ -822,7 +824,7 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedNumericIndexFieldData.Builder( name, numericType(), - (dv, n) -> new DelegateDocValuesField(new Longs(dv), n) + (dv, n) -> new DelegateDocValuesField(new Longs(new LongsSupplier(dv)), n) ); } }, @@ -922,7 +924,7 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedNumericIndexFieldData.Builder( name, numericType(), - (dv, n) -> new DelegateDocValuesField(new Longs(dv), n) + (dv, n) -> new DelegateDocValuesField(new Longs(new LongsSupplier(dv)), n) ); } }; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java index a30aa20d0d1e7..e21186ca678bd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java +++ 
b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java @@ -18,6 +18,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.ScriptDocValues.Longs; +import org.elasticsearch.index.fielddata.ScriptDocValues.LongsSupplier; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -188,7 +189,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S return new SortedNumericIndexFieldData.Builder( name(), NumericType.LONG, - (dv, n) -> new DelegateDocValuesField(new Longs(dv), n) + (dv, n) -> new DelegateDocValuesField(new Longs(new LongsSupplier(dv)), n) ); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java index 404c0ca7d7cb3..aa82c3371c508 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java @@ -14,6 +14,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.ScriptDocValues.Longs; +import org.elasticsearch.index.fielddata.ScriptDocValues.LongsSupplier; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.SearchExecutionContext; @@ -62,7 +63,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S return new SortedNumericIndexFieldData.Builder( name(), NumericType.LONG, - (dv, n) -> new DelegateDocValuesField(new Longs(dv), n) + (dv, n) -> new 
DelegateDocValuesField(new Longs(new LongsSupplier(dv)), n) ); } } diff --git a/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java b/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java index 51d81c87fcddb..6837281bf1523 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java @@ -57,7 +57,7 @@ public RandomScoreField(ScoreScript scoreScript, int seed, String fieldName) { public double randomScore() { try { - docValues.setNextDocId(scoreScript._getDocId()); + docValues.getSupplier().setNextDocId(scoreScript._getDocId()); String seedValue = String.valueOf(docValues.get(0)); int hash = StringHelper.murmurhash3_x86_32(new BytesRef(seedValue), saltedSeed); return (hash & 0x00FFFFFF) / (float) (1 << 24); // only use the lower 24 bits to construct a float from 0.0-1.0 diff --git a/server/src/main/java/org/elasticsearch/script/field/BinaryDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/BinaryDocValuesField.java index 565afea2b3ade..8db429a13639d 100644 --- a/server/src/main/java/org/elasticsearch/script/field/BinaryDocValuesField.java +++ b/server/src/main/java/org/elasticsearch/script/field/BinaryDocValuesField.java @@ -20,7 +20,7 @@ import java.util.Iterator; import java.util.NoSuchElementException; -public class BinaryDocValuesField implements DocValuesField { +public class BinaryDocValuesField implements DocValuesField, ScriptDocValues.Supplier { private final SortedBinaryDocValues input; private final String name; @@ -74,8 +74,9 @@ public ScriptDocValues getScriptDocValues() { return bytesRefs; } - // this method is required to support the ByteRef return values + // this method is required to support the BytesRef return values // for the old-style "doc" access in ScriptDocValues + @Override public BytesRef getInternal(int index) { return values[index].toBytesRef(); } diff --git 
a/server/src/main/java/org/elasticsearch/script/field/BooleanDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/BooleanDocValuesField.java index 6eed4eef37e3a..8dd3674feb470 100644 --- a/server/src/main/java/org/elasticsearch/script/field/BooleanDocValuesField.java +++ b/server/src/main/java/org/elasticsearch/script/field/BooleanDocValuesField.java @@ -17,7 +17,7 @@ import java.util.Iterator; import java.util.NoSuchElementException; -public class BooleanDocValuesField implements DocValuesField { +public class BooleanDocValuesField implements DocValuesField, ScriptDocValues.Supplier { private final SortedNumericDocValues input; private final String name; @@ -25,6 +25,8 @@ public class BooleanDocValuesField implements DocValuesField { private boolean[] values = new boolean[0]; private int count; + // used for backwards compatibility for old-style "doc" access + // as a delegate to this field class private ScriptDocValues.Booleans booleans = null; public BooleanDocValuesField(SortedNumericDocValues input, String name) { @@ -32,11 +34,6 @@ public BooleanDocValuesField(SortedNumericDocValues input, String name) { this.name = name; } - /** - * Set the current document ID. - * - * @param docId - */ @Override public void setNextDocId(int docId) throws IOException { if (input.advanceExact(docId)) { @@ -58,11 +55,6 @@ private void resize(int newSize) { } } - /** - * Returns a {@code ScriptDocValues} of the appropriate type for this field. - * This is used to support backwards compatibility for accessing field values - * through the {@code doc} variable. - */ @Override public ScriptDocValues getScriptDocValues() { if (booleans == null) { @@ -72,35 +64,40 @@ public ScriptDocValues getScriptDocValues() { return booleans; } - /** - * Returns the name of this field. 
- */ + // this method is required to support the Boolean return values + // for the old-style "doc" access in ScriptDocValues + @Override + public Boolean getInternal(int index) { + return values[index]; + } + @Override public String getName() { return name; } - /** - * Returns {@code true} if this field has no values, otherwise {@code false}. - */ @Override public boolean isEmpty() { return count == 0; } - /** - * Returns the number of values this field has. - */ @Override public int size() { return count; } - /** - * Returns an iterator over elements of type {@code T}. - * - * @return an Iterator. - */ + public boolean get(boolean defaultValue) { + return get(0, defaultValue); + } + + public boolean get(int index, boolean defaultValue) { + if (isEmpty() || index < 0 || index >= count) { + return defaultValue; + } + + return values[index]; + } + @Override public Iterator iterator() { return new Iterator() { @@ -120,21 +117,4 @@ public Boolean next() { } }; } - - public boolean get(boolean defaultValue) { - return get(0, defaultValue); - } - - public boolean get(int index, boolean defaultValue) { - if (isEmpty() || index < 0 || index >= count) { - return defaultValue; - } - - return values[index]; - } - - // this method is required to support the old-style "doc" access in ScriptDocValues - public boolean getInternal(int index) { - return values[index]; - } } diff --git a/server/src/main/java/org/elasticsearch/script/field/DelegateDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/DelegateDocValuesField.java index b90920f3fec1f..db0dbe5d07a5c 100644 --- a/server/src/main/java/org/elasticsearch/script/field/DelegateDocValuesField.java +++ b/server/src/main/java/org/elasticsearch/script/field/DelegateDocValuesField.java @@ -23,13 +23,16 @@ public class DelegateDocValuesField implements DocValuesField { private final String name; public DelegateDocValuesField(ScriptDocValues scriptDocValues, String name) { + // Suppliers provided via 
ScriptDocValues should never be a DocValuesField + // as we expect DelegateDocValuesField to only support old-style ScriptDocValues + assert scriptDocValues.getSupplier() instanceof DocValuesField == false; this.scriptDocValues = scriptDocValues; this.name = name; } @Override public void setNextDocId(int docId) throws IOException { - scriptDocValues.setNextDocId(docId); + scriptDocValues.getSupplier().setNextDocId(docId); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesGeoPointsTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesGeoPointsTests.java index 0428804bc08fc..3d15faa3146bd 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesGeoPointsTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesGeoPointsTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.index.fielddata.ScriptDocValues.GeoPoints; +import org.elasticsearch.index.fielddata.ScriptDocValues.GeoPointsSupplier; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -62,11 +63,11 @@ public void testGeoGetLatLon() throws IOException { GeoPoint[][] points = { { new GeoPoint(lat1, lon1), new GeoPoint(lat2, lon2) } }; final MultiGeoPointValues values = wrap(points); - final ScriptDocValues.GeoPoints script = new ScriptDocValues.GeoPoints(values); + final ScriptDocValues.GeoPoints script = new ScriptDocValues.GeoPoints(new GeoPointsSupplier(values)); - script.setNextDocId(1); + script.getSupplier().setNextDocId(1); assertEquals(true, script.isEmpty()); - script.setNextDocId(0); + script.getSupplier().setNextDocId(0); assertEquals(false, script.isEmpty()); assertEquals(new GeoPoint(lat1, lon1), script.getValue()); assertEquals(lat1, script.getLat(), 0); @@ -80,12 +81,12 @@ public void testGeoDistance() throws IOException { final double lon = randomLon(); 
GeoPoint[][] points = { { new GeoPoint(lat, lon) } }; final MultiGeoPointValues values = wrap(points); - final ScriptDocValues.GeoPoints script = new ScriptDocValues.GeoPoints(values); - script.setNextDocId(0); + final ScriptDocValues.GeoPoints script = new ScriptDocValues.GeoPoints(new GeoPointsSupplier(values)); + script.getSupplier().setNextDocId(0); GeoPoint[][] points2 = { new GeoPoint[0] }; - final ScriptDocValues.GeoPoints emptyScript = new ScriptDocValues.GeoPoints(wrap(points2)); - emptyScript.setNextDocId(0); + final ScriptDocValues.GeoPoints emptyScript = new ScriptDocValues.GeoPoints(new GeoPointsSupplier(wrap(points2))); + emptyScript.getSupplier().setNextDocId(0); final double otherLat = randomLat(); final double otherLon = randomLon(); @@ -115,9 +116,9 @@ public void testMissingValues() throws IOException { points[d][i] = new GeoPoint(randomLat(), randomLon()); } } - final ScriptDocValues.GeoPoints geoPoints = new GeoPoints(wrap(points)); + final ScriptDocValues.GeoPoints geoPoints = new GeoPoints(new GeoPointsSupplier(wrap(points))); for (int d = 0; d < points.length; d++) { - geoPoints.setNextDocId(d); + geoPoints.getSupplier().setNextDocId(d); if (points[d].length > 0) { assertEquals(points[d][0], geoPoints.getValue()); } else { diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java index 0e8fe7772fbe7..e2460614e275a 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.fielddata; import org.elasticsearch.index.fielddata.ScriptDocValues.Longs; +import org.elasticsearch.index.fielddata.ScriptDocValues.LongsSupplier; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -27,7 +28,7 @@ public void testLongs() throws 
IOException { for (int round = 0; round < 10; round++) { int d = between(0, values.length - 1); - longs.setNextDocId(d); + longs.getSupplier().setNextDocId(d); if (values[d].length > 0) { assertEquals(values[d][0], longs.getValue()); assertEquals(values[d][0], (long) longs.get(0)); @@ -56,7 +57,7 @@ public void testLongs() throws IOException { } private Longs wrap(long[][] values) { - return new Longs(new AbstractSortedNumericDocValues() { + return new Longs(new LongsSupplier(new AbstractSortedNumericDocValues() { long[] current; int i; @@ -76,6 +77,6 @@ public int docValueCount() { public long nextValue() { return current[i++]; } - }); + })); } } diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/plain/HalfFloatFielddataTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/plain/HalfFloatFielddataTests.java index 0860702068a6e..9790ac29f457d 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/plain/HalfFloatFielddataTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/plain/HalfFloatFielddataTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.ScriptDocValues.Doubles; +import org.elasticsearch.index.fielddata.ScriptDocValues.DoublesSupplier; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.script.field.DelegateDocValuesField; @@ -42,7 +43,7 @@ public void testSingleValued() throws IOException { SortedNumericDoubleValues values = new SortedDoublesIndexFieldData.SortedNumericHalfFloatFieldData( reader, "half_float", - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ).getDoubleValues(); assertNotNull(FieldData.unwrapSingleton(values)); assertTrue(values.advanceExact(0)); @@ -67,7 +68,7 @@ public 
void testMultiValued() throws IOException { SortedNumericDoubleValues values = new SortedDoublesIndexFieldData.SortedNumericHalfFloatFieldData( reader, "half_float", - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ).getDoubleValues(); assertNull(FieldData.unwrapSingleton(values)); assertTrue(values.advanceExact(0)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java index 3547b6b89925d..1a49f2aea0f4c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.LeafNumericFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues.Dates; +import org.elasticsearch.index.fielddata.ScriptDocValues.DatesSupplier; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.mapper.DateFieldMapper.DateFieldType; import org.elasticsearch.index.mapper.DateFieldMapper.Resolution; @@ -338,7 +339,7 @@ public void testDateNanoDocValues() throws IOException { SortedNumericIndexFieldData fieldData = new SortedNumericIndexFieldData( "my_date", IndexNumericFieldData.NumericType.DATE_NANOSECONDS, - (dv, n) -> new DelegateDocValuesField(new Dates(dv, true), n) + (dv, n) -> new DelegateDocValuesField(new Dates(new DatesSupplier(dv, true)), n) ); // Read index and check the doc values DirectoryReader reader = DirectoryReader.open(w); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java index 76cdfd6fff7b4..ef4a863b45a3b 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.lucene.search.function.ScriptScoreQuery; import org.elasticsearch.index.fielddata.BinaryScriptFieldData; -import org.elasticsearch.index.fielddata.IpScriptFieldData; +import org.elasticsearch.index.fielddata.ScriptDocValues.Strings; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.script.DocReader; @@ -145,7 +145,7 @@ public ScoreScript newInstance(DocReader docReader) { return new ScoreScript(Map.of(), searchContext.lookup(), docReader) { @Override public double execute(ExplanationHolder explanation) { - IpScriptFieldData.IpScriptDocValues bytes = (IpScriptFieldData.IpScriptDocValues) getDoc().get("test"); + Strings bytes = (Strings) getDoc().get("test"); return Integer.parseInt(bytes.getValue().substring(bytes.getValue().lastIndexOf(".") + 1)); } }; diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java index 262b7cc062a82..f123deb7f8d8a 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java @@ -500,7 +500,7 @@ public LeafFieldData load(LeafReaderContext context) { return new LeafFieldData() { @Override public DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new ScriptDocValues() { + return new DelegateDocValuesField(new ScriptDocValues(new ScriptDocValues.Supplier() { String value; @Override @@ -509,7 +509,7 @@ public int size() { } @Override - public String get(int index) { + public String getInternal(int index) { assert index 
== 0; return value; } @@ -521,6 +521,16 @@ public void setNextDocId(int docId) { leafLookup.setDocument(docId); value = runtimeDocValues.apply(leafLookup, docId); } + }) { + @Override + public int size() { + return supplier.size(); + } + + @Override + public String get(int i) { + return supplier.getInternal(i); + } }, name); } @@ -616,7 +626,7 @@ public void collect(int doc) throws IOException { scriptDocValues = indexFieldData.load(context).getScriptField("test").getScriptDocValues(); ; } - scriptDocValues.setNextDocId(doc); + scriptDocValues.getSupplier().setNextDocId(doc); for (int i = 0; i < scriptDocValues.size(); i++) { result.add(scriptDocValues.get(i).toString()); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerTests.java index d50922274ea74..8b2aa19546e47 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues.Doubles; +import org.elasticsearch.index.fielddata.ScriptDocValues.DoublesSupplier; import org.elasticsearch.index.fielddata.plain.SortedDoublesIndexFieldData; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -167,7 +168,7 @@ private void testCase( SortedDoublesIndexFieldData fieldData = new SortedDoublesIndexFieldData( "price", IndexNumericFieldData.NumericType.DOUBLE, - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ); FunctionScoreQuery query = new 
FunctionScoreQuery( new MatchAllDocsQuery(), diff --git a/server/src/test/java/org/elasticsearch/search/lookup/LeafDocLookupTests.java b/server/src/test/java/org/elasticsearch/search/lookup/LeafDocLookupTests.java index f642d1a41a904..4b85905fdc423 100644 --- a/server/src/test/java/org/elasticsearch/search/lookup/LeafDocLookupTests.java +++ b/server/src/test/java/org/elasticsearch/search/lookup/LeafDocLookupTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; +import java.io.IOException; import java.util.function.Function; import static org.mockito.AdditionalAnswers.returnsFirstArg; @@ -64,7 +65,7 @@ public void testFieldAliases() { assertEquals(docValues, fetchedDocValues); } - public void testFlattenedField() { + public void testFlattenedField() throws IOException { ScriptDocValues docValues1 = mock(ScriptDocValues.class); IndexFieldData fieldData1 = createFieldData(docValues1, "flattened.key1"); @@ -95,8 +96,13 @@ public void testFlattenedField() { assertEquals(docValues2, docLookup.get("flattened.key2")); } - private IndexFieldData createFieldData(ScriptDocValues scriptDocValues, String name) { - DelegateDocValuesField delegateDocValuesField = new DelegateDocValuesField(scriptDocValues, name); + private IndexFieldData createFieldData(ScriptDocValues scriptDocValues, String name) throws IOException { + DelegateDocValuesField delegateDocValuesField = new DelegateDocValuesField(scriptDocValues, name) { + @Override + public void setNextDocId(int id) { + // do nothing + } + }; LeafFieldData leafFieldData = mock(LeafFieldData.class); doReturn(delegateDocValuesField).when(leafFieldData).getScriptField(name); diff --git a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java index 995e5f70de845..cbb65425f942c 
100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; +import org.elasticsearch.index.fielddata.ScriptDocValues.DoublesSupplier; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.DocumentParserContext; @@ -423,7 +424,10 @@ public double nextValue() throws IOException { @Override public DocValuesField getScriptField(String name) { // getAggregateMetricValues returns all metric as doubles, including `value_count` - return new DelegateDocValuesField(new ScriptDocValues.Doubles(getAggregateMetricValues(defaultMetric)), name); + return new DelegateDocValuesField( + new ScriptDocValues.Doubles(new DoublesSupplier(getAggregateMetricValues(defaultMetric))), + name + ); } @Override diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongDocValuesField.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongDocValuesField.java index 3633b423bdc72..e2f792981e369 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongDocValuesField.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongDocValuesField.java @@ -23,7 +23,7 @@ import static org.elasticsearch.search.DocValueFormat.MASK_2_63; import static org.elasticsearch.xpack.unsignedlong.UnsignedLongFieldMapper.BIGINTEGER_2_64_MINUS_ONE; -public class UnsignedLongDocValuesField implements 
UnsignedLongField, DocValuesField { +public class UnsignedLongDocValuesField implements DocValuesField, ScriptDocValues.Supplier { private final SortedNumericDocValues input; private final String name; @@ -76,6 +76,13 @@ public boolean isEmpty() { return count == 0; } + // this method is required to support the Long return values + // for the old-style "doc" access in ScriptDocValues + @Override + public Long getInternal(int index) { + return toFormatted(index); + } + @Override public int size() { return count; @@ -89,7 +96,7 @@ protected long toFormatted(int index) { return values[index] ^ MASK_2_63; } - @Override + /** Return all the values as a {@code List}. */ public List getValues() { if (isEmpty()) { return Collections.emptyList(); @@ -104,13 +111,13 @@ public List getValues() { return values; } - @Override - public long getValue(long defaultValue) { + /** Returns the 0th index value as an {@code long} if it exists, otherwise {@code defaultValue}. */ + public long get(long defaultValue) { return getValue(0, defaultValue); } - @Override - public long getValue(int index, long defaultValue) { + /** Returns the value at {@code index} as an {@code long} if it exists, otherwise {@code defaultValue}. */ + public long get(int index, long defaultValue) { if (isEmpty() || index < 0 || index >= count) { return defaultValue; } @@ -118,6 +125,16 @@ public long getValue(int index, long defaultValue) { return toFormatted(index); } + /** Returns the 0th index value as an {@code long} if it exists, otherwise {@code defaultValue}. */ + public long getValue(long defaultValue) { + return get(0, defaultValue); + } + + /** Returns the value at {@code index} as an {@code long} if it exists, otherwise {@code defaultValue}. 
*/ + public long getValue(int index, long defaultValue) { + return get(index, defaultValue); + } + @Override public PrimitiveIterator.OfLong iterator() { return new PrimitiveIterator.OfLong() { @@ -148,7 +165,7 @@ protected BigInteger toBigInteger(int index) { return BigInteger.valueOf(toFormatted(index)).and(BIGINTEGER_2_64_MINUS_ONE); } - @Override + /** Converts all the values to {@code BigInteger} and returns them as a {@code List}. */ public List asBigIntegers() { if (isEmpty()) { return Collections.emptyList(); @@ -163,12 +180,12 @@ public List asBigIntegers() { return values; } - @Override + /** Returns the 0th index value as a {@code BigInteger} if it exists, otherwise {@code defaultValue}. */ public BigInteger asBigInteger(BigInteger defaultValue) { return asBigInteger(0, defaultValue); } - @Override + /** Returns the value at {@code index} as a {@code BigInteger} if it exists, otherwise {@code defaultValue}. */ public BigInteger asBigInteger(int index, BigInteger defaultValue) { if (isEmpty() || index < 0 || index >= count) { return defaultValue; diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongScriptDocValues.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongScriptDocValues.java index 3278e4f165338..dfc1fd23c30eb 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongScriptDocValues.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongScriptDocValues.java @@ -9,37 +9,24 @@ import org.elasticsearch.index.fielddata.ScriptDocValues; -import java.io.IOException; - public class UnsignedLongScriptDocValues extends ScriptDocValues { - private final UnsignedLongDocValuesField unsignedLongDocValuesField; - - /** - * Standard constructor. 
- */ - public UnsignedLongScriptDocValues(UnsignedLongDocValuesField unsignedLongDocValuesField) { - this.unsignedLongDocValuesField = unsignedLongDocValuesField; - } - - @Override - public void setNextDocId(int docId) throws IOException { - throw new UnsupportedOperationException(); + public UnsignedLongScriptDocValues(Supplier supplier) { + super(supplier); } public long getValue() { - throwIfEmpty(); - return unsignedLongDocValuesField.getValue(0L); // default is ignored + return get(0); } @Override public Long get(int index) { throwIfEmpty(); - return unsignedLongDocValuesField.getValue(0L); // default is ignored + return supplier.getInternal(index); } @Override public int size() { - return unsignedLongDocValuesField.size(); + return supplier.size(); } } diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/resources/org/elasticsearch/xpack/unsignedlong/org.elasticsearch.xpack.unsignedlong.txt b/x-pack/plugin/mapper-unsigned-long/src/main/resources/org/elasticsearch/xpack/unsignedlong/org.elasticsearch.xpack.unsignedlong.txt index 2baa2107b7472..bce3a098a69dc 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/resources/org/elasticsearch/xpack/unsignedlong/org.elasticsearch.xpack.unsignedlong.txt +++ b/x-pack/plugin/mapper-unsigned-long/src/main/resources/org/elasticsearch/xpack/unsignedlong/org.elasticsearch.xpack.unsignedlong.txt @@ -11,14 +11,13 @@ class org.elasticsearch.xpack.unsignedlong.UnsignedLongScriptDocValues { long getValue() } -class org.elasticsearch.xpack.unsignedlong.UnsignedLongField @dynamic_type { +class org.elasticsearch.xpack.unsignedlong.UnsignedLongDocValuesField @dynamic_type { + long get(long) + long get(int, long) long getValue(long) long getValue(int, long) List getValues() BigInteger asBigInteger(BigInteger) BigInteger asBigInteger(int, BigInteger) List asBigIntegers() -} - -class org.elasticsearch.xpack.unsignedlong.UnsignedLongDocValuesField @dynamic_type { } \ No newline at end of file diff --git 
a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionScriptDocValues.java b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionScriptDocValues.java index 25cd66a0eb212..f20db9bcd852e 100644 --- a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionScriptDocValues.java +++ b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionScriptDocValues.java @@ -15,23 +15,44 @@ public final class VersionScriptDocValues extends ScriptDocValues { - private final SortedSetDocValues in; - private long[] ords = new long[0]; - private int count; + public static final class VersionScriptSupplier implements ScriptDocValues.Supplier { - public VersionScriptDocValues(SortedSetDocValues in) { - this.in = in; - } + private final SortedSetDocValues in; + private long[] ords = new long[0]; + private int count; - @Override - public void setNextDocId(int docId) throws IOException { - count = 0; - if (in.advanceExact(docId)) { - for (long ord = in.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = in.nextOrd()) { - ords = ArrayUtil.grow(ords, count + 1); - ords[count++] = ord; + public VersionScriptSupplier(SortedSetDocValues in) { + this.in = in; + } + + @Override + public void setNextDocId(int docId) throws IOException { + count = 0; + if (in.advanceExact(docId)) { + for (long ord = in.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = in.nextOrd()) { + ords = ArrayUtil.grow(ords, count + 1); + ords[count++] = ord; + } + } + } + + @Override + public String getInternal(int index) { + try { + return VersionEncoder.decodeVersion(in.lookupOrd(ords[index])); + } catch (IOException e) { + throw new RuntimeException(e); } } + + @Override + public int size() { + return count; + } + } + + public VersionScriptDocValues(VersionScriptSupplier supplier) { + super(supplier); } public String getValue() { @@ -40,20 +61,16 @@ public String getValue() { 
@Override public String get(int index) { - if (count == 0) { + if (supplier.size() == 0) { throw new IllegalStateException( "A document doesn't have a value for a field! " + "Use doc[].size()==0 to check if a document is missing a field!" ); } - try { - return VersionEncoder.decodeVersion(in.lookupOrd(ords[index])); - } catch (IOException e) { - throw new RuntimeException(e); - } + return supplier.getInternal(index); } @Override public int size() { - return count; + return supplier.size(); } } diff --git a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java index fa8a692749b25..97dc728352051 100644 --- a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java +++ b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java @@ -49,6 +49,7 @@ import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.versionfield.VersionEncoder.EncodedVersion; +import org.elasticsearch.xpack.versionfield.VersionScriptDocValues.VersionScriptSupplier; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -279,7 +280,11 @@ protected BytesRef indexedValueForSearch(Object value) { @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { - return new SortedSetOrdinalsIndexFieldData.Builder(name(), VersionScriptDocValues::new, CoreValuesSourceType.KEYWORD); + return new SortedSetOrdinalsIndexFieldData.Builder( + name(), + dv -> new VersionScriptDocValues(new VersionScriptSupplier(dv)), + CoreValuesSourceType.KEYWORD + ); } @Override diff --git 
a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/plain/AbstractAtomicGeoShapeShapeFieldData.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/plain/AbstractAtomicGeoShapeShapeFieldData.java index 69018c3ed0803..5bef8d33fb6db 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/plain/AbstractAtomicGeoShapeShapeFieldData.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/plain/AbstractAtomicGeoShapeShapeFieldData.java @@ -15,6 +15,7 @@ import org.elasticsearch.script.field.DelegateDocValuesField; import org.elasticsearch.script.field.DocValuesField; import org.elasticsearch.xpack.spatial.index.fielddata.GeoShapeValues; +import org.elasticsearch.xpack.spatial.index.fielddata.GeoShapeValues.GeoShapeValue; import org.elasticsearch.xpack.spatial.index.fielddata.LeafGeoShapeFieldData; import java.io.IOException; @@ -33,7 +34,7 @@ public final SortedBinaryDocValues getBytesValues() { @Override public final DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new GeoShapeScriptValues(getGeoShapeValues()), name); + return new DelegateDocValuesField(new GeoShapeScriptValues(new GeoShapeSupplier(getGeoShapeValues())), name); } public static LeafGeoShapeFieldData empty(final int maxDoc) { @@ -59,14 +60,14 @@ public GeoShapeValues getGeoShapeValues() { }; } - private static final class GeoShapeScriptValues extends ScriptDocValues.Geometry { + private static final class GeoShapeSupplier implements ScriptDocValues.GeometrySupplier { private final GeoShapeValues in; private final GeoPoint centroid = new GeoPoint(); private final GeoBoundingBox boundingBox = new GeoBoundingBox(new GeoPoint(), new GeoPoint()); private GeoShapeValues.GeoShapeValue value; - private GeoShapeScriptValues(GeoShapeValues in) { + private GeoShapeSupplier(GeoShapeValues in) { this.in = in; } @@ -82,39 +83,73 @@ public 
void setNextDocId(int docId) throws IOException { } } + @Override + public GeoShapeValue getInternal(int index) { + throw new UnsupportedOperationException(); + } + + public GeoShapeValue getInternal() { + return value; + } + + @Override + public int size() { + return value == null ? 0 : 1; + } + + @Override + public GeoPoint getCentroid() { + return centroid; + } + + @Override + public GeoBoundingBox getBoundingBox() { + return boundingBox; + } + } + + private static final class GeoShapeScriptValues extends ScriptDocValues.Geometry { + + private final GeoShapeSupplier gsSupplier; + + private GeoShapeScriptValues(GeoShapeSupplier supplier) { + super(supplier); + this.gsSupplier = supplier; + } + @Override public int getDimensionalType() { - return value == null ? -1 : value.dimensionalShapeType().ordinal(); + return gsSupplier.getInternal() == null ? -1 : gsSupplier.getInternal().dimensionalShapeType().ordinal(); } @Override public GeoPoint getCentroid() { - return value == null ? null : centroid; + return gsSupplier.getInternal() == null ? null : gsSupplier.getCentroid(); } @Override public double getMercatorWidth() { - return lonToSphericalMercator(boundingBox.right()) - lonToSphericalMercator(boundingBox.left()); + return lonToSphericalMercator(getBoundingBox().right()) - lonToSphericalMercator(getBoundingBox().left()); } @Override public double getMercatorHeight() { - return latToSphericalMercator(boundingBox.top()) - latToSphericalMercator(boundingBox.bottom()); + return latToSphericalMercator(getBoundingBox().top()) - latToSphericalMercator(getBoundingBox().bottom()); } @Override public GeoBoundingBox getBoundingBox() { - return value == null ? null : boundingBox; + return gsSupplier.getInternal() == null ? null : gsSupplier.getBoundingBox(); } @Override public GeoShapeValues.GeoShapeValue get(int index) { - return value; + return gsSupplier.getInternal(); } @Override public int size() { - return value == null ? 
0 : 1; + return supplier.size(); } } } diff --git a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValues.java b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValues.java index 6df3ed449bd16..852b63500a9bf 100644 --- a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValues.java +++ b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValues.java @@ -17,49 +17,73 @@ public class BinaryDenseVectorScriptDocValues extends DenseVectorScriptDocValues { - private final BinaryDocValues in; + public static class BinaryDenseVectorSupplier implements DenseVectorSupplier { + + private final BinaryDocValues in; + private BytesRef value; + + public BinaryDenseVectorSupplier(BinaryDocValues in) { + this.in = in; + } + + @Override + public void setNextDocId(int docId) throws IOException { + if (in.advanceExact(docId)) { + value = in.binaryValue(); + } else { + value = null; + } + } + + @Override + public BytesRef getInternal(int index) { + throw new UnsupportedOperationException(); + } + + public BytesRef getInternal() { + return value; + } + + @Override + public int size() { + if (value == null) { + return 0; + } else { + return 1; + } + } + } + + private final BinaryDenseVectorSupplier bdvSupplier; private final Version indexVersion; private final float[] vector; - private BytesRef value; - BinaryDenseVectorScriptDocValues(BinaryDocValues in, Version indexVersion, int dims) { - super(dims); - this.in = in; + BinaryDenseVectorScriptDocValues(BinaryDenseVectorSupplier supplier, Version indexVersion, int dims) { + super(supplier, dims); + this.bdvSupplier = supplier; this.indexVersion = indexVersion; this.vector = new float[dims]; } @Override - public void setNextDocId(int docId) throws IOException { - if (in.advanceExact(docId)) { - value = in.binaryValue(); - } else { - value 
= null; - } + public int size() { + return supplier.size(); } @Override public float[] getVectorValue() { - VectorEncoderDecoder.decodeDenseVector(value, vector); + VectorEncoderDecoder.decodeDenseVector(bdvSupplier.getInternal(), vector); return vector; } @Override public float getMagnitude() { - return VectorEncoderDecoder.getMagnitude(indexVersion, value); - } - - @Override - public int size() { - if (value == null) { - return 0; - } else { - return 1; - } + return VectorEncoderDecoder.getMagnitude(indexVersion, bdvSupplier.getInternal()); } @Override public double dotProduct(float[] queryVector) { + BytesRef value = bdvSupplier.getInternal(); ByteBuffer byteBuffer = ByteBuffer.wrap(value.bytes, value.offset, value.length); double dotProduct = 0; @@ -71,6 +95,7 @@ public double dotProduct(float[] queryVector) { @Override public double l1Norm(float[] queryVector) { + BytesRef value = bdvSupplier.getInternal(); ByteBuffer byteBuffer = ByteBuffer.wrap(value.bytes, value.offset, value.length); double l1norm = 0; @@ -82,6 +107,7 @@ public double l1Norm(float[] queryVector) { @Override public double l2Norm(float[] queryVector) { + BytesRef value = bdvSupplier.getInternal(); ByteBuffer byteBuffer = ByteBuffer.wrap(value.bytes, value.offset, value.length); double l2norm = 0; for (float queryValue : queryVector) { diff --git a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/DenseVectorScriptDocValues.java b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/DenseVectorScriptDocValues.java index 6ebce8541d308..650ebca1d5ee5 100644 --- a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/DenseVectorScriptDocValues.java +++ b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/DenseVectorScriptDocValues.java @@ -11,11 +11,23 @@ import org.elasticsearch.index.fielddata.ScriptDocValues; public abstract class DenseVectorScriptDocValues extends ScriptDocValues { + + public interface 
DenseVectorSupplier extends Supplier { + + @Override + default BytesRef getInternal(int index) { + throw new UnsupportedOperationException(); + } + + T getInternal(); + } + public static final String MISSING_VECTOR_FIELD_MESSAGE = "A document doesn't have a value for a vector field!"; private final int dims; - public DenseVectorScriptDocValues(int dims) { + public DenseVectorScriptDocValues(DenseVectorSupplier supplier, int dims) { + super(supplier); this.dims = dims; } @@ -46,8 +58,8 @@ public BytesRef get(int index) { ); } - public static DenseVectorScriptDocValues empty(int dims) { - return new DenseVectorScriptDocValues(dims) { + public static DenseVectorScriptDocValues empty(DenseVectorSupplier supplier, int dims) { + return new DenseVectorScriptDocValues(supplier, dims) { @Override public float[] getVectorValue() { throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE); @@ -73,14 +85,9 @@ public double l2Norm(float[] queryVector) { throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE); } - @Override - public void setNextDocId(int docId) { - // do nothing - } - @Override public int size() { - return 0; + return supplier.size(); } }; } diff --git a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValues.java b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValues.java index 03afcbf0dd685..fc6f1bdb59906 100644 --- a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValues.java +++ b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValues.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.vectors.query; import org.apache.lucene.index.VectorValues; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.VectorUtil; import java.io.IOException; @@ -16,36 +17,64 @@ import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; public 
class KnnDenseVectorScriptDocValues extends DenseVectorScriptDocValues { - private final VectorValues in; - private float[] vector; - KnnDenseVectorScriptDocValues(VectorValues in, int dims) { - super(dims); - this.in = in; - } + public static class KnnDenseVectorSupplier implements DenseVectorSupplier { - @Override - public void setNextDocId(int docId) throws IOException { - int currentDoc = in.docID(); - if (currentDoc == NO_MORE_DOCS || docId < currentDoc) { - vector = null; - } else if (docId == currentDoc) { - vector = in.vectorValue(); - } else { - currentDoc = in.advance(docId); - if (currentDoc == docId) { + private final VectorValues in; + private float[] vector; + + public KnnDenseVectorSupplier(VectorValues in) { + this.in = in; + } + + @Override + public void setNextDocId(int docId) throws IOException { + int currentDoc = in.docID(); + if (currentDoc == NO_MORE_DOCS || docId < currentDoc) { + vector = null; + } else if (docId == currentDoc) { vector = in.vectorValue(); } else { - vector = null; + currentDoc = in.advance(docId); + if (currentDoc == docId) { + vector = in.vectorValue(); + } else { + vector = null; + } + } + } + + @Override + public BytesRef getInternal(int index) { + throw new UnsupportedOperationException(); + } + + public float[] getInternal() { + return vector; + } + + @Override + public int size() { + if (vector == null) { + return 0; + } else { + return 1; } } } + private final KnnDenseVectorSupplier kdvSupplier; + + KnnDenseVectorScriptDocValues(KnnDenseVectorSupplier supplier, int dims) { + super(supplier, dims); + this.kdvSupplier = supplier; + } + private float[] getVectorChecked() { - if (vector == null) { + if (kdvSupplier.getInternal() == null) { throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE); } - return vector; + return kdvSupplier.getInternal(); } @Override @@ -88,10 +117,6 @@ public double l2Norm(float[] queryVector) { @Override public int size() { - if (vector == null) { - return 0; - } else { - return 1; 
- } + return supplier.size(); } } diff --git a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java index 511985b62a58e..e97daf4c2f397 100644 --- a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java +++ b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java @@ -63,7 +63,7 @@ public DenseVectorFunction(ScoreScript scoreScript, List queryVector, St void setNextVector() { try { - docValues.setNextDocId(scoreScript._getDocId()); + docValues.getSupplier().setNextDocId(scoreScript._getDocId()); } catch (IOException e) { throw ExceptionsHelper.convertToElastic(e); } diff --git a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/VectorDVLeafFieldData.java b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/VectorDVLeafFieldData.java index e0184303e65f7..1d8c45e9c60c2 100644 --- a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/VectorDVLeafFieldData.java +++ b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/VectorDVLeafFieldData.java @@ -17,11 +17,16 @@ import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.script.field.DelegateDocValuesField; import org.elasticsearch.script.field.DocValuesField; +import org.elasticsearch.xpack.vectors.query.BinaryDenseVectorScriptDocValues.BinaryDenseVectorSupplier; +import org.elasticsearch.xpack.vectors.query.DenseVectorScriptDocValues.DenseVectorSupplier; +import org.elasticsearch.xpack.vectors.query.KnnDenseVectorScriptDocValues.KnnDenseVectorSupplier; import java.io.IOException; import java.util.Collection; import java.util.Collections; +import static org.elasticsearch.xpack.vectors.query.DenseVectorScriptDocValues.MISSING_VECTOR_FIELD_MESSAGE; + final class 
VectorDVLeafFieldData implements LeafFieldData { private final LeafReader reader; @@ -59,12 +64,30 @@ public DocValuesField getScriptField(String name) { if (indexed) { VectorValues values = reader.getVectorValues(field); if (values == null || values == VectorValues.EMPTY) { - return new DelegateDocValuesField(DenseVectorScriptDocValues.empty(dims), name); + return new DelegateDocValuesField(DenseVectorScriptDocValues.empty(new DenseVectorSupplier() { + @Override + public float[] getInternal() { + throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE); + } + + @Override + public void setNextDocId(int docId) throws IOException { + // do nothing + } + + @Override + public int size() { + return 0; + } + }, dims), name); } - return new DelegateDocValuesField(new KnnDenseVectorScriptDocValues(values, dims), name); + return new DelegateDocValuesField(new KnnDenseVectorScriptDocValues(new KnnDenseVectorSupplier(values), dims), name); } else { BinaryDocValues values = DocValues.getBinary(reader, field); - return new DelegateDocValuesField(new BinaryDenseVectorScriptDocValues(values, indexVersion, dims), name); + return new DelegateDocValuesField( + new BinaryDenseVectorScriptDocValues(new BinaryDenseVectorSupplier(values), indexVersion, dims), + name + ); } } catch (IOException e) { throw new IllegalStateException("Cannot load doc values for vector field!", e); diff --git a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValuesTests.java b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValuesTests.java index 6541ccbd01c4e..2761364e51505 100644 --- a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValuesTests.java +++ b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValuesTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.Version; import 
org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.vectors.mapper.VectorEncoderDecoder; +import org.elasticsearch.xpack.vectors.query.BinaryDenseVectorScriptDocValues.BinaryDenseVectorSupplier; import java.io.IOException; import java.nio.ByteBuffer; @@ -28,9 +29,10 @@ public void testGetVectorValueAndGetMagnitude() throws IOException { for (Version indexVersion : Arrays.asList(Version.V_7_4_0, Version.CURRENT)) { BinaryDocValues docValues = wrap(vectors, indexVersion); - DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(docValues, indexVersion, dims); + BinaryDenseVectorSupplier supplier = new BinaryDenseVectorSupplier(docValues); + DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(supplier, indexVersion, dims); for (int i = 0; i < vectors.length; i++) { - scriptDocValues.setNextDocId(i); + supplier.setNextDocId(i); assertArrayEquals(vectors[i], scriptDocValues.getVectorValue(), 0.0001f); assertEquals(expectedMagnitudes[i], scriptDocValues.getMagnitude(), 0.0001f); } @@ -41,13 +43,14 @@ public void testMissingValues() throws IOException { int dims = 3; float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; BinaryDocValues docValues = wrap(vectors, Version.CURRENT); - DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(docValues, Version.CURRENT, dims); + BinaryDenseVectorSupplier supplier = new BinaryDenseVectorSupplier(docValues); + DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(supplier, Version.CURRENT, dims); - scriptDocValues.setNextDocId(3); - Exception e = expectThrows(IllegalArgumentException.class, () -> scriptDocValues.getVectorValue()); + supplier.setNextDocId(3); + Exception e = expectThrows(IllegalArgumentException.class, scriptDocValues::getVectorValue); assertEquals("A document doesn't have a value for a vector field!", e.getMessage()); - e = expectThrows(IllegalArgumentException.class, () -> 
scriptDocValues.getMagnitude()); + e = expectThrows(IllegalArgumentException.class, scriptDocValues::getMagnitude); assertEquals("A document doesn't have a value for a vector field!", e.getMessage()); } @@ -55,9 +58,10 @@ public void testGetFunctionIsNotAccessible() throws IOException { int dims = 3; float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; BinaryDocValues docValues = wrap(vectors, Version.CURRENT); - DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(docValues, Version.CURRENT, dims); + BinaryDenseVectorSupplier supplier = new BinaryDenseVectorSupplier(docValues); + DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(supplier, Version.CURRENT, dims); - scriptDocValues.setNextDocId(0); + supplier.setNextDocId(0); Exception e = expectThrows(UnsupportedOperationException.class, () -> scriptDocValues.get(0)); assertThat(e.getMessage(), containsString("accessing a vector field's value through 'get' or 'value' is not supported!")); } @@ -69,9 +73,10 @@ public void testSimilarityFunctions() throws IOException { for (Version indexVersion : Arrays.asList(Version.V_7_4_0, Version.CURRENT)) { BinaryDocValues docValues = wrap(new float[][] { docVector }, indexVersion); - DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(docValues, Version.CURRENT, dims); + BinaryDenseVectorSupplier supplier = new BinaryDenseVectorSupplier(docValues); + DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(supplier, Version.CURRENT, dims); - scriptDocValues.setNextDocId(0); + supplier.setNextDocId(0); assertEquals( "dotProduct result is not equal to the expected value!", diff --git a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/DenseVectorFunctionTests.java b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/DenseVectorFunctionTests.java index 1cd89e4993c7e..0ecd26f08c20c 100644 --- 
a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/DenseVectorFunctionTests.java +++ b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/DenseVectorFunctionTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.Version; import org.elasticsearch.script.ScoreScript; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.vectors.query.BinaryDenseVectorScriptDocValues.BinaryDenseVectorSupplier; import org.elasticsearch.xpack.vectors.query.ScoreScriptUtils.CosineSimilarity; import org.elasticsearch.xpack.vectors.query.ScoreScriptUtils.DotProduct; import org.elasticsearch.xpack.vectors.query.ScoreScriptUtils.L1Norm; @@ -36,7 +37,11 @@ public void testVectorFunctions() { for (Version indexVersion : Arrays.asList(Version.V_7_4_0, Version.CURRENT)) { BinaryDocValues docValues = BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, indexVersion); - DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(docValues, indexVersion, dims); + DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues( + new BinaryDenseVectorSupplier(docValues), + indexVersion, + dims + ); ScoreScript scoreScript = mock(ScoreScript.class); when(scoreScript.getDoc()).thenReturn(Collections.singletonMap(field, scriptDocValues)); diff --git a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValuesTests.java b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValuesTests.java index 319a98a619bf6..7005e4d7bd531 100644 --- a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValuesTests.java +++ b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValuesTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.index.VectorValues; import org.apache.lucene.util.BytesRef; import 
org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.vectors.query.KnnDenseVectorScriptDocValues.KnnDenseVectorSupplier; import java.io.IOException; @@ -22,9 +23,10 @@ public void testGetVectorValueAndGetMagnitude() throws IOException { float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; float[] expectedMagnitudes = { 1.7320f, 2.4495f, 3.3166f }; - DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(wrap(vectors), dims); + KnnDenseVectorSupplier supplier = new KnnDenseVectorSupplier(wrap(vectors)); + DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(supplier, dims); for (int i = 0; i < vectors.length; i++) { - scriptDocValues.setNextDocId(i); + supplier.setNextDocId(i); assertArrayEquals(vectors[i], scriptDocValues.getVectorValue(), 0.0001f); assertEquals(expectedMagnitudes[i], scriptDocValues.getMagnitude(), 0.0001f); } @@ -33,9 +35,10 @@ public void testGetVectorValueAndGetMagnitude() throws IOException { public void testMissingValues() throws IOException { int dims = 3; float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; - DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(wrap(vectors), dims); + KnnDenseVectorSupplier supplier = new KnnDenseVectorSupplier(wrap(vectors)); + DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(supplier, dims); - scriptDocValues.setNextDocId(3); + supplier.setNextDocId(3); Exception e = expectThrows(IllegalArgumentException.class, () -> scriptDocValues.getVectorValue()); assertEquals("A document doesn't have a value for a vector field!", e.getMessage()); @@ -46,9 +49,10 @@ public void testMissingValues() throws IOException { public void testGetFunctionIsNotAccessible() throws IOException { int dims = 3; float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; - DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(wrap(vectors), dims); + 
KnnDenseVectorSupplier supplier = new KnnDenseVectorSupplier(wrap(vectors)); + DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(supplier, dims); - scriptDocValues.setNextDocId(0); + supplier.setNextDocId(0); Exception e = expectThrows(UnsupportedOperationException.class, () -> scriptDocValues.get(0)); assertThat(e.getMessage(), containsString("accessing a vector field's value through 'get' or 'value' is not supported!")); } @@ -58,8 +62,9 @@ public void testSimilarityFunctions() throws IOException { float[] docVector = new float[] { 230.0f, 300.33f, -34.8988f, 15.555f, -200.0f }; float[] queryVector = new float[] { 0.5f, 111.3f, -13.0f, 14.8f, -156.0f }; - DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(wrap(new float[][] { docVector }), dims); - scriptDocValues.setNextDocId(0); + KnnDenseVectorSupplier supplier = new KnnDenseVectorSupplier(wrap(new float[][] { docVector })); + DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(supplier, dims); + supplier.setNextDocId(0); assertEquals("dotProduct result is not equal to the expected value!", 65425.624, scriptDocValues.dotProduct(queryVector), 0.001); assertEquals("l1norm result is not equal to the expected value!", 485.184, scriptDocValues.l1Norm(queryVector), 0.001); From 43e6cacdb0225e7d92e33fdccab233c6a9c27c21 Mon Sep 17 00:00:00 2001 From: William Brafford Date: Mon, 29 Nov 2021 13:29:03 -0500 Subject: [PATCH 47/55] Strip blocks from settings for reindex targets (#80887) When migrating system features, we copy settings from old indices into the new indices we create before reindexing. However, if we happen to copy a write block, this causes the reindexing to fail. Here, we strip the index block settings before applying settings to new indices. 
Fixes #80654 --- .../migration/FeatureMigrationIT.java | 24 +++++++++++++++++++ .../upgrades/SystemIndexMigrator.java | 9 ++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java index fc7a70e3498ec..daa88c8284620 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java @@ -242,6 +242,30 @@ public void testMigrateInternalManagedSystemIndex() throws Exception { ); } + public void testMigrateIndexWithWriteBlock() throws Exception { + createSystemIndexForDescriptor(INTERNAL_UNMANAGED); + + String indexName = Optional.ofNullable(INTERNAL_UNMANAGED.getPrimaryIndex()) + .orElse(INTERNAL_UNMANAGED.getIndexPattern().replace("*", "old")); + client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder().put("index.blocks.write", true)).get(); + + TestPlugin.preMigrationHook.set((state) -> Collections.emptyMap()); + TestPlugin.postMigrationHook.set((state, metadata) -> {}); + + ensureGreen(); + + client().execute(PostFeatureUpgradeAction.INSTANCE, new PostFeatureUpgradeRequest()).get(); + + assertBusy(() -> { + GetFeatureUpgradeStatusResponse statusResp = client().execute( + GetFeatureUpgradeStatusAction.INSTANCE, + new GetFeatureUpgradeStatusRequest() + ).get(); + logger.info(Strings.toString(statusResp)); + assertThat(statusResp.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED)); + }); + } + public void assertIndexHasCorrectProperties( Metadata metadata, String indexName, diff --git a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java index 
6e70c387dd5b1..190b6e9e2148f 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java +++ b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java @@ -459,9 +459,16 @@ private void createIndex(SystemIndexMigrationInfo migrationInfo, ActionListener< migrationInfo.getNextIndexName() ); + Settings.Builder settingsBuilder = Settings.builder(); + if (Objects.nonNull(migrationInfo.getSettings())) { + settingsBuilder.put(migrationInfo.getSettings()); + settingsBuilder.remove("index.blocks.write"); + settingsBuilder.remove("index.blocks.read"); + settingsBuilder.remove("index.blocks.metadata"); + } createRequest.waitForActiveShards(ActiveShardCount.ALL) .mappings(migrationInfo.getMappings()) - .settings(Objects.requireNonNullElse(migrationInfo.getSettings(), Settings.EMPTY)); + .settings(Objects.requireNonNullElse(settingsBuilder.build(), Settings.EMPTY)); metadataCreateIndexService.createIndex(createRequest, listener); } From ca65718923bc886ab0aa5934ecc1cd3b2e3090ba Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 29 Nov 2021 18:47:01 +0000 Subject: [PATCH 48/55] Clarify `unassigned.reason` docs (#81017) Today we indicate that the `unassigned.reason` field in various APIs indicates the reason why a shard is unassigned. This isn't really true, it tells you some information about the event that caused the shard to _become_ unassigned (or which most recently changed its routing table entry while remaining unassigned) but tells you almost nothing about why the shard _is now_ unassigned and how to fix it. That's what the allocation explain API is for. This commit clarifies this point in the docs. 
Closes #80892 Co-authored-by: James Rodewig --- docs/reference/cat/shards.asciidoc | 14 ++++++++++++-- .../how-to/fix-common-cluster-issues.asciidoc | 9 ++++----- .../rest/action/cat/RestShardsAction.java | 2 +- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc index 3ff2839c8501d..ed9c915e7b7d7 100644 --- a/docs/reference/cat/shards.asciidoc +++ b/docs/reference/cat/shards.asciidoc @@ -249,7 +249,9 @@ Time at which the shard became unassigned in Time (UTC)]. `unassigned.details`, `ud`:: -Details about why the shard became unassigned. +Details about why the shard became unassigned. This does not explain why the +shard is currently unassigned. To understand why a shard is not assigned, use +the <> API. `unassigned.for`, `uf`:: Time at which the shard was requested to be unassigned in @@ -258,16 +260,24 @@ Time (UTC)]. [[reason-unassigned]] `unassigned.reason`, `ur`:: -Reason the shard is unassigned. Returned values are: +Indicates the reason for the last change to the state of this unassigned shard. +This does not explain why the shard is currently unassigned. To understand why +a shard is not assigned, use the <> API. Returned +values include: + * `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard. * `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. * `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index. * `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index. +* `FORCED_EMPTY_PRIMARY`: The shard's allocation was last modified by forcing an empty primary using the <> API. +* `INDEX_CLOSED`: Unassigned because the index was closed. * `INDEX_CREATED`: Unassigned as a result of an API creation of an index. * `INDEX_REOPENED`: Unassigned as a result of opening a closed index. +* `MANUAL_ALLOCATION`: The shard's allocation was last modified by the <> API. 
* `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index. * `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster. +* `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the <>. +* `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed. * `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled. * `REINITIALIZED`: When a shard moves from started back to initializing. * `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica. diff --git a/docs/reference/how-to/fix-common-cluster-issues.asciidoc b/docs/reference/how-to/fix-common-cluster-issues.asciidoc index 9750e37d3be8a..cc778ea0d79e4 100644 --- a/docs/reference/how-to/fix-common-cluster-issues.asciidoc +++ b/docs/reference/how-to/fix-common-cluster-issues.asciidoc @@ -419,12 +419,11 @@ GET _cat/shards?v=true&h=index,shard,prirep,state,node,unassigned.reason&s=state ---- Unassigned shards have a `state` of `UNASSIGNED`. The `prirep` value is `p` for -primary shards and `r` for replicas. The `unassigned.reason` describes why the -shard remains unassigned. +primary shards and `r` for replicas. -To get a more in-depth explanation of an unassigned shard's allocation status, -use the <>. You -can often use details in the response to resolve the issue. +To understand why an unassigned shard is not being assigned and what action +you must take to allow {es} to assign it, use the +<>. 
[source,console] ---- diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index 29aba3d25d869..efac1431236cd 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -110,7 +110,7 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("sync_id", "alias:sync_id;default:false;desc:sync id"); - table.addCell("unassigned.reason", "alias:ur;default:false;desc:reason shard is unassigned"); + table.addCell("unassigned.reason", "alias:ur;default:false;desc:reason shard became unassigned"); table.addCell("unassigned.at", "alias:ua;default:false;desc:time shard became unassigned (UTC)"); table.addCell("unassigned.for", "alias:uf;default:false;text-align:right;desc:time has been unassigned"); table.addCell("unassigned.details", "alias:ud;default:false;desc:additional details as to why the shard became unassigned"); From 256521eafe1af7c363f5cfbbd64a22adbd3760bb Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 29 Nov 2021 19:49:00 +0100 Subject: [PATCH 49/55] Delegate Ref Counting to ByteBuf in Netty Transport (#81096) Tracking down recent memory leaks was made unnecessarily hard by wrapping the `ByteBuf` ref counting with our own counter. This way, we would not record the increments and decrements on the Netty leak tracker, making it useless as far as identifying the concrete source of a request with the logged leak only containing touch points up until our inbound handler code.
--- .../main/java/org/elasticsearch/nio/Page.java | 4 +- .../netty4/Netty4MessageChannelHandler.java | 42 ++++++++++++++++++- .../bytes/ReleasableBytesReference.java | 42 +++++++++---------- .../transport/InboundAggregatorTests.java | 18 ++++---- .../transport/InboundDecoderTests.java | 28 ++++++++----- .../transport/InboundPipelineTests.java | 2 +- .../GetCcrRestoreFileChunkAction.java | 3 +- 7 files changed, 90 insertions(+), 49 deletions(-) diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/Page.java b/libs/nio/src/main/java/org/elasticsearch/nio/Page.java index bc85e7dfb27f2..388ac35ea4ad3 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/Page.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/Page.java @@ -28,7 +28,7 @@ public Page(ByteBuffer byteBuffer, Releasable closeable) { } private Page(ByteBuffer byteBuffer, RefCountedCloseable refCountedCloseable) { - assert refCountedCloseable.refCount() > 0; + assert refCountedCloseable.hasReferences(); this.byteBuffer = byteBuffer; this.refCountedCloseable = refCountedCloseable; } @@ -51,7 +51,7 @@ public Page duplicate() { * @return the byte buffer */ public ByteBuffer byteBuffer() { - assert refCountedCloseable.refCount() > 0; + assert refCountedCloseable.hasReferences(); return byteBuffer; } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java index 63ccf33561b88..516889b29f8ce 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.recycler.Recycler; +import 
org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasables; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.InboundPipeline; @@ -68,7 +69,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception final ByteBuf buffer = (ByteBuf) msg; Netty4TcpChannel channel = ctx.channel().attr(Netty4Transport.CHANNEL_KEY).get(); final BytesReference wrapped = Netty4Utils.toBytesReference(buffer); - try (ReleasableBytesReference reference = new ReleasableBytesReference(wrapped, buffer::release)) { + try (ReleasableBytesReference reference = new ReleasableBytesReference(wrapped, new ByteBufRefCounted(buffer))) { pipeline.handleBytes(channel, reference); } } @@ -211,4 +212,43 @@ void failAsClosedChannel() { buf.release(); } } + + private static final class ByteBufRefCounted implements RefCounted { + + private final ByteBuf buffer; + + ByteBufRefCounted(ByteBuf buffer) { + this.buffer = buffer; + } + + @Override + public void incRef() { + buffer.retain(); + } + + @Override + public boolean tryIncRef() { + if (hasReferences() == false) { + return false; + } + try { + buffer.retain(); + } catch (RuntimeException e) { + assert hasReferences() == false; + return false; + } + return true; + } + + @Override + public boolean decRef() { + return buffer.release(); + } + + @Override + public boolean hasReferences() { + return buffer.refCnt() > 0; + } + } + } diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java index c1dce4db7cf5c..07723ef5bcffe 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java @@ -31,7 +31,7 @@ public final class ReleasableBytesReference implements RefCounted, Releasable, B private static final ReleasableBytesReference EMPTY = new 
ReleasableBytesReference(BytesArray.EMPTY, NO_OP); private final BytesReference delegate; - private final AbstractRefCounted refCounted; + private final RefCounted refCounted; public static ReleasableBytesReference empty() { EMPTY.incRef(); @@ -42,10 +42,10 @@ public ReleasableBytesReference(BytesReference delegate, Releasable releasable) this(delegate, new RefCountedReleasable(releasable)); } - public ReleasableBytesReference(BytesReference delegate, AbstractRefCounted refCounted) { + public ReleasableBytesReference(BytesReference delegate, RefCounted refCounted) { this.delegate = delegate; this.refCounted = refCounted; - assert refCounted.refCount() > 0; + assert refCounted.hasReferences(); } public static ReleasableBytesReference wrap(BytesReference reference) { @@ -53,10 +53,6 @@ public static ReleasableBytesReference wrap(BytesReference reference) { return reference.length() == 0 ? empty() : new ReleasableBytesReference(reference, NO_OP); } - public int refCount() { - return refCounted.refCount(); - } - @Override public void incRef() { refCounted.incRef(); @@ -98,19 +94,19 @@ public void close() { @Override public byte get(int index) { - assert refCount() > 0; + assert hasReferences(); return delegate.get(index); } @Override public int getInt(int index) { - assert refCount() > 0; + assert hasReferences(); return delegate.getInt(index); } @Override public int indexOf(byte marker, int from) { - assert refCount() > 0; + assert hasReferences(); return delegate.indexOf(marker, from); } @@ -121,7 +117,7 @@ public int length() { @Override public BytesReference slice(int from, int length) { - assert refCount() > 0; + assert hasReferences(); return delegate.slice(from, length); } @@ -132,7 +128,7 @@ public long ramBytesUsed() { @Override public StreamInput streamInput() throws IOException { - assert refCount() > 0; + assert hasReferences(); return new BytesReferenceStreamInput(this) { @Override public ReleasableBytesReference readReleasableBytesReference() throws 
IOException { @@ -148,37 +144,37 @@ public ReleasableBytesReference readReleasableBytesReference() throws IOExceptio @Override public void writeTo(OutputStream os) throws IOException { - assert refCount() > 0; + assert hasReferences(); delegate.writeTo(os); } @Override public String utf8ToString() { - assert refCount() > 0; + assert hasReferences(); return delegate.utf8ToString(); } @Override public BytesRef toBytesRef() { - assert refCount() > 0; + assert hasReferences(); return delegate.toBytesRef(); } @Override public BytesRefIterator iterator() { - assert refCount() > 0; + assert hasReferences(); return delegate.iterator(); } @Override public int compareTo(BytesReference o) { - assert refCount() > 0; + assert hasReferences(); return delegate.compareTo(o); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - assert refCount() > 0; + assert hasReferences(); return delegate.toXContent(builder, params); } @@ -189,31 +185,31 @@ public boolean isFragment() { @Override public boolean equals(Object obj) { - assert refCount() > 0; + assert hasReferences(); return delegate.equals(obj); } @Override public int hashCode() { - assert refCount() > 0; + assert hasReferences(); return delegate.hashCode(); } @Override public boolean hasArray() { - assert refCount() > 0; + assert hasReferences(); return delegate.hasArray(); } @Override public byte[] array() { - assert refCount() > 0; + assert hasReferences(); return delegate.array(); } @Override public int arrayOffset() { - assert refCount() > 0; + assert hasReferences(); return delegate.arrayOffset(); } diff --git a/server/src/test/java/org/elasticsearch/transport/InboundAggregatorTests.java b/server/src/test/java/org/elasticsearch/transport/InboundAggregatorTests.java index ff5a7938a3ead..97e7dddc720ca 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundAggregatorTests.java 
@@ -91,11 +91,11 @@ public void testInboundAggregation() throws IOException { assertThat(aggregated.getHeader().getRequestId(), equalTo(requestId)); assertThat(aggregated.getHeader().getVersion(), equalTo(Version.CURRENT)); for (ReleasableBytesReference reference : references) { - assertEquals(1, reference.refCount()); + assertTrue(reference.hasReferences()); } aggregated.close(); for (ReleasableBytesReference reference : references) { - assertEquals(0, reference.refCount()); + assertFalse(reference.hasReferences()); } } @@ -111,7 +111,7 @@ public void testInboundUnknownAction() throws IOException { final ReleasableBytesReference content = ReleasableBytesReference.wrap(bytes); aggregator.aggregate(content); content.close(); - assertEquals(0, content.refCount()); + assertFalse(content.hasReferences()); // Signal EOS InboundMessage aggregated = aggregator.finishAggregation(); @@ -139,7 +139,7 @@ public void testCircuitBreak() throws IOException { // Signal EOS InboundMessage aggregated1 = aggregator.finishAggregation(); - assertEquals(0, content1.refCount()); + assertFalse(content1.hasReferences()); assertThat(aggregated1, notNullValue()); assertTrue(aggregated1.isShortCircuit()); assertThat(aggregated1.getException(), instanceOf(CircuitBreakingException.class)); @@ -158,7 +158,7 @@ public void testCircuitBreak() throws IOException { // Signal EOS InboundMessage aggregated2 = aggregator.finishAggregation(); - assertEquals(1, content2.refCount()); + assertTrue(content2.hasReferences()); assertThat(aggregated2, notNullValue()); assertFalse(aggregated2.isShortCircuit()); @@ -177,7 +177,7 @@ public void testCircuitBreak() throws IOException { // Signal EOS InboundMessage aggregated3 = aggregator.finishAggregation(); - assertEquals(1, content3.refCount()); + assertTrue(content3.hasReferences()); assertThat(aggregated3, notNullValue()); assertFalse(aggregated3.isShortCircuit()); } @@ -211,7 +211,7 @@ public void testCloseWillCloseContent() { aggregator.close(); for 
(ReleasableBytesReference reference : references) { - assertEquals(0, reference.refCount()); + assertFalse(reference.hasReferences()); } } @@ -244,10 +244,10 @@ public void testFinishAggregationWillFinishHeader() throws IOException { assertFalse(header.needsToReadVariableHeader()); assertEquals(actionName, header.getActionName()); if (unknownAction) { - assertEquals(0, content.refCount()); + assertFalse(content.hasReferences()); assertTrue(aggregated.isShortCircuit()); } else { - assertEquals(1, content.refCount()); + assertTrue(content.hasReferences()); assertFalse(aggregated.isShortCircuit()); } } diff --git a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java index ed828607732ef..65e3cb1ad4325 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java @@ -80,7 +80,7 @@ public void testDecode() throws IOException { final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(totalBytes); int bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(totalHeaderSize, bytesConsumed); - assertEquals(1, releasable1.refCount()); + assertTrue(releasable1.hasReferences()); final Header header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); @@ -108,7 +108,10 @@ public void testDecode() throws IOException { assertEquals(messageBytes, content); // Ref count is incremented since the bytes are forwarded as a fragment - assertEquals(2, releasable2.refCount()); + assertTrue(releasable2.hasReferences()); + releasable2.decRef(); + assertTrue(releasable2.hasReferences()); + assertTrue(releasable2.decRef()); assertEquals(InboundDecoder.END_CONTENT, endMarker); } @@ -141,7 +144,7 @@ public void testDecodePreHeaderSizeVariableInt() throws IOException { final ReleasableBytesReference releasable1 = 
ReleasableBytesReference.wrap(totalBytes); int bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(partialHeaderSize, bytesConsumed); - assertEquals(1, releasable1.refCount()); + assertTrue(releasable1.hasReferences()); final Header header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); @@ -198,7 +201,7 @@ public void testDecodeHandshakeCompatibility() throws IOException { final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(bytes); int bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(totalHeaderSize, bytesConsumed); - assertEquals(1, releasable1.refCount()); + assertTrue(releasable1.hasReferences()); final Header header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); @@ -247,7 +250,7 @@ public void testCompressedDecode() throws IOException { final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(totalBytes); int bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(totalHeaderSize, bytesConsumed); - assertEquals(1, releasable1.refCount()); + assertTrue(releasable1.hasReferences()); final Header header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); @@ -279,7 +282,7 @@ public void testCompressedDecode() throws IOException { assertThat(content, instanceOf(ReleasableBytesReference.class)); ((ReleasableBytesReference) content).close(); // Ref count is not incremented since the bytes are immediately consumed on decompression - assertEquals(1, releasable2.refCount()); + assertTrue(releasable2.hasReferences()); assertEquals(InboundDecoder.END_CONTENT, endMarker); } @@ -311,7 +314,7 @@ public void testCompressedDecodeHandshakeCompatibility() throws IOException { final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(bytes); int bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(totalHeaderSize, bytesConsumed); - assertEquals(1, 
releasable1.refCount()); + assertTrue(releasable1.hasReferences()); final Header header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); @@ -339,16 +342,19 @@ public void testVersionIncompatibilityDecodeException() throws IOException { Compression.Scheme.DEFLATE ); + final ReleasableBytesReference releasable1; try (RecyclerBytesStreamOutput os = new RecyclerBytesStreamOutput(recycler)) { final BytesReference bytes = message.serialize(os); InboundDecoder decoder = new InboundDecoder(Version.CURRENT, recycler); final ArrayList fragments = new ArrayList<>(); - final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(bytes); - expectThrows(IllegalStateException.class, () -> decoder.decode(releasable1, fragments::add)); - // No bytes are retained - assertEquals(1, releasable1.refCount()); + try (ReleasableBytesReference r = ReleasableBytesReference.wrap(bytes)) { + releasable1 = r; + expectThrows(IllegalStateException.class, () -> decoder.decode(releasable1, fragments::add)); + } } + // No bytes are retained + assertFalse(releasable1.hasReferences()); } public void testEnsureVersionCompatibility() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java index ad26b60518c3c..2de79ce854187 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java @@ -184,7 +184,7 @@ public void testPipelineHandling() throws IOException { } for (ReleasableBytesReference released : toRelease) { - assertEquals(0, released.refCount()); + assertFalse(released.hasReferences()); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java index 271f275c88514..2698a3dfccb6a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ByteArray; -import org.elasticsearch.core.RefCounted; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportActionProxy; @@ -80,7 +79,7 @@ protected void doExecute( } } - public static class GetCcrRestoreFileChunkResponse extends ActionResponse implements RefCounted { + public static class GetCcrRestoreFileChunkResponse extends ActionResponse { private final long offset; private final ReleasableBytesReference chunk; From 76e935efd8328bf4124af77b73663110f4816f93 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Mon, 29 Nov 2021 14:55:56 -0500 Subject: [PATCH 50/55] Fail shards early when we can detect a type mismatch (#79869) Resolves #72276 Generally speaking, we can't detect field type mismatches between different shards until reduce time, which then causes us to fail the whole aggregation. There is an exception though when the user has specified a value type. Since the value type gets pushed out to all the shards, we can detect on the shard if the field type doesn't match the specified value type, and fail only that shard allowing for a partial result on the aggregation.
In the case where the user supplies a script as well, we don't fail the shard, because it's possible the script changes the type (this was a pattern before runtime fields) --- .../test/search.aggregation/20_terms.yml | 140 ++++++++++++ .../aggregations/support/FieldContext.java | 4 + .../support/ValuesSourceConfig.java | 21 +- .../terms/BinaryTermsAggregatorTests.java | 2 +- .../support/ValuesSourceConfigTests.java | 200 +++++++++++++++++- .../index/mapper/MapperServiceTestCase.java | 54 ++++- 6 files changed, 409 insertions(+), 12 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml index 7d4ad735fa96d..eb871da38db0b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml @@ -1368,3 +1368,143 @@ huge size: - match: { aggregations.str_terms.buckets.1.doc_count: 2 } - match: { aggregations.str_terms.buckets.2.key: c } - match: { aggregations.str_terms.buckets.2.doc_count: 3 } + +--- +Value type mismatch fails shard: + - skip: + version: " - 8.0.99" + reason: "Fixed in 8.1" + + - do: + indices.create: + index: valuetype_test_1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + ip: + type: keyword + - do: + indices.create: + index: valuetype_test_2 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + ip: + type: ip + + - do: + bulk: + index: valuetype_test_1 + refresh: true + body: | + { "index": {} } + { "ip": "192.168.7.1" } + { "index": {} } + { "ip": "192.168.7.2" } + { "index": {} } + { "ip": "192.168.7.3" } + + - do: + bulk: + index: valuetype_test_2 + refresh: true + body: | + { "index": {} } + { "ip": "127.0.0.1" } + { "index": {} } + { "ip": "192.168.0.1" } + { "index": {} } + 
{ "ip": "192.168.0.2" } + { "index": {} } + { "ip": "192.168.0.3" } + - do: + search: + index: valuetype_test_1,valuetype_test_2 + body: + size: 0 + aggs: + str_terms: + terms: + field: ip + value_type: ip + + - match: { _shards.failed: 1 } + - length: { aggregations.str_terms.buckets: 4 } + - match: { aggregations.str_terms.buckets.0.key: "127.0.0.1" } + - match: { aggregations.str_terms.buckets.0.doc_count: 1 } + - match: { aggregations.str_terms.buckets.1.key: "192.168.0.1" } + - match: { aggregations.str_terms.buckets.1.doc_count: 1 } + - match: { aggregations.str_terms.buckets.2.key: "192.168.0.2" } + - match: { aggregations.str_terms.buckets.2.doc_count: 1 } + - match: { aggregations.str_terms.buckets.3.key: "192.168.0.3" } + - match: { aggregations.str_terms.buckets.3.doc_count: 1 } + +--- +Value type mismatch fails shard with no docs: + - skip: + version: " - 8.0.99" + reason: "Fixed in 8.1" + + - do: + indices.create: + index: valuetype_test_1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + ip: + type: keyword + - do: + indices.create: + index: valuetype_test_2 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + ip: + type: ip + + - do: + bulk: + index: valuetype_test_2 + refresh: true + body: | + { "index": {} } + { "ip": "127.0.0.1" } + { "index": {} } + { "ip": "192.168.0.1" } + { "index": {} } + { "ip": "192.168.0.2" } + { "index": {} } + { "ip": "192.168.0.3" } + - do: + search: + index: valuetype_test_1,valuetype_test_2 + body: + size: 0 + aggs: + str_terms: + terms: + field: ip + value_type: ip + + - match: { _shards.failed: 1 } + - length: { aggregations.str_terms.buckets: 4 } + - match: { aggregations.str_terms.buckets.0.key: "127.0.0.1" } + - match: { aggregations.str_terms.buckets.0.doc_count: 1 } + - match: { aggregations.str_terms.buckets.1.key: "192.168.0.1" } + - match: { aggregations.str_terms.buckets.1.doc_count: 1 } + - match: { 
aggregations.str_terms.buckets.2.key: "192.168.0.2" } + - match: { aggregations.str_terms.buckets.2.doc_count: 1 } + - match: { aggregations.str_terms.buckets.3.key: "192.168.0.3" } + - match: { aggregations.str_terms.buckets.3.doc_count: 1 } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java index 87c1f3d645293..101e94b6717c4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java @@ -47,4 +47,8 @@ public MappedFieldType fieldType() { return fieldType; } + public String getTypeName() { + return fieldType.typeName(); + } + } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java index 7483419872ef3..12a20da6ae5cc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java @@ -148,7 +148,26 @@ private static ValuesSourceConfig internalResolve( if (valuesSourceType == null) { // We have a field, and the user didn't specify a type, so get the type from the field valuesSourceType = fieldResolver.getValuesSourceType(fieldContext, userValueTypeHint, defaultValueSourceType); - } + } else if (valuesSourceType != fieldResolver.getValuesSourceType(fieldContext, userValueTypeHint, defaultValueSourceType) + && script == null) { + /* + * This is the case where the user has specified the type they expect, but we found a field of a different type. + * Usually this happens because of a mapping error, e.g. an older index mapped an IP address as a keyword. 
If + * the aggregation proceeds, it will usually break during reduction and return no results. So instead, we fail the + * shard with the conflict at this point, allowing the correctly mapped shards to return results with a partial + * failure. + * + * Note that if a script is specified, the assumption is that the script adapts the field into the specified type, + * and we allow the aggregation to continue. + */ + throw new IllegalArgumentException( + "Field type [" + + fieldContext.getTypeName() + + "] is incompatible with specified value_type [" + + userValueTypeHint + + "]" + ); + } } } if (valuesSourceType == null) { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java index 80c8909cd2129..cc5abceb5c588 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java @@ -124,7 +124,7 @@ public void testBadUserValueTypeHint() throws IOException { ValueType.NUMERIC // numeric type hint ) ); - assertThat(e.getMessage(), equalTo("Expected numeric type on field [binary], but got [binary]")); + assertThat(e.getMessage(), equalTo("Field type [binary] is incompatible with specified value_type [numeric]")); } private void testSearchCase( diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfigTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfigTests.java index 027c0a9a3cc26..928c8c77e0e42 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfigTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfigTests.java @@ -14,11 +14,209 @@ import 
org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperServiceTestCase; +import org.elasticsearch.script.AggregationScript; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; +import org.mockito.Mockito; import java.util.List; -// TODO: This whole set of tests needs to be rethought. public class ValuesSourceConfigTests extends MapperServiceTestCase { + + @Override + @SuppressWarnings("unchecked") + protected T compileScript(Script script, ScriptContext context) { + AggregationScript.Factory mockFactory = Mockito.mock(AggregationScript.Factory.class); + Mockito.when(mockFactory.newFactory(Mockito.any(), Mockito.any())).thenReturn(Mockito.mock(AggregationScript.LeafFactory.class)); + return (T) mockFactory; + } + + /** + * Attempting to resolve a config with neither a field nor a script specified throws an error + */ + public void testNoFieldNoScript() { + expectThrows( + IllegalStateException.class, + () -> ValuesSourceConfig.resolve(null, null, null, null, null, null, null, CoreValuesSourceType.KEYWORD) + ); + } + + /** + * When there's an unmapped field with no script, we should use the user value type hint if available, and fall back to the default + * value source type if it's not available. 
+ */ + public void testUnmappedFieldNoScript() throws Exception { + MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "long"))); + // No value type hint + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve(context, null, "UnmappedField", null, null, null, null, CoreValuesSourceType.KEYWORD); + assertEquals(CoreValuesSourceType.KEYWORD, config.valueSourceType()); + }); + + // With value type hint + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve( + context, + ValueType.IP, + "UnmappedField", + null, + null, + null, + null, + CoreValuesSourceType.KEYWORD + ); + assertEquals(CoreValuesSourceType.IP, config.valueSourceType()); + }); + } + + /** + * When the field is mapped and there's no script and no hint, use the field type + */ + public void testMappedFieldNoScriptNoHint() throws Exception { + MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "long"))); + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve(context, null, "field", null, null, null, null, CoreValuesSourceType.KEYWORD); + assertEquals(CoreValuesSourceType.NUMERIC, config.valueSourceType()); + }); + } + + /** + * When we have a mapped field and a hint, but no script, we should throw if the hint doesn't match the field, + * and use the type of both if they do match. Note that when there is a script, we just use the user value type + * regardless of the field type. This is to allow for scripts that adapt types, even though runtime fields are + * a better solution for that in every way. 
+ */ + public void testMappedFieldNoScriptWithHint() throws Exception { + MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "long"))); + // not matching case + expectThrows( + IllegalArgumentException.class, + () -> withAggregationContext( + mapperService, + List.of(source(b -> b.field("field", 42))), + context -> ValuesSourceConfig.resolve(context, ValueType.IP, "field", null, null, null, null, CoreValuesSourceType.KEYWORD) + ) + ); + + // matching case + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve(context, ValueType.NUMBER, "field", null, null, null, null, CoreValuesSourceType.KEYWORD); + assertEquals(CoreValuesSourceType.NUMERIC, config.valueSourceType()); + }); + } + + /** + * When there's a script and the user tells us what type it produces, always use that type, regardless of if there's also a field + */ + public void testScriptWithHint() throws Exception { + MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "long"))); + // With field + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve( + context, + ValueType.IP, + "field", + mockScript("mockscript"), + null, + null, + null, + CoreValuesSourceType.KEYWORD + ); + assertEquals(CoreValuesSourceType.IP, config.valueSourceType()); + }, () -> null); + + // With unmapped field + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve( + context, + ValueType.IP, + "unmappedField", + mockScript("mockscript"), + null, + null, + null, + CoreValuesSourceType.KEYWORD + ); + assertEquals(CoreValuesSourceType.IP, config.valueSourceType()); + }, () -> null); + + // Without field + withAggregationContext(mapperService, 
List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve( + context, + ValueType.IP, + null, + mockScript("mockscript"), + null, + null, + null, + CoreValuesSourceType.KEYWORD + ); + assertEquals(CoreValuesSourceType.IP, config.valueSourceType()); + }, () -> null); + } + + /** + * If there's a script and the user didn't tell us what type it produces, use the field if possible, otherwise the default + */ + public void testScriptNoHint() throws Exception { + MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "long"))); + // With field + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve( + context, + null, + "field", + mockScript("mockscript"), + null, + null, + null, + CoreValuesSourceType.KEYWORD + ); + assertEquals(CoreValuesSourceType.NUMERIC, config.valueSourceType()); + }, () -> null); + + // With unmapped field + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve( + context, + null, + "unmappedField", + mockScript("mockscript"), + null, + null, + null, + CoreValuesSourceType.KEYWORD + ); + assertEquals(CoreValuesSourceType.KEYWORD, config.valueSourceType()); + }, () -> null); + + // Without field + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve( + context, + null, + null, + mockScript("mockscript"), + null, + null, + null, + CoreValuesSourceType.KEYWORD + ); + assertEquals(CoreValuesSourceType.KEYWORD, config.valueSourceType()); + }, () -> null); + } + public void testKeyword() throws Exception { MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "keyword"))); withAggregationContext(mapperService, 
List.of(source(b -> b.field("field", "abc"))), context -> { diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 5db980f7a2cc8..c8e45b619bf92 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -177,10 +177,6 @@ protected final MapperService createMapperService( return mapperService; } - protected T compileScript(Script script, ScriptContext context) { - throw new UnsupportedOperationException("Cannot compile script " + Strings.toString(script)); - } - protected final MapperService createMapperService(Version version, Settings settings, BooleanSupplier idFieldDataEnabled) { IndexSettings indexSettings = createIndexSettings(version, settings); MapperRegistry mapperRegistry = new IndicesModule( @@ -200,6 +196,14 @@ protected final MapperService createMapperService(Version version, Settings sett ); } + /** + * This is the injection point for tests that require mock scripts. Test cases should override this to return the + * mock script factory of their choice. 
+ */ + protected T compileScript(Script script, ScriptContext context) { + throw new UnsupportedOperationException("Cannot compile script " + Strings.toString(script)); + } + protected static IndexSettings createIndexSettings(Version version, Settings settings) { settings = Settings.builder() .put("index.number_of_replicas", 0) @@ -329,7 +333,8 @@ private AggregationContext aggregationContext( ValuesSourceRegistry valuesSourceRegistry, MapperService mapperService, IndexSearcher searcher, - Query query + Query query, + Supplier lookupSupplier ) { return new AggregationContext() { private final CircuitBreaker breaker = mock(CircuitBreaker.class); @@ -383,7 +388,7 @@ public boolean isFieldMapped(String field) { @Override public SearchLookup lookup() { - throw new UnsupportedOperationException(); + return lookupSupplier.get(); } @Override @@ -407,8 +412,9 @@ public Set getMatchingFieldNames(String pattern) { } @Override + @SuppressWarnings("unchecked") public FactoryType compile(Script script, ScriptContext context) { - throw new UnsupportedOperationException(); + return compileScript(script, context); } @Override @@ -518,7 +524,16 @@ protected final void withAggregationContext( List docs, CheckedConsumer test ) throws IOException { - withAggregationContext(null, mapperService, docs, null, test); + withAggregationContext(mapperService, docs, test, () -> { throw new UnsupportedOperationException(); }); + } + + protected final void withAggregationContext( + MapperService mapperService, + List docs, + CheckedConsumer test, + Supplier lookupSupplier + ) throws IOException { + withAggregationContext(null, mapperService, docs, null, test, lookupSupplier); } protected final void withAggregationContext( @@ -527,12 +542,33 @@ protected final void withAggregationContext( List docs, Query query, CheckedConsumer test + ) throws IOException { + withAggregationContext( + valuesSourceRegistry, + mapperService, + docs, + query, + test, + () -> { throw new 
UnsupportedOperationException(); } + ); + } + + protected final void withAggregationContext( + ValuesSourceRegistry valuesSourceRegistry, + MapperService mapperService, + List docs, + Query query, + CheckedConsumer test, + Supplier lookupSupplier ) throws IOException { withLuceneIndex(mapperService, writer -> { for (SourceToParse doc : docs) { writer.addDocuments(mapperService.documentMapper().parse(doc).docs()); + } - }, reader -> test.accept(aggregationContext(valuesSourceRegistry, mapperService, new IndexSearcher(reader), query))); + }, + reader -> test.accept(aggregationContext(valuesSourceRegistry, mapperService, new IndexSearcher(reader), query, lookupSupplier)) + ); } protected SearchExecutionContext createSearchExecutionContext(MapperService mapperService) { From 85b3435100282508a866ebbcb15b4f99d4df23f8 Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Mon, 29 Nov 2021 20:10:16 +0000 Subject: [PATCH 51/55] Fix shadowed vars pt7 (#80996) Part of #19752. Fix more instances where local variable names were shadowing field names. Also modify our fork of HiddenFieldCheck to add the ignoreConstructorBody and ignoredMethodNames parameters, so that the check can ignore more matches. 
--- .../internal/checkstyle/HiddenFieldCheck.java | 62 ++++++++++++++++++- .../src/main/resources/checkstyle.xml | 4 +- .../ccr/action/ShardFollowNodeTaskTests.java | 6 +- .../index/engine/FollowingEngineTests.java | 49 ++++++++------- .../AutoFollowStatsMonitoringDocTests.java | 2 +- .../ccr/FollowStatsMonitoringDocTests.java | 8 +-- .../action/GetRollupIndexCapsAction.java | 2 +- .../xpack/core/scheduler/Cron.java | 2 +- .../search/action/AsyncSearchResponse.java | 8 +-- .../core/security/CommandLineHttpClient.java | 4 +- .../action/InvalidateApiKeyRequest.java | 10 +-- .../security/action/role/PutRoleRequest.java | 12 ++-- .../accesscontrol/IndicesAccessControl.java | 28 +++++---- .../authz/permission/FieldPermissions.java | 4 +- .../authz/permission/IndicesPermission.java | 10 +-- .../authz/permission/ResourcePrivileges.java | 4 +- .../core/slm/SnapshotLifecyclePolicy.java | 10 +-- .../slm/SnapshotLifecyclePolicyMetadata.java | 8 +-- .../core/slm/SnapshotLifecycleStats.java | 6 +- .../core/ssl/SSLConfigurationSettings.java | 4 +- .../xpack/core/ssl/SSLService.java | 16 ++--- .../xpack/core/ssl/SslSettingsLoader.java | 4 +- .../termsenum/action/MultiShardTermsEnum.java | 6 +- .../core/transform/TransformMetadata.java | 4 +- .../transform/action/StopTransformAction.java | 4 +- .../transform/transforms/SettingsConfig.java | 6 +- .../transform/transforms/TransformConfig.java | 4 +- .../transforms/TransformProgress.java | 6 +- .../pivot/DateHistogramGroupSource.java | 6 +- .../pivot/HistogramGroupSource.java | 4 +- .../actions/throttler/PeriodThrottler.java | 16 ++--- .../watcher/client/WatchSourceBuilder.java | 16 +++-- .../core/watcher/common/stats/Counters.java | 4 +- .../execution/WatchExecutionContext.java | 26 ++++---- .../transform/chain/ChainTransform.java | 8 +-- .../actions/execute/ExecuteWatchRequest.java | 3 +- .../xpack/core/watcher/watch/Watch.java | 4 +- 37 files changed, 227 insertions(+), 153 deletions(-) diff --git 
a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/HiddenFieldCheck.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/HiddenFieldCheck.java index 23155cc2971e7..a27558f046698 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/HiddenFieldCheck.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/HiddenFieldCheck.java @@ -71,9 +71,15 @@ public class HiddenFieldCheck extends AbstractCheck { /** Control whether to ignore constructor parameters. */ private boolean ignoreConstructorParameter; + /** Control whether to ignore variables in constructor bodies. */ + private boolean ignoreConstructorBody; + /** Control whether to ignore parameters of abstract methods. */ private boolean ignoreAbstractMethods; + /** If set, specifies a regex of method names that should be ignored */ + private String ignoredMethodNames; + @Override public int[] getDefaultTokens() { return getAcceptableTokens(); @@ -224,7 +230,8 @@ private void processVariable(DetailAST ast) { if ((frame.containsStaticField(name) || isInstanceField(ast, name)) && isMatchingRegexp(name) == false - && isIgnoredParam(ast, name) == false) { + && isIgnoredParam(ast, name) == false + && isIgnoredVariable(ast, name) == false) { log(nameAST, MSG_KEY, name); } } @@ -238,7 +245,14 @@ && isIgnoredParam(ast, name) == false) { * @return true if parameter is ignored. 
*/ private boolean isIgnoredParam(DetailAST ast, String name) { - return isIgnoredSetterParam(ast, name) || isIgnoredConstructorParam(ast) || isIgnoredParamOfAbstractMethod(ast); + return isVariableInIgnoredMethod(ast, name) + || isIgnoredSetterParam(ast, name) + || isIgnoredConstructorParam(ast) + || isIgnoredParamOfAbstractMethod(ast); + } + + private boolean isIgnoredVariable(DetailAST ast, String name) { + return isIgnoredVariableInConstructorBody(ast, name); } /** @@ -410,6 +424,42 @@ private boolean isIgnoredParamOfAbstractMethod(DetailAST ast) { return result; } + /** + * Decides whether to ignore an AST node that is the parameter of a method whose + * name matches the {@link #ignoredMethodNames} regex, if set. + * @param ast the AST to check + * @return true if the ast should be ignored because the parameter belongs to a + * method whose name matches the regex. + */ + private boolean isVariableInIgnoredMethod(DetailAST ast, String name) { + boolean result = false; + if (ignoredMethodNames != null && (ast.getType() == TokenTypes.PARAMETER_DEF || ast.getType() == TokenTypes.VARIABLE_DEF)) { + DetailAST method = ast.getParent(); + while (method != null && method.getType() != TokenTypes.METHOD_DEF) { + method = method.getParent(); + } + if (method != null && method.getType() == TokenTypes.METHOD_DEF) { + final String methodName = method.findFirstToken(TokenTypes.IDENT).getText(); + result = methodName.matches(ignoredMethodNames); + } + } + return result; + } + + private boolean isIgnoredVariableInConstructorBody(DetailAST ast, String name) { + boolean result = false; + + if (ignoreConstructorBody && ast.getType() == TokenTypes.VARIABLE_DEF) { + DetailAST method = ast.getParent(); + while (method != null && method.getType() != TokenTypes.CTOR_DEF) { + method = method.getParent(); + } + result = method != null && method.getType() == TokenTypes.CTOR_DEF; + } + + return result; + } + + /** * Setter to define the RegExp for names of variables and parameters to ignore.
* @@ -463,6 +513,14 @@ public void setIgnoreAbstractMethods(boolean ignoreAbstractMethods) { this.ignoreAbstractMethods = ignoreAbstractMethods; } + public void setIgnoredMethodNames(String ignoredMethodNames) { + this.ignoredMethodNames = ignoredMethodNames; + } + + public void setIgnoreConstructorBody(boolean ignoreConstructorBody) { + this.ignoreConstructorBody = ignoreConstructorBody; + } + /** * Holds the names of static and instance fields of a type. */ diff --git a/build-tools-internal/src/main/resources/checkstyle.xml b/build-tools-internal/src/main/resources/checkstyle.xml index abaab3a1a8ae0..c9ce78ef06b6b 100644 --- a/build-tools-internal/src/main/resources/checkstyle.xml +++ b/build-tools-internal/src/main/resources/checkstyle.xml @@ -111,10 +111,12 @@ diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java index 47e40b126d044..5ea943a1828ab 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java @@ -1421,8 +1421,8 @@ protected void innerSendShardChangesRequest( @Override protected Scheduler.Cancellable scheduleBackgroundRetentionLeaseRenewal(final LongSupplier followerGlobalCheckpoint) { if (scheduleRetentionLeaseRenewal.get()) { - final ScheduledThreadPoolExecutor scheduler = Scheduler.initScheduler(Settings.EMPTY, "test-scheduler"); - final ScheduledFuture future = scheduler.scheduleWithFixedDelay( + final ScheduledThreadPoolExecutor testScheduler = Scheduler.initScheduler(Settings.EMPTY, "test-scheduler"); + final ScheduledFuture future = testScheduler.scheduleWithFixedDelay( () -> retentionLeaseRenewal.accept(followerGlobalCheckpoint.getAsLong()), 0, TimeValue.timeValueMillis(200).millis(), @@ -1433,7 +1433,7 @@ protected Scheduler.Cancellable 
scheduleBackgroundRetentionLeaseRenewal(final Lo @Override public boolean cancel() { final boolean cancel = future.cancel(true); - scheduler.shutdown(); + testScheduler.shutdown(); return cancel; } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index 970be2675f9a2..a4c44a1a749b7 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -109,8 +109,8 @@ public void testFollowingEngineRejectsNonFollowingIndex() throws IOException { public void testIndexSeqNoIsMaintained() throws IOException { final long seqNo = randomIntBetween(0, Integer.MAX_VALUE); - runIndexTest(seqNo, Engine.Operation.Origin.PRIMARY, (followingEngine, index) -> { - final Engine.IndexResult result = followingEngine.index(index); + runIndexTest(seqNo, Engine.Operation.Origin.PRIMARY, (followingEngine, indexToTest) -> { + final Engine.IndexResult result = followingEngine.index(indexToTest); assertThat(result.getSeqNo(), equalTo(seqNo)); }); } @@ -156,8 +156,8 @@ public void runIndexTest( try (Store store = createStore(shardId, indexSettings, newDirectory())) { final EngineConfig engineConfig = engineConfig(shardId, indexSettings, threadPool, store); try (FollowingEngine followingEngine = createEngine(store, engineConfig)) { - final Engine.Index index = indexForFollowing("id", seqNo, origin); - consumer.accept(followingEngine, index); + final Engine.Index indexToTest = indexForFollowing("id", seqNo, origin); + consumer.accept(followingEngine, indexToTest); } } } @@ -226,16 +226,21 @@ public void testDoNotFillSeqNoGaps() throws Exception { } private EngineConfig engineConfig( - final ShardId shardId, + final ShardId shardIdValue, final IndexSettings indexSettings, final 
ThreadPool threadPool, final Store store ) { final IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); final Path translogPath = createTempDir("translog"); - final TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); + final TranslogConfig translogConfig = new TranslogConfig( + shardIdValue, + translogPath, + indexSettings, + BigArrays.NON_RECYCLING_INSTANCE + ); return new EngineConfig( - shardId, + shardIdValue, threadPool, indexSettings, null, @@ -331,26 +336,26 @@ private Engine.Delete deleteForPrimary(String id) { return new Engine.Delete(parsedDoc.id(), EngineTestCase.newUid(parsedDoc), primaryTerm.get()); } - private Engine.Result applyOperation(Engine engine, Engine.Operation op, long primaryTerm, Engine.Operation.Origin origin) + private Engine.Result applyOperation(Engine engine, Engine.Operation op, long primaryTermValue, Engine.Operation.Origin origin) throws IOException { final VersionType versionType = origin == Engine.Operation.Origin.PRIMARY ? 
VersionType.EXTERNAL : null; final Engine.Result result; if (op instanceof Engine.Index) { - Engine.Index index = (Engine.Index) op; + Engine.Index engineIndex = (Engine.Index) op; result = engine.index( new Engine.Index( - index.uid(), - index.parsedDoc(), - index.seqNo(), - primaryTerm, - index.version(), + engineIndex.uid(), + engineIndex.parsedDoc(), + engineIndex.seqNo(), + primaryTermValue, + engineIndex.version(), versionType, origin, - index.startTime(), - index.getAutoGeneratedIdTimestamp(), - index.isRetry(), - index.getIfSeqNo(), - index.getIfPrimaryTerm() + engineIndex.startTime(), + engineIndex.getAutoGeneratedIdTimestamp(), + engineIndex.isRetry(), + engineIndex.getIfSeqNo(), + engineIndex.getIfPrimaryTerm() ) ); } else if (op instanceof Engine.Delete) { @@ -360,7 +365,7 @@ private Engine.Result applyOperation(Engine engine, Engine.Operation op, long pr delete.id(), delete.uid(), delete.seqNo(), - primaryTerm, + primaryTermValue, delete.version(), versionType, origin, @@ -371,7 +376,7 @@ private Engine.Result applyOperation(Engine engine, Engine.Operation op, long pr ); } else { Engine.NoOp noOp = (Engine.NoOp) op; - result = engine.noOp(new Engine.NoOp(noOp.seqNo(), primaryTerm, origin, noOp.startTime(), noOp.reason())); + result = engine.noOp(new Engine.NoOp(noOp.seqNo(), primaryTermValue, origin, noOp.startTime(), noOp.reason())); } return result; } @@ -828,7 +833,7 @@ public void testProcessOnceOnPrimary() throws Exception { */ public void testVerifyShardBeforeIndexClosingIsNoOp() throws IOException { final long seqNo = randomIntBetween(0, Integer.MAX_VALUE); - runIndexTest(seqNo, Engine.Operation.Origin.PRIMARY, (followingEngine, index) -> { + runIndexTest(seqNo, Engine.Operation.Origin.PRIMARY, (followingEngine, indexToTest) -> { globalCheckpoint.set(randomNonNegativeLong()); try { followingEngine.verifyEngineBeforeIndexClosing(); diff --git 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AutoFollowStatsMonitoringDocTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AutoFollowStatsMonitoringDocTests.java index 5a86f44bb5b90..c5e10371381c1 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AutoFollowStatsMonitoringDocTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AutoFollowStatsMonitoringDocTests.java @@ -88,7 +88,7 @@ public void testToXContent() throws IOException { final NavigableMap trackingClusters = new TreeMap<>( Collections.singletonMap(randomAlphaOfLength(4), new AutoFollowedCluster(1L, 1L)) ); - final AutoFollowStats autoFollowStats = new AutoFollowStats( + autoFollowStats = new AutoFollowStats( randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java index a561e4d79e0e4..72502c044bf59 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java @@ -117,7 +117,7 @@ public void testToXContent() throws IOException { ) ); final long timeSinceLastReadMillis = randomNonNegativeLong(); - final ShardFollowNodeTaskStatus status = new ShardFollowNodeTaskStatus( + final ShardFollowNodeTaskStatus taskStatus = new ShardFollowNodeTaskStatus( "leader_cluster", "leader_index", "follower_index", @@ -148,7 +148,7 @@ public void testToXContent() throws IOException { timeSinceLastReadMillis, new ElasticsearchException("fatal error") ); - final FollowStatsMonitoringDoc document = new 
FollowStatsMonitoringDoc("_cluster", timestamp, intervalMillis, node, status); + final FollowStatsMonitoringDoc document = new FollowStatsMonitoringDoc("_cluster", timestamp, intervalMillis, node, taskStatus); final BytesReference xContent = XContentHelper.toXContent(document, XContentType.JSON, false); assertThat( xContent.utf8ToString(), @@ -273,7 +273,7 @@ public void testShardFollowNodeTaskStatusFieldsMapped() throws IOException { final NavigableMap> fetchExceptions = new TreeMap<>( Collections.singletonMap(1L, Tuple.tuple(2, new ElasticsearchException("shard is sad"))) ); - final ShardFollowNodeTaskStatus status = new ShardFollowNodeTaskStatus( + final ShardFollowNodeTaskStatus taskStatus = new ShardFollowNodeTaskStatus( "remote_cluster", "leader_index", "follower_index", @@ -305,7 +305,7 @@ public void testShardFollowNodeTaskStatusFieldsMapped() throws IOException { new ElasticsearchException("fatal error") ); XContentBuilder builder = jsonBuilder(); - builder.value(status); + builder.value(taskStatus); Map serializedStatus = XContentHelper.convertToMap(XContentType.JSON.xContent(), Strings.toString(builder), false); byte[] loadedTemplate = MonitoringTemplateRegistry.getTemplateConfigForMonitoredSystem(MonitoredSystem.ES).loadBytes(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java index 56336ce8d5ab0..54b67535b38a1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java @@ -74,7 +74,7 @@ public String[] indices() { } @Override - public IndicesRequest indices(String... indices) { + public IndicesRequest indices(@SuppressWarnings("HiddenField") String... 
indices) { Objects.requireNonNull(indices, "indices must not be null"); for (String index : indices) { Objects.requireNonNull(index, "index must not be null"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java index 935430656a72d..a9777213faf8f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java @@ -787,7 +787,7 @@ public static void validate(String expression) throws IllegalArgumentException { // //////////////////////////////////////////////////////////////////////////// - private void buildExpression(String expression) { + private void buildExpression(@SuppressWarnings("HiddenField") String expression) { try { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncSearchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncSearchResponse.java index 22d4fe9644ec4..b05d30289b528 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncSearchResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncSearchResponse.java @@ -100,8 +100,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(expirationTimeMillis); } - public AsyncSearchResponse clone(String id) { - return new AsyncSearchResponse(id, searchResponse, error, isPartial, false, startTimeMillis, expirationTimeMillis); + public AsyncSearchResponse clone(String searchId) { + return new AsyncSearchResponse(searchId, searchResponse, error, isPartial, false, startTimeMillis, expirationTimeMillis); } /** @@ -165,8 +165,8 @@ public long getExpirationTime() { } @Override - public AsyncSearchResponse withExpirationTime(long expirationTimeMillis) { - return new 
AsyncSearchResponse(id, searchResponse, error, isPartial, isRunning, startTimeMillis, expirationTimeMillis); + public AsyncSearchResponse withExpirationTime(long expirationTime) { + return new AsyncSearchResponse(id, searchResponse, error, isPartial, isRunning, startTimeMillis, expirationTime); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/CommandLineHttpClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/CommandLineHttpClient.java index bb64ff90af63e..a74313121501e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/CommandLineHttpClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/CommandLineHttpClient.java @@ -343,7 +343,7 @@ public static String apiKeyHeaderValue(SecureString apiKey) { * Returns a TrustManager to be used in a client SSLContext, which trusts all certificates that are signed * by a specific CA certificate ( identified by its SHA256 fingerprint, {@code pinnedCaCertFingerPrint} ) */ - private TrustManager fingerprintTrustingTrustManager(String pinnedCaCertFingerprint) { + private TrustManager fingerprintTrustingTrustManager(String caCertFingerprint) { final TrustManager trustManager = new X509TrustManager() { public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {} @@ -354,7 +354,7 @@ public void checkServerTrusted(X509Certificate[] chain, String authType) throws final Certificate caCertFromChain = chain[1]; MessageDigest sha256 = MessageDigests.sha256(); sha256.update(caCertFromChain.getEncoded()); - if (MessageDigests.toHexString(sha256.digest()).equals(pinnedCaCertFingerprint) == false) { + if (MessageDigests.toHexString(sha256.digest()).equals(caCertFingerprint) == false) { throw new CertificateException(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java index 4bc503246c1c1..2ee259ba52d9f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java @@ -249,14 +249,16 @@ public int hashCode() { return Objects.hash(realmName, userName, ids, name, ownedByAuthenticatedUser); } - private void validateIds(@Nullable String[] ids) { - if (ids != null) { - if (ids.length == 0) { + private void validateIds(@Nullable String[] idsToValidate) { + if (idsToValidate != null) { + if (idsToValidate.length == 0) { final ActionRequestValidationException validationException = new ActionRequestValidationException(); validationException.addValidationError("Field [ids] cannot be an empty array"); throw validationException; } else { - final int[] idxOfBlankIds = IntStream.range(0, ids.length).filter(i -> Strings.hasText(ids[i]) == false).toArray(); + final int[] idxOfBlankIds = IntStream.range(0, idsToValidate.length) + .filter(i -> Strings.hasText(idsToValidate[i]) == false) + .toArray(); if (idxOfBlankIds.length > 0) { final ActionRequestValidationException validationException = new ActionRequestValidationException(); validationException.addValidationError( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java index 26881286c9723..5c5170cd41d6a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java @@ -95,9 +95,9 @@ public ActionRequestValidationException validate() { } catch (IllegalArgumentException e) { validationException = 
addValidationError(e.getMessage(), validationException); } - for (String name : privilege.getPrivileges()) { + for (String privilegeName : privilege.getPrivileges()) { try { - ApplicationPrivilege.validatePrivilegeOrActionName(name); + ApplicationPrivilege.validatePrivilegeOrActionName(privilegeName); } catch (IllegalArgumentException e) { validationException = addValidationError(e.getMessage(), validationException); } @@ -117,12 +117,12 @@ public void name(String name) { this.name = name; } - public void cluster(String... clusterPrivileges) { - this.clusterPrivileges = clusterPrivileges; + public void cluster(String... clusterPrivilegesArray) { + this.clusterPrivileges = clusterPrivilegesArray; } - public void conditionalCluster(ConfigurableClusterPrivilege... configurableClusterPrivileges) { - this.configurableClusterPrivileges = configurableClusterPrivileges; + public void conditionalCluster(ConfigurableClusterPrivilege... configurableClusterPrivilegesArray) { + this.configurableClusterPrivileges = configurableClusterPrivilegesArray; } public void addIndex(RoleDescriptor.IndicesPrivileges... 
privileges) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/IndicesAccessControl.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/IndicesAccessControl.java index 15ea0ba298038..4cee1a3006b10 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/IndicesAccessControl.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/IndicesAccessControl.java @@ -182,7 +182,7 @@ public DocumentPermissions getDocumentPermissions() { } /** - * Returns a instance of {@link IndexAccessControl}, where the privileges for {@code this} object are constrained by the privileges + * Returns an instance of {@link IndexAccessControl}, where the privileges for {@code this} object are constrained by the privileges * contained in the provided parameter.
* Allowed fields for this index permission would be an intersection of allowed fields.
* Allowed documents for this index permission would be an intersection of allowed documents.
@@ -193,17 +193,19 @@ public DocumentPermissions getDocumentPermissions() { * @see DocumentPermissions#limitDocumentPermissions(DocumentPermissions) */ public IndexAccessControl limitIndexAccessControl(IndexAccessControl limitedByIndexAccessControl) { - final boolean granted; + final boolean isGranted; if (this.granted == limitedByIndexAccessControl.granted) { - granted = this.granted; + isGranted = this.granted; } else { - granted = false; + isGranted = false; } - FieldPermissions fieldPermissions = getFieldPermissions().limitFieldPermissions(limitedByIndexAccessControl.fieldPermissions); - DocumentPermissions documentPermissions = getDocumentPermissions().limitDocumentPermissions( + FieldPermissions constrainedFieldPermissions = getFieldPermissions().limitFieldPermissions( + limitedByIndexAccessControl.fieldPermissions + ); + DocumentPermissions constrainedDocumentPermissions = getDocumentPermissions().limitDocumentPermissions( limitedByIndexAccessControl.getDocumentPermissions() ); - return new IndexAccessControl(granted, fieldPermissions, documentPermissions); + return new IndexAccessControl(isGranted, constrainedFieldPermissions, constrainedDocumentPermissions); } @Override @@ -264,23 +266,23 @@ public IndicesAccessControl limitIndicesAccessControl(IndicesAccessControl limit return this; } - final boolean granted; + final boolean isGranted; if (this.granted == limitedByIndicesAccessControl.granted) { - granted = this.granted; + isGranted = this.granted; } else { - granted = false; + isGranted = false; } Set indexes = indexPermissions.keySet(); Set otherIndexes = limitedByIndicesAccessControl.indexPermissions.keySet(); Set commonIndexes = Sets.intersection(indexes, otherIndexes); - Map indexPermissions = new HashMap<>(commonIndexes.size()); + Map indexPermissionsMap = new HashMap<>(commonIndexes.size()); for (String index : commonIndexes) { IndexAccessControl indexAccessControl = getIndexPermissions(index); IndexAccessControl limitedByIndexAccessControl = 
limitedByIndicesAccessControl.getIndexPermissions(index); - indexPermissions.put(index, indexAccessControl.limitIndexAccessControl(limitedByIndexAccessControl)); + indexPermissionsMap.put(index, indexAccessControl.limitIndexAccessControl(limitedByIndexAccessControl)); } - return new IndicesAccessControl(granted, indexPermissions); + return new IndicesAccessControl(isGranted, indexPermissionsMap); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java index d673fbb801d6b..bc665e301f095 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java @@ -199,8 +199,8 @@ public static Automaton buildPermittedFieldsAutomaton(final String[] grantedFiel */ public FieldPermissions limitFieldPermissions(FieldPermissions limitedBy) { if (hasFieldLevelSecurity() && limitedBy != null && limitedBy.hasFieldLevelSecurity()) { - Automaton permittedFieldsAutomaton = Automatons.intersectAndMinimize(getIncludeAutomaton(), limitedBy.getIncludeAutomaton()); - return new FieldPermissions(fieldPermissionsDefinition, limitedBy.fieldPermissionsDefinition, permittedFieldsAutomaton); + Automaton _permittedFieldsAutomaton = Automatons.intersectAndMinimize(getIncludeAutomaton(), limitedBy.getIncludeAutomaton()); + return new FieldPermissions(fieldPermissionsDefinition, limitedBy.fieldPermissionsDefinition, _permittedFieldsAutomaton); } else if (limitedBy != null && limitedBy.hasFieldLevelSecurity()) { return new FieldPermissions(limitedBy.getFieldPermissionsDefinition(), limitedBy.getIncludeAutomaton()); } else if (hasFieldLevelSecurity()) { diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java index efa0b6aff2b22..b666d9f400647 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -109,8 +109,8 @@ private StringMatcher indexMatcher(Collection ordinaryIndices, Collectio } else { matcher = StringMatcher.of(ordinaryIndices); if (restrictedNamesAutomaton != null) { - CharacterRunAutomaton characterRunAutomaton = new CharacterRunAutomaton(restrictedNamesAutomaton); - matcher = matcher.and("", name -> characterRunAutomaton.run(name) == false); + CharacterRunAutomaton automaton = new CharacterRunAutomaton(restrictedNamesAutomaton); + matcher = matcher.and("", name -> automaton.run(name) == false); } if (restrictedIndices.isEmpty() == false) { matcher = StringMatcher.of(restrictedIndices).or(matcher); @@ -331,11 +331,11 @@ public Collection resolveConcreteIndices() { return List.of(indexAbstraction.getName()); } else { final List indices = indexAbstraction.getIndices(); - final List concreteIndices = new ArrayList<>(indices.size()); + final List concreteIndexNames = new ArrayList<>(indices.size()); for (var idx : indices) { - concreteIndices.add(idx.getName()); + concreteIndexNames.add(idx.getName()); } - return concreteIndices; + return concreteIndexNames; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ResourcePrivileges.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ResourcePrivileges.java index 1051f211c2cea..db2e863647f1a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ResourcePrivileges.java 
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ResourcePrivileges.java @@ -80,8 +80,8 @@ public Builder addPrivilege(String privilege, Boolean allowed) { return this; } - public Builder addPrivileges(Map privileges) { - for (Entry entry : privileges.entrySet()) { + public Builder addPrivileges(Map privilegeMap) { + for (Entry entry : privilegeMap.entrySet()) { addPrivilege(entry.getKey(), entry.getValue()); } return this; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java index 5b5b3f8abb9f0..0f9ff0eee78ee 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java @@ -135,8 +135,8 @@ public SnapshotRetentionConfiguration getRetentionPolicy() { } public long calculateNextExecution() { - final Cron schedule = new Cron(this.schedule); - return schedule.getNextValidTimeAfter(System.currentTimeMillis()); + final Cron scheduleEvaluator = new Cron(this.schedule); + return scheduleEvaluator.getNextValidTimeAfter(System.currentTimeMillis()); } /** @@ -149,9 +149,9 @@ public long calculateNextExecution() { * if either of the next two times after now is unsupported according to @{@link Cron#getNextValidTimeAfter(long)} */ public TimeValue calculateNextInterval() { - final Cron schedule = new Cron(this.schedule); - long next1 = schedule.getNextValidTimeAfter(System.currentTimeMillis()); - long next2 = schedule.getNextValidTimeAfter(next1); + final Cron scheduleEvaluator = new Cron(this.schedule); + long next1 = scheduleEvaluator.getNextValidTimeAfter(System.currentTimeMillis()); + long next2 = scheduleEvaluator.getNextValidTimeAfter(next1); if (next1 > 0 && next2 > 0) { return TimeValue.timeValueMillis(next2 - next1); } else { diff 
--git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java index 27a6a30e8d1f6..1a4ee92023f02 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java @@ -247,13 +247,13 @@ public Builder setModifiedDate(long modifiedDate) { return this; } - public Builder setLastSuccess(SnapshotInvocationRecord lastSuccessDate) { - this.lastSuccessDate = lastSuccessDate; + public Builder setLastSuccess(SnapshotInvocationRecord lastSuccess) { + this.lastSuccessDate = lastSuccess; return this; } - public Builder setLastFailure(SnapshotInvocationRecord lastFailureDate) { - this.lastFailureDate = lastFailureDate; + public Builder setLastFailure(SnapshotInvocationRecord lastFailure) { + this.lastFailureDate = lastFailure; return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java index f834f25496080..c7fc42b18dbeb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java @@ -133,14 +133,14 @@ public SnapshotLifecycleStats merge(SnapshotLifecycleStats other) { } public SnapshotLifecycleStats removePolicy(String policyId) { - Map policyStats = new HashMap<>(this.policyStats); - policyStats.remove(policyId); + Map policyStatsCopy = new HashMap<>(this.policyStats); + policyStatsCopy.remove(policyId); return new SnapshotLifecycleStats( this.retentionRunCount.count(), this.retentionFailedCount.count(), this.retentionTimedOut.count(), this.retentionTimeMs.count(), - 
policyStats + policyStatsCopy ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java index d409519313c88..545fc01470473 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java @@ -387,8 +387,8 @@ public Setting.AffixSetting affixSetting(String groupPrefix, String keyPrefix return Setting.affixKeySetting(groupPrefix, keyPrefix + name, template); } - public Setting transportProfile(String name) { - return transportProfile().getConcreteSetting(name); + public Setting transportProfile(String settingName) { + return transportProfile().getConcreteSetting(settingName); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java index 9c7d34e03731b..05bdc006921a6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java @@ -185,7 +185,7 @@ public SSLService createDynamicSSLService() { return new SSLService(env, sslConfigurations, sslContexts) { @Override - Map loadSslConfigurations(Map sslConfigurations) { + Map loadSslConfigurations(Map unused) { // we don't need to load anything... return Collections.emptyMap(); } @@ -214,16 +214,16 @@ public static void registerSettings(List> settingList) { * Create a new {@link SSLIOSessionStrategy} based on the provided settings. The settings are used to identify the SSL configuration * that should be used to create the context. * - * @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix. 
An empty settings will return - * a context created from the default configuration + * @param settingsToUse the settings used to identify the ssl configuration, typically under a *.ssl. prefix. An empty settings will + * return a context created from the default configuration * @return Never {@code null}. * @deprecated This method will fail if the SSL configuration uses a {@link org.elasticsearch.common.settings.SecureSetting} but the * {@link org.elasticsearch.common.settings.SecureSettings} have been closed. Use {@link #getSSLConfiguration(String)} * and {@link #sslIOSessionStrategy(SslConfiguration)} (Deprecated, but not removed because monitoring uses dynamic SSL settings) */ @Deprecated - public SSLIOSessionStrategy sslIOSessionStrategy(Settings settings) { - SslConfiguration config = sslConfiguration(settings); + public SSLIOSessionStrategy sslIOSessionStrategy(Settings settingsToUse) { + SslConfiguration config = sslConfiguration(settingsToUse); return sslIOSessionStrategy(config); } @@ -395,11 +395,11 @@ SSLContextHolder sslContextHolder(SslConfiguration sslConfiguration) { /** * Returns the existing {@link SslConfiguration} for the given settings * - * @param settings the settings for the ssl configuration + * @param settingsToUse the settings for the ssl configuration * @return the ssl configuration for the provided settings */ - public SslConfiguration sslConfiguration(Settings settings) { - return SslSettingsLoader.load(settings, null, env); + public SslConfiguration sslConfiguration(Settings settingsToUse) { + return SslSettingsLoader.load(settingsToUse, null, env); } public Set getTransportProfileContextNames() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SslSettingsLoader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SslSettingsLoader.java index e46abca986cf5..216b39c81415f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SslSettingsLoader.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SslSettingsLoader.java @@ -50,8 +50,8 @@ public SslSettingsLoader(Settings settings, String settingPrefix, boolean accept setDefaultClientAuth(SslClientAuthenticationMode.REQUIRED); } - private Map> mapOf(List> settings) { - return settings.stream().collect(Collectors.toMap(s -> s.getKey(), Function.identity())); + private Map> mapOf(List> settingList) { + return settingList.stream().collect(Collectors.toMap(Setting::getKey, Function.identity())); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/MultiShardTermsEnum.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/MultiShardTermsEnum.java index f5b264ebcef57..a240f1fef671d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/MultiShardTermsEnum.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/MultiShardTermsEnum.java @@ -75,9 +75,9 @@ private void pullTop() { private void pushTop() throws IOException { // call next() on each top, and reorder queue for (int i = 0; i < numTop; i++) { - TermsEnumWithCurrent top = queue.top(); - top.current = top.terms.next(); - if (top.current == null) { + TermsEnumWithCurrent termsEnum = queue.top(); + termsEnum.current = termsEnum.terms.next(); + if (termsEnum.current == null) { queue.pop(); } else { queue.updateTop(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java index 38d71d35f59d0..48fc48ab102b6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java @@ -153,8 +153,8 @@ public Builder(@Nullable TransformMetadata previous) { } } - public 
TransformMetadata.Builder isResetMode(boolean resetMode) { - this.resetMode = resetMode; + public TransformMetadata.Builder isResetMode(boolean isResetMode) { + this.resetMode = isResetMode; return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java index 62be52fae0ce8..22eaa840686fa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java @@ -175,9 +175,9 @@ public boolean equals(Object obj) { @Override public boolean match(Task task) { if (task.getDescription().startsWith(TransformField.PERSISTENT_TASK_DESCRIPTION_PREFIX)) { - String id = task.getDescription().substring(TransformField.PERSISTENT_TASK_DESCRIPTION_PREFIX.length()); + String taskId = task.getDescription().substring(TransformField.PERSISTENT_TASK_DESCRIPTION_PREFIX.length()); if (expandedIds != null) { - return expandedIds.contains(id); + return expandedIds.contains(taskId); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java index 27cd1caec192b..6c33776aa2b03 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java @@ -249,11 +249,11 @@ public Builder setMaxPageSearchSize(Integer maxPageSearchSize) { * This setting throttles transform by issuing queries less often, however processing still happens in * batches. A value of 0 disables throttling (default). 
* - * @param docsPerSecond Integer value + * @param documentsPerSecond Integer value * @return the {@link Builder} with requestsPerSecond set. */ - public Builder setRequestsPerSecond(Float docsPerSecond) { - this.docsPerSecond = docsPerSecond == null ? DEFAULT_DOCS_PER_SECOND : docsPerSecond; + public Builder setRequestsPerSecond(Float documentsPerSecond) { + this.docsPerSecond = documentsPerSecond == null ? DEFAULT_DOCS_PER_SECOND : documentsPerSecond; return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java index c36b70df6c4b4..90e1e1389b9a8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java @@ -308,8 +308,8 @@ public Version getVersion() { return transformVersion; } - public TransformConfig setVersion(Version transformVersion) { - this.transformVersion = transformVersion; + public TransformConfig setVersion(Version version) { + this.transformVersion = version; return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformProgress.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformProgress.java index 6b6788c550558..1b79d474d9080 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformProgress.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformProgress.java @@ -114,9 +114,9 @@ public void incrementDocsProcessed(long docsProcessed) { this.documentsProcessed += docsProcessed; } - public void incrementDocsIndexed(long documentsIndexed) { - assert documentsIndexed >= 0; - this.documentsIndexed += documentsIndexed; 
+ public void incrementDocsIndexed(long numDocumentsIndexed) { + assert numDocumentsIndexed >= 0; + this.documentsIndexed += numDocumentsIndexed; } public long getDocumentsProcessed() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java index b4876840e2d91..79c7f4438316a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java @@ -194,9 +194,9 @@ private Interval readInterval(StreamInput in) throws IOException { } } - private void writeInterval(Interval interval, StreamOutput out) throws IOException { - out.write(interval.getIntervalTypeId()); - interval.writeTo(out); + private void writeInterval(Interval anInterval, StreamOutput out) throws IOException { + out.write(anInterval.getIntervalTypeId()); + anInterval.writeTo(out); } private static final String NAME = "data_frame_date_histogram_group"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/HistogramGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/HistogramGroupSource.java index 717ba9af098a9..6d24209ba26be 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/HistogramGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/HistogramGroupSource.java @@ -44,8 +44,8 @@ private static ConstructingObjectParser createParser String field = (String) args[0]; ScriptConfig scriptConfig = (ScriptConfig) args[1]; boolean missingBucket = args[2] == null ? 
false : (boolean) args[2]; - double interval = (double) args[3]; - return new HistogramGroupSource(field, scriptConfig, missingBucket, interval); + double intervalValue = (double) args[3]; + return new HistogramGroupSource(field, scriptConfig, missingBucket, intervalValue); }); declareValuesSourceFields(parser, lenient); parser.declareDouble(optionalConstructorArg(), INTERVAL); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java index efc7274b36f41..27647da6d078f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java @@ -17,7 +17,7 @@ /** * This throttler throttles the action based on its last successful execution time. If the time passed since - * the last successful execution is lower than the given period, the aciton will be throttled. + * the last successful execution is lower than the given period, the action will be throttled. 
*/ public class PeriodThrottler implements Throttler { @@ -36,14 +36,14 @@ public TimeValue period() { @Override public Result throttle(String actionId, WatchExecutionContext ctx) { - TimeValue period = this.period; - if (period == null) { + TimeValue throttlePeriod = this.period; + if (throttlePeriod == null) { // falling back on the throttle period of the watch - period = ctx.watch().throttlePeriod(); + throttlePeriod = ctx.watch().throttlePeriod(); } - if (period == null) { + if (throttlePeriod == null) { // falling back on the default throttle period of watcher - period = ctx.defaultThrottlePeriod(); + throttlePeriod = ctx.defaultThrottlePeriod(); } ActionStatus status = ctx.watch().status().actionStatus(actionId); if (status.lastSuccessfulExecution() == null) { @@ -52,11 +52,11 @@ public Result throttle(String actionId, WatchExecutionContext ctx) { long now = clock.millis(); long executionTime = status.lastSuccessfulExecution().timestamp().toInstant().toEpochMilli(); TimeValue timeElapsed = TimeValue.timeValueMillis(now - executionTime); - if (timeElapsed.getMillis() <= period.getMillis()) { + if (timeElapsed.getMillis() <= throttlePeriod.getMillis()) { return Result.throttle( PERIOD, "throttling interval is set to [{}] but time elapsed since last execution is [{}]", - period, + throttlePeriod, timeElapsed ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/client/WatchSourceBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/client/WatchSourceBuilder.java index e21a490b45f13..7597902de6f37 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/client/WatchSourceBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/client/WatchSourceBuilder.java @@ -91,12 +91,13 @@ public WatchSourceBuilder addAction(String id, TimeValue throttlePeriod, Action. 
public WatchSourceBuilder addAction( String id, - Transform.Builder transform, + Transform.Builder transformBuilder, Action.Builder action ) { - return addAction(id, null, transform.build(), action.build()); + return addAction(id, null, transformBuilder.build(), action.build()); } + @SuppressWarnings("HiddenField") public WatchSourceBuilder addAction(String id, Condition condition, Action.Builder action) { return addAction(id, null, condition, null, action.build()); } @@ -104,17 +105,18 @@ public WatchSourceBuilder addAction(String id, Condition condition, Action.Build public WatchSourceBuilder addAction( String id, TimeValue throttlePeriod, - Transform.Builder transform, + Transform.Builder transformBuilder, Action.Builder action ) { - return addAction(id, throttlePeriod, transform.build(), action.build()); + return addAction(id, throttlePeriod, transformBuilder.build(), action.build()); } - public WatchSourceBuilder addAction(String id, TimeValue throttlePeriod, Transform transform, Action action) { - actions.put(id, new TransformedAction(id, action, throttlePeriod, null, transform, null)); + public WatchSourceBuilder addAction(String id, TimeValue throttlePeriod, Transform aTransform, Action action) { + actions.put(id, new TransformedAction(id, action, throttlePeriod, null, aTransform, null)); return this; } + @SuppressWarnings("HiddenField") public WatchSourceBuilder addAction( String id, TimeValue throttlePeriod, @@ -125,11 +127,13 @@ public WatchSourceBuilder addAction( return addAction(id, throttlePeriod, condition, transform.build(), action.build()); } + @SuppressWarnings("HiddenField") public WatchSourceBuilder addAction(String id, TimeValue throttlePeriod, Condition condition, Transform transform, Action action) { actions.put(id, new TransformedAction(id, action, throttlePeriod, condition, transform, null)); return this; } + @SuppressWarnings("HiddenField") public WatchSourceBuilder addAction( String id, TimeValue throttlePeriod, diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java index ea3481511feb9..7863fa1000999 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java @@ -29,8 +29,8 @@ public class Counters implements Writeable { private ObjectLongHashMap counters = new ObjectLongHashMap<>(); public Counters(StreamInput in) throws IOException { - int counters = in.readVInt(); - for (int i = 0; i < counters; i++) { + int numCounters = in.readVInt(); + for (int i = 0; i < numCounters; i++) { inc(in.readString(), in.readVLong()); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionContext.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionContext.java index a6e6b593309b3..fa98a6ffa6e67 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionContext.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionContext.java @@ -164,11 +164,11 @@ public void beforeInput() { phase = ExecutionPhase.INPUT; } - public void onInputResult(Input.Result inputResult) { + public void onInputResult(Input.Result result) { assert phase.sealed() == false; - this.inputResult = inputResult; - if (inputResult.status() == Input.Result.Status.SUCCESS) { - this.payload = inputResult.payload(); + this.inputResult = result; + if (result.status() == Input.Result.Status.SUCCESS) { + this.payload = result.payload(); } } @@ -181,10 +181,10 @@ public void beforeCondition() { phase = ExecutionPhase.CONDITION; } - public void onConditionResult(Condition.Result conditionResult) { + public void onConditionResult(Condition.Result 
result) { assert phase.sealed() == false; - this.conditionResult = conditionResult; - watch().status().onCheck(conditionResult.met(), executionTime); + this.conditionResult = result; + watch().status().onCheck(result.met(), executionTime); } public Condition.Result conditionResult() { @@ -232,8 +232,8 @@ public WatchRecord abortBeforeExecution(ExecutionState state, String message) { public WatchRecord abortFailedExecution(String message) { assert phase.sealed() == false; phase = ExecutionPhase.ABORTED; - long executionTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); - WatchExecutionResult result = new WatchExecutionResult(this, executionTime); + long executionTimeNs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); + WatchExecutionResult result = new WatchExecutionResult(this, executionTimeNs); watch().status().setExecutionState(WatchRecord.getState(result)); return new WatchRecord.MessageWatchRecord(this, result, message); } @@ -241,8 +241,8 @@ public WatchRecord abortFailedExecution(String message) { public WatchRecord abortFailedExecution(Exception e) { assert phase.sealed() == false; phase = ExecutionPhase.ABORTED; - long executionTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); - WatchExecutionResult result = new WatchExecutionResult(this, executionTime); + long executionTimeNs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); + WatchExecutionResult result = new WatchExecutionResult(this, executionTimeNs); watch().status().setExecutionState(WatchRecord.getState(result)); return new WatchRecord.ExceptionWatchRecord(this, result, e); } @@ -250,8 +250,8 @@ public WatchRecord abortFailedExecution(Exception e) { public WatchRecord finish() { assert phase.sealed() == false; phase = ExecutionPhase.FINISHED; - long executionTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); - WatchExecutionResult result = new WatchExecutionResult(this, 
executionTime); + long executionTimeNs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); + WatchExecutionResult result = new WatchExecutionResult(this, executionTimeNs); watch().status().setExecutionState(WatchRecord.getState(result)); return new WatchRecord.MessageWatchRecord(this, result); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ChainTransform.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ChainTransform.java index d27a255dc0843..1ac2c7f57845d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ChainTransform.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ChainTransform.java @@ -151,13 +151,13 @@ public Builder(Transform... transforms) { add(transforms); } - public Builder add(Transform... transforms) { - Collections.addAll(this.transforms, transforms); + public Builder add(Transform... transformsToAdd) { + Collections.addAll(this.transforms, transformsToAdd); return this; } - public Builder add(Transform.Builder... transforms) { - for (Transform.Builder transform : transforms) { + public Builder add(Transform.Builder... 
transformsToAdd) { + for (Transform.Builder transform : transformsToAdd) { this.transforms.add(transform.build()); } return this; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java index 9fe1a41be2a2a..fbb273aa6a8bb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java @@ -26,7 +26,7 @@ import java.util.Map; /** - * An execute watch request to execute a watch by id + * A request to execute a watch by id */ public class ExecuteWatchRequest extends ActionRequest { @@ -195,6 +195,7 @@ public XContentType getXContentType() { /** * @param watchSource instead of using an existing watch use this non persisted watch */ + @SuppressWarnings("HiddenField") public void setWatchSource(BytesReference watchSource, XContentType xContentType) { this.watchSource = watchSource; this.xContentType = xContentType; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java index 743f86a3bffa3..26151d4ae01c4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java @@ -138,8 +138,8 @@ public boolean setState(boolean active, ZonedDateTime now) { * * @return {@code true} if the status of this watch changed, {@code false} otherwise. */ - public boolean ack(ZonedDateTime now, String... actions) { - return status.onAck(now, actions); + public boolean ack(ZonedDateTime now, String... 
actionIds) { + return status.onAck(now, actionIds); } public boolean acked(String actionId) { From 1f933390c146021f22186d2861b199fc9eb5043e Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 29 Nov 2021 21:46:54 +0100 Subject: [PATCH 52/55] Less Verbose Serialization of Snapshot Failure in SLM Metadata (#80942) We should not serialize the full exception including cause(s) and stacktraces here. This can be a string of multiple MBs for a very large cluster that has a large subset of indices/shards failing to snapshot. We can get the full details of what failed for each shard in detail from the repository as well as from logs anyway. If we fail to finalize the snapshot we still get the rough reason for that failure with this change and can look at the logs for more details. --- .../elasticsearch/ElasticsearchException.java | 2 +- .../core/slm/SnapshotInvocationRecord.java | 8 ++-- .../xpack/slm/SnapshotLifecycleTask.java | 42 +++++++------------ .../slm/history/SnapshotHistoryItem.java | 22 ++-------- .../xpack/slm/SnapshotLifecycleTaskTests.java | 1 - 5 files changed, 23 insertions(+), 52 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 1c2e3bc0764f2..8c599774d868e 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -58,7 +58,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte * to control if the {@code caused_by} element should render. Unlike most parameters to {@code toXContent} methods this parameter is * internal only and not available as a URL parameter. 
*/ - private static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip"; + public static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip"; /** * Passed in the {@link Params} of {@link #generateThrowableXContent(XContentBuilder, Params, Throwable)} * to control if the {@code stack_trace} element should render. Unlike most parameters to {@code toXContent} methods this parameter is diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotInvocationRecord.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotInvocationRecord.java index 6946d08ed1191..63d92abe9a100 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotInvocationRecord.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotInvocationRecord.java @@ -37,10 +37,10 @@ public class SnapshotInvocationRecord extends AbstractDiffable PARSER = new ConstructingObjectParser<>( "snapshot_policy_invocation_record", diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java index 1f08ca62e67cd..483fdcd167d89 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java @@ -20,13 +20,10 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.core.TimeValue; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.json.JsonXContent; import 
org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.elasticsearch.xpack.core.slm.SnapshotInvocationRecord; @@ -39,13 +36,10 @@ import java.io.IOException; import java.time.Instant; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Optional; -import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; - public class SnapshotLifecycleTask implements SchedulerEngine.Listener { private static final Logger logger = LogManager.getLogger(SnapshotLifecycleTask.class); @@ -135,11 +129,6 @@ public void onResponse(CreateSnapshotResponse createSnapshotResponse) { request.snapshot(), "failed to create snapshot successfully, " + failures + " out of " + total + " total shards failed" ); - // Add each failed shard's exception as suppressed, the exception contains - // information about which shard failed - // TODO: this seems wrong, investigate whether we actually need all the shard level exception here given that we - // could be dealing with tens of thousands of them at a time - snapInfo.shardFailures().forEach(e::addSuppressed); // Call the failure handler to register this as a failure and persist it onFailure(e); } @@ -194,13 +183,17 @@ static Optional getSnapPolicyMetadata(final Str ); } + public static String exceptionToString(Exception ex) { + return Strings.toString((builder, params) -> { + ElasticsearchException.generateThrowableXContent(builder, params, ex); + return builder; + }, ToXContent.EMPTY_PARAMS); + } + /** * A cluster state update task to write the result of a snapshot job to the cluster metadata for the associated policy. 
*/ private static class WriteJobStatus extends ClusterStateUpdateTask { - private static final ToXContent.Params STACKTRACE_PARAMS = new ToXContent.MapParams( - Collections.singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false") - ); private final String policyName; private final String snapshotName; @@ -230,18 +223,6 @@ static WriteJobStatus failure(String policyId, String snapshotName, long timesta return new WriteJobStatus(policyId, snapshotName, timestamp, timestamp, Optional.of(exception)); } - private String exceptionToString() throws IOException { - if (exception.isPresent()) { - try (XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder()) { - causeXContentBuilder.startObject(); - ElasticsearchException.generateThrowableXContent(causeXContentBuilder, STACKTRACE_PARAMS, exception.get()); - causeXContentBuilder.endObject(); - return BytesReference.bytes(causeXContentBuilder).utf8ToString(); - } - } - return null; - } - @Override public ClusterState execute(ClusterState currentState) throws Exception { SnapshotLifecycleMetadata snapMeta = currentState.metadata().custom(SnapshotLifecycleMetadata.TYPE); @@ -274,7 +255,14 @@ public ClusterState execute(ClusterState currentState) throws Exception { if (exception.isPresent()) { stats.snapshotFailed(policyName); - newPolicyMetadata.setLastFailure(new SnapshotInvocationRecord(snapshotName, null, snapshotFinishTime, exceptionToString())); + newPolicyMetadata.setLastFailure( + new SnapshotInvocationRecord( + snapshotName, + null, + snapshotFinishTime, + exception.map(SnapshotLifecycleTask::exceptionToString).orElse(null) + ) + ); } else { stats.snapshotTaken(policyName); newPolicyMetadata.setLastSuccess(new SnapshotInvocationRecord(snapshotName, snapshotStartTime, snapshotFinishTime, null)); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java index 
fd24e697818b5..d273ef63844b5 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java @@ -7,9 +7,7 @@ package org.elasticsearch.xpack.slm.history; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -19,16 +17,13 @@ import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.slm.SnapshotLifecycleTask; import java.io.IOException; -import java.util.Collections; import java.util.Map; import java.util.Objects; -import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; - /** * Represents the record of a Snapshot Lifecycle Management action, so that it * can be indexed in a history index or recorded to a log in a structured way @@ -138,7 +133,7 @@ public static SnapshotHistoryItem creationFailureRecord( String snapshotName, Exception exception ) throws IOException { - String exceptionString = exceptionToString(exception); + String exceptionString = SnapshotLifecycleTask.exceptionToString(exception); return new SnapshotHistoryItem( timeStamp, policy.getId(), @@ -162,7 +157,7 @@ public static SnapshotHistoryItem deletionFailureRecord( String repository, Exception exception ) throws IOException { - String exceptionString = exceptionToString(exception); + String exceptionString = SnapshotLifecycleTask.exceptionToString(exception); return new SnapshotHistoryItem(timestamp, policyId, repository, 
snapshotName, DELETE_OPERATION, false, null, exceptionString); } @@ -273,15 +268,4 @@ public String toString() { return Strings.toString(this); } - private static String exceptionToString(Exception exception) throws IOException { - Params stacktraceParams = new MapParams(Collections.singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false")); - String exceptionString; - try (XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder()) { - causeXContentBuilder.startObject(); - ElasticsearchException.generateThrowableXContent(causeXContentBuilder, stacktraceParams, exception); - causeXContentBuilder.endObject(); - exceptionString = BytesReference.bytes(causeXContentBuilder).utf8ToString(); - } - return exceptionString; - } } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java index e830f87a7773f..83a94020f95bd 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java @@ -288,7 +288,6 @@ public void testPartialFailureSnapshot() throws Exception { item.getErrorDetails(), containsString("failed to create snapshot successfully, 1 out of 3 total shards failed") ); - assertThat(item.getErrorDetails(), containsString("forced failure")); }); SnapshotLifecycleTask task = new SnapshotLifecycleTask(client, clusterService, historyStore); From 537f371f348207e7c59d28162b61c3720e931d0c Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Mon, 29 Nov 2021 23:49:27 +0200 Subject: [PATCH 53/55] URL option for BaseRunAsSuperuserCommand (#81025) Add a --url option for elasticsearch-reset-password and elasticsearch-create-enrollment-token CLI Tools ( and any tools that would extend BaseRunAsSuperuserCommand ). 
The tools use CommandLineHttpClient internally, which tries its best to deduce the URL of the local node based on the configuration but there are certain cases where it either fails or returns an unwanted result. Concretely: - CommandLineHttpClient#getDefaultURL will always return a URL with the port set to 9200, unless otherwise explicitly set in the configuration. When running multiple nodes on the same host, subsequent nodes get sequential port numbers after 9200 by default and this means that the CLI tool will always connect to the first of n nodes on a given host. Since these tools depend on a file realm local user, requests to other nodes would fail - When an ES node binds and listens to many addresses, it can be the case that not all of the IP addresses are added as SANs in the certificate that is used for TLS on the HTTP layer. CommandLineHttpClient#getDefaultURL will pick an address based on a preference order but that address might not be in the SANs and thus all requests to the node would fail due to failed hostname verification. Manually setting `--url` to an appropriate value allows users to overcome these edge cases. 
--- .../commands/create-enrollment-token.asciidoc | 20 ++++++-- .../commands/reset-password.asciidoc | 17 ++++++- .../esnative/tool/ResetPasswordTool.java | 7 +-- .../ExternalEnrollmentTokenGenerator.java | 34 ++++++------- .../tool/CreateEnrollmentTokenTool.java | 11 +++- .../tool/BaseRunAsSuperuserCommand.java | 11 ++-- ...ExternalEnrollmentTokenGeneratorTests.java | 50 ++++++++++--------- .../esnative/tool/ResetPasswordToolTests.java | 33 ++++++++++++ .../tool/CreateEnrollmentTokenToolTests.java | 42 ++++++++++++++-- 9 files changed, 164 insertions(+), 61 deletions(-) diff --git a/docs/reference/commands/create-enrollment-token.asciidoc b/docs/reference/commands/create-enrollment-token.asciidoc index 4fd95f1b7bef4..ca95649324702 100644 --- a/docs/reference/commands/create-enrollment-token.asciidoc +++ b/docs/reference/commands/create-enrollment-token.asciidoc @@ -12,7 +12,7 @@ The `elasticsearch-create-enrollment-token` command creates enrollment tokens fo [source,shell] ---- bin/elasticsearch-create-enrollment-token -[-f, --force] [-h, --help] [-E ] [-s, --scope] +[-f, --force] [-h, --help] [-E ] [-s, --scope] [--url] ---- [discrete] @@ -23,7 +23,7 @@ Use this command to create enrollment tokens, which you can use to enroll new with an existing {es} cluster that has security features enabled. The command generates (and subsequently removes) a temporary user in the <> to run the request that creates enrollment tokens. -IMPORTANT: You cannot use this tool if the file realm is disabled in your +IMPORTANT: You cannot use this tool if the file realm is disabled in your `elasticsearch.yml` file. This command uses an HTTP connection to connect to the cluster and run the user @@ -42,12 +42,17 @@ option. For more information about debugging connection failures, see `-E `:: Configures a standard {es} or {xpack} setting. -`-f, --force`:: Forces the command to run against an unhealthy cluster. +`-f, --force`:: Forces the command to run against an unhealthy cluster. 
`-h, --help`:: Returns all of the command parameters. `-s, --scope`:: Specifies the scope of the generated token. Supported values are `node` and `kibana`. +`--url`:: Specifies the base URL (hostname and port of the local node) that the tool uses to submit API +requests to {es}. The default value is determined from the settings in your +`elasticsearch.yml` file. If `xpack.security.http.ssl.enabled` is set to `true`, +you must specify an HTTPS URL. + [discrete] === Examples @@ -57,3 +62,12 @@ The following command creates an enrollment token for enrolling an {es} node int ---- bin/elasticsearch-create-enrollment-token -s node ---- + +The following command creates an enrollment token for enrolling a {kib} instance into a cluster. +The specified URL indicates where the elasticsearch-create-enrollment-token tool attempts to reach the +local {es} node: + +[source,shell] +---- +bin/elasticsearch-create-enrollment-token -s kibana --url "https://172.0.0.3:9200" +---- diff --git a/docs/reference/commands/reset-password.asciidoc b/docs/reference/commands/reset-password.asciidoc index 012874fd61171..b8823158d0d0f 100644 --- a/docs/reference/commands/reset-password.asciidoc +++ b/docs/reference/commands/reset-password.asciidoc @@ -14,7 +14,7 @@ the native realm and built-in users. 
bin/elasticsearch-reset-password [-a, --auto] [-b, --batch] [-E , String> httpInfo = getNodeInfo(user, password); + final String apiKey = getApiKeyCredentials(user, password, action, baseUrl); + final Tuple, String> httpInfo = getNodeInfo(user, password, baseUrl); return new EnrollmentToken(apiKey, fingerprint, httpInfo.v2(), httpInfo.v1()); } @@ -89,12 +87,12 @@ private HttpResponse.HttpResponseBuilder responseBuilder(InputStream is) throws return httpResponseBuilder; } - protected URL createAPIKeyUrl() throws MalformedURLException, URISyntaxException { - return new URL(defaultUrl, (defaultUrl.toURI().getPath() + "/_security/api_key").replaceAll("/+", "/")); + protected URL createAPIKeyUrl(URL baseUrl) throws MalformedURLException, URISyntaxException { + return new URL(baseUrl, (baseUrl.toURI().getPath() + "/_security/api_key").replaceAll("/+", "/")); } - protected URL getHttpInfoUrl() throws MalformedURLException, URISyntaxException { - return new URL(defaultUrl, (defaultUrl.toURI().getPath() + "/_nodes/_local/http").replaceAll("/+", "/")); + protected URL getHttpInfoUrl(URL baseUrl) throws MalformedURLException, URISyntaxException { + return new URL(baseUrl, (baseUrl.toURI().getPath() + "/_nodes/_local/http").replaceAll("/+", "/")); } @SuppressWarnings("unchecked") @@ -114,7 +112,7 @@ static String getVersion(Map nodesInfo) { return nodeInfo.get("version").toString(); } - protected String getApiKeyCredentials(String user, SecureString password, String action) throws Exception { + protected String getApiKeyCredentials(String user, SecureString password, String action, URL baseUrl) throws Exception { final CheckedSupplier createApiKeyRequestBodySupplier = () -> { XContentBuilder xContentBuilder = JsonXContent.contentBuilder(); xContentBuilder.startObject() @@ -129,7 +127,7 @@ protected String getApiKeyCredentials(String user, SecureString password, String return Strings.toString(xContentBuilder); }; - final URL createApiKeyUrl = createAPIKeyUrl(); + final URL 
createApiKeyUrl = createAPIKeyUrl(baseUrl); final HttpResponse httpResponseApiKey = client.execute( "POST", createApiKeyUrl, @@ -155,8 +153,8 @@ protected String getApiKeyCredentials(String user, SecureString password, String return apiId + ":" + apiKey; } - protected Tuple, String> getNodeInfo(String user, SecureString password) throws Exception { - final URL httpInfoUrl = getHttpInfoUrl(); + protected Tuple, String> getNodeInfo(String user, SecureString password, URL baseUrl) throws Exception { + final URL httpInfoUrl = getHttpInfoUrl(baseUrl); final HttpResponse httpResponseHttp = client.execute("GET", httpInfoUrl, user, password, () -> null, is -> responseBuilder(is)); final int httpCode = httpResponseHttp.getHttpStatus(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenTool.java index 84c6ccf4964ea..954badc86e47a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenTool.java @@ -22,16 +22,19 @@ import org.elasticsearch.xpack.security.enrollment.ExternalEnrollmentTokenGenerator; import org.elasticsearch.xpack.security.tool.BaseRunAsSuperuserCommand; +import java.net.URL; import java.util.List; import java.util.function.Function; public class CreateEnrollmentTokenTool extends BaseRunAsSuperuserCommand { private final OptionSpec scope; + private final Function clientFunction; private final CheckedFunction createEnrollmentTokenFunction; static final List ALLOWED_SCOPES = List.of("node", "kibana"); CreateEnrollmentTokenTool() { + this( environment -> new CommandLineHttpClient(environment), environment -> KeyStoreWrapper.load(environment.configFile()), @@ -46,6 +49,7 @@ public class 
CreateEnrollmentTokenTool extends BaseRunAsSuperuserCommand { ) { super(clientFunction, keyStoreFunction, "Creates enrollment tokens for elasticsearch nodes and kibana instances"); this.createEnrollmentTokenFunction = createEnrollmentTokenFunction; + this.clientFunction = clientFunction; scope = parser.acceptsAll(List.of("scope", "s"), "The scope of this enrollment token, can be either \"node\" or \"kibana\"") .withRequiredArg() .required(); @@ -74,12 +78,15 @@ protected void validate(Terminal terminal, OptionSet options, Environment env) t protected void executeCommand(Terminal terminal, OptionSet options, Environment env, String username, SecureString password) throws Exception { final String tokenScope = scope.value(options); + final URL baseUrl = options.has(urlOption) + ? new URL(options.valueOf(urlOption)) + : new URL(clientFunction.apply(env).getDefaultURL()); try { ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = createEnrollmentTokenFunction.apply(env); if (tokenScope.equals("node")) { - terminal.println(externalEnrollmentTokenGenerator.createNodeEnrollmentToken(username, password).getEncoded()); + terminal.println(externalEnrollmentTokenGenerator.createNodeEnrollmentToken(username, password, baseUrl).getEncoded()); } else { - terminal.println(externalEnrollmentTokenGenerator.createKibanaEnrollmentToken(username, password).getEncoded()); + terminal.println(externalEnrollmentTokenGenerator.createKibanaEnrollmentToken(username, password, baseUrl).getEncoded()); } } catch (Exception e) { terminal.errorPrintln("Unable to create enrollment token for scope [" + tokenScope + "]"); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java index 6909da4df03bb..c0ed18d0dc6b9 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.security.tool; import joptsimple.OptionSet; +import joptsimple.OptionSpec; import joptsimple.OptionSpecBuilder; import org.elasticsearch.Version; @@ -57,6 +58,7 @@ public abstract class BaseRunAsSuperuserCommand extends KeyStoreAwareCommand { private static final int PASSWORD_LENGTH = 14; private final OptionSpecBuilder force; + protected final OptionSpec urlOption; private final Function clientFunction; private final CheckedFunction keyStoreFunction; @@ -72,6 +74,7 @@ public BaseRunAsSuperuserCommand( List.of("f", "force"), "Use this option to force execution of the command against a cluster that is currently unhealthy." ); + urlOption = parser.accepts("url", "the URL where the elasticsearch node listens for connections.").withRequiredArg(); } @Override @@ -120,7 +123,7 @@ protected final void execute(Terminal terminal, OptionSet options, Environment e attributesChecker.check(terminal); final boolean forceExecution = options.has(force); - checkClusterHealthWithRetries(newEnv, terminal, username, password, 5, forceExecution); + checkClusterHealthWithRetries(newEnv, options, terminal, username, password, 5, forceExecution); executeCommand(terminal, options, newEnv, username, password); } catch (Exception e) { int exitCode; @@ -195,6 +198,7 @@ private void ensureFileRealmEnabled(Settings settings) throws Exception { */ private void checkClusterHealthWithRetries( Environment env, + OptionSet options, Terminal terminal, String username, SecureString password, @@ -202,7 +206,8 @@ private void checkClusterHealthWithRetries( boolean force ) throws Exception { CommandLineHttpClient client = clientFunction.apply(env); - final URL clusterHealthUrl = CommandLineHttpClient.createURL(new 
URL(client.getDefaultURL()), "_cluster/health", "?pretty"); + final URL baseUrl = options.has(urlOption) ? new URL(options.valueOf(urlOption)) : new URL(client.getDefaultURL()); + final URL clusterHealthUrl = CommandLineHttpClient.createURL(baseUrl, "_cluster/health", "?pretty"); final HttpResponse response; try { response = client.execute("GET", clusterHealthUrl, username, password, () -> null, CommandLineHttpClient::responseBuilder); @@ -225,7 +230,7 @@ private void checkClusterHealthWithRetries( ); Thread.sleep(1000); retries -= 1; - checkClusterHealthWithRetries(env, terminal, username, password, retries, force); + checkClusterHealthWithRetries(env, options, terminal, username, password, retries, force); } else { throw new UserException( ExitCodes.DATA_ERROR, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java index 63824c270e7c9..339b3de9ecb49 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java @@ -83,10 +83,10 @@ public void setupMocks() throws Exception { public void testCreateSuccess() throws Exception { final CommandLineHttpClient client = mock(CommandLineHttpClient.class); - when(client.getDefaultURL()).thenReturn("http://localhost:9200"); final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client); - final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(); - final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(); + final URL baseURL = new URL("http://localhost:9200"); + final URL createAPIKeyURL = 
externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL); + final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL); final HttpResponse httpResponseOK = new HttpResponse(HttpURLConnection.HTTP_OK, new HashMap<>()); when(client.execute(anyString(), any(URL.class), anyString(), any(SecureString.class), anyCheckedSupplier(), anyCheckedFunction())) @@ -147,7 +147,8 @@ public void testCreateSuccess() throws Exception { final String tokenNode = externalEnrollmentTokenGenerator.createNodeEnrollmentToken( "elastic", - new SecureString("elastic".toCharArray()) + new SecureString("elastic".toCharArray()), + baseURL ).getEncoded(); Map infoNode = getDecoded(tokenNode); @@ -158,7 +159,8 @@ public void testCreateSuccess() throws Exception { final String tokenKibana = externalEnrollmentTokenGenerator.createKibanaEnrollmentToken( "elastic", - new SecureString("elastic".toCharArray()) + new SecureString("elastic".toCharArray()), + baseURL ).getEncoded(); Map infoKibana = getDecoded(tokenKibana); @@ -170,9 +172,9 @@ public void testCreateSuccess() throws Exception { public void testFailedCreateApiKey() throws Exception { final CommandLineHttpClient client = mock(CommandLineHttpClient.class); - when(client.getDefaultURL()).thenReturn("http://localhost:9200"); + final URL baseURL = new URL("http://localhost:9200"); final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client); - final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(); + final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL); final HttpResponse httpResponseNotOK = new HttpResponse(HttpURLConnection.HTTP_BAD_REQUEST, new HashMap<>()); when( @@ -188,7 +190,7 @@ public void testFailedCreateApiKey() throws Exception { IllegalStateException ex = expectThrows( IllegalStateException.class, - () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new 
SecureString("elastic".toCharArray())) + () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray()), baseURL) .getEncoded() ); assertThat(ex.getMessage(), Matchers.containsString("Unexpected response code [400] from calling POST ")); @@ -196,10 +198,10 @@ public void testFailedCreateApiKey() throws Exception { public void testFailedRetrieveHttpInfo() throws Exception { final CommandLineHttpClient client = mock(CommandLineHttpClient.class); - when(client.getDefaultURL()).thenReturn("http://localhost:9200"); + final URL baseURL = new URL("http://localhost:9200"); final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client); - final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(); - final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(); + final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL); + final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL); final HttpResponse httpResponseOK = new HttpResponse(HttpURLConnection.HTTP_OK, new HashMap<>()); when( @@ -248,7 +250,7 @@ public void testFailedRetrieveHttpInfo() throws Exception { IllegalStateException ex = expectThrows( IllegalStateException.class, - () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray())) + () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray()), baseURL) .getEncoded() ); assertThat(ex.getMessage(), Matchers.containsString("Unexpected response code [400] from calling GET ")); @@ -274,10 +276,10 @@ public void testFailedNoCaInKeystore() throws Exception { .build(); environment = new Environment(settings, tempDir); final CommandLineHttpClient client = mock(CommandLineHttpClient.class); - 
when(client.getDefaultURL()).thenReturn("http://localhost:9200"); final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client); - final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(); - final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(); + final URL baseURL = new URL("http://localhost:9200"); + final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL); + final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL); final HttpResponse httpResponseOK = new HttpResponse(HttpURLConnection.HTTP_OK, new HashMap<>()); when( @@ -326,7 +328,7 @@ public void testFailedNoCaInKeystore() throws Exception { IllegalStateException ex = expectThrows( IllegalStateException.class, - () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray())) + () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray()), baseURL) .getEncoded() ); assertThat( @@ -358,10 +360,10 @@ public void testFailedManyCaInKeystore() throws Exception { .build(); environment = new Environment(settings, tempDir); final CommandLineHttpClient client = mock(CommandLineHttpClient.class); - when(client.getDefaultURL()).thenReturn("http://localhost:9200"); final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client); - final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(); - final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(); + final URL baseURL = new URL("http://localhost:9200"); + final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL); + final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL); final HttpResponse httpResponseOK = new 
HttpResponse(HttpURLConnection.HTTP_OK, new HashMap<>()); when( @@ -410,7 +412,7 @@ public void testFailedManyCaInKeystore() throws Exception { IllegalStateException ex = expectThrows( IllegalStateException.class, - () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray())) + () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray()), baseURL) .getEncoded() ); assertThat( @@ -431,7 +433,7 @@ public void testNoKeyStore() throws Exception { .build(); final Environment environment_no_keystore = new Environment(settings, tempDir); final CommandLineHttpClient client = mock(CommandLineHttpClient.class); - when(client.getDefaultURL()).thenReturn("http://localhost:9200"); + final URL baseURL = new URL("http://localhost:9200"); final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator( environment_no_keystore, client @@ -439,7 +441,7 @@ public void testNoKeyStore() throws Exception { IllegalStateException ex = expectThrows( IllegalStateException.class, - () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray())) + () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray()), baseURL) .getEncoded() ); assertThat( @@ -467,7 +469,7 @@ public void testEnrollmentNotEnabled() throws Exception { .build(); final Environment environment_not_enabled = new Environment(settings, tempDir); final CommandLineHttpClient client = mock(CommandLineHttpClient.class); - when(client.getDefaultURL()).thenReturn("http://localhost:9200"); + final URL baseURL = new URL("http://localhost:9200"); final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator( environment_not_enabled, client @@ -475,7 +477,7 @@ public void testEnrollmentNotEnabled() throws Exception { 
IllegalStateException ex = expectThrows( IllegalStateException.class, - () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray())) + () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray()), baseURL) .getEncoded() ); assertThat( diff --git a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/ResetPasswordToolTests.java b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/ResetPasswordToolTests.java index 330df329b4ded..943b2770172a5 100644 --- a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/ResetPasswordToolTests.java +++ b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/ResetPasswordToolTests.java @@ -152,6 +152,39 @@ public void testSuccessInteractiveMode() throws Exception { assertThat(output, containsString("Password for the [" + user + "] user successfully reset.")); } + public void testUserCanPassUrlParameter() throws Exception { + URL url = new URL("http://localhost:9204"); + HttpResponse healthResponse = new HttpResponse(HttpURLConnection.HTTP_OK, Map.of("status", randomFrom("yellow", "green"))); + when( + client.execute( + anyString(), + eq(clusterHealthUrl(url)), + anyString(), + any(SecureString.class), + any(CheckedSupplier.class), + any(CheckedFunction.class) + ) + ).thenReturn(healthResponse); + HttpResponse changePasswordResponse = new HttpResponse(HttpURLConnection.HTTP_OK, Map.of()); + when( + client.execute( + anyString(), + eq(changePasswordUrl(url, user)), + anyString(), + any(SecureString.class), + any(CheckedSupplier.class), + any(CheckedFunction.class) + ) + ).thenReturn(changePasswordResponse); + terminal.addTextInput("y"); + execute(randomFrom("-u", "--username"), user, "--url", "http://localhost:9204"); + String output = 
terminal.getOutput(); + assertThat(output, containsString("This tool will reset the password of the [" + user + "] user to an autogenerated value.")); + assertThat(output, containsString("The password will be printed in the console.")); + assertThat(output, containsString("Password for the [" + user + "] user successfully reset.")); + assertThat(output, containsString("New value:")); + } + public void testUserCancelledAutoMode() throws Exception { terminal.addTextInput("n"); UserException e = expectThrows(UserException.class, () -> execute(randomFrom("-u", "--username"), user)); diff --git a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenToolTests.java b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenToolTests.java index 16c7120ab3a76..d322ae2cfdc5d 100644 --- a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenToolTests.java +++ b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenToolTests.java @@ -134,8 +134,12 @@ public void setup() throws Exception { "8.0.0", Arrays.asList("[192.168.0.1:9201, 172.16.254.1:9202") ); - when(externalEnrollmentTokenGenerator.createKibanaEnrollmentToken(anyString(), any(SecureString.class))).thenReturn(kibanaToken); - when(externalEnrollmentTokenGenerator.createNodeEnrollmentToken(anyString(), any(SecureString.class))).thenReturn(nodeToken); + when(externalEnrollmentTokenGenerator.createKibanaEnrollmentToken(anyString(), any(SecureString.class), any(URL.class))).thenReturn( + kibanaToken + ); + when(externalEnrollmentTokenGenerator.createNodeEnrollmentToken(anyString(), any(SecureString.class), any(URL.class))).thenReturn( + nodeToken + ); } @AfterClass @@ -167,6 +171,36 @@ public void testInvalidScope() throws Exception { ); } + public void testUserCanPassUrl() throws 
Exception { + HttpResponse healthResponse = new HttpResponse(HttpURLConnection.HTTP_OK, Map.of("status", randomFrom("yellow", "green"))); + when( + client.execute( + anyString(), + eq(clusterHealthUrl(new URL("http://localhost:9204"))), + anyString(), + any(SecureString.class), + any(CheckedSupplier.class), + any(CheckedFunction.class) + ) + ).thenReturn(healthResponse); + EnrollmentToken kibanaToken = new EnrollmentToken( + "DR6CzXkBDf8amV_48yYX:x3YqU_rqQwm-ESrkExcnOg", + "ce480d53728605674fcfd8ffb51000d8a33bf32de7c7f1e26b4d428f8a91362d", + "8.0.0", + Arrays.asList("[192.168.0.1:9201, 172.16.254.1:9202") + ); + when( + externalEnrollmentTokenGenerator.createKibanaEnrollmentToken( + anyString(), + any(SecureString.class), + eq(new URL("http://localhost:9204")) + ) + ).thenReturn(kibanaToken); + String output = execute("--scope", "kibana", "--url", "http://localhost:9204"); + assertThat(output, containsString("1WXzQ4eVlYOngzWXFVX3JxUXdtLUVTcmtFeGNuT2cifQ==")); + + } + public void testUnhealthyCluster() throws Exception { String scope = randomBoolean() ? "node" : "kibana"; URL url = new URL(client.getDefaultURL()); @@ -207,10 +241,10 @@ public void testEnrollmentDisabled() { public void testUnableToCreateToken() throws Exception { this.externalEnrollmentTokenGenerator = mock(ExternalEnrollmentTokenGenerator.class); - when(externalEnrollmentTokenGenerator.createKibanaEnrollmentToken(anyString(), any(SecureString.class))).thenThrow( + when(externalEnrollmentTokenGenerator.createKibanaEnrollmentToken(anyString(), any(SecureString.class), any(URL.class))).thenThrow( new IllegalStateException("example exception message") ); - when(externalEnrollmentTokenGenerator.createNodeEnrollmentToken(anyString(), any(SecureString.class))).thenThrow( + when(externalEnrollmentTokenGenerator.createNodeEnrollmentToken(anyString(), any(SecureString.class), any(URL.class))).thenThrow( new IllegalStateException("example exception message") ); String scope = randomBoolean() ? 
"node" : "kibana"; From 806abee75a7c02eb438242bd51d72b236730bf53 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 30 Nov 2021 10:12:27 +1100 Subject: [PATCH 54/55] Optimize DLS bitset building for matchAll query (#81030) The PR avoids creating Weight and Scorer and stepping through docIterator when building DLS bitSet for an effective matchAll query. Instead it returns a MatchAllRoleBitSet directly after query rewritten for this scenario. Resolves: #80904 --- .../DocumentSubsetBitsetCache.java | 19 ++++++++++++++++++- .../DocumentSubsetBitsetCacheTests.java | 10 ++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java index 1f0d9ca8bca8d..070f67a626c46 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java @@ -13,8 +13,10 @@ import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; @@ -273,7 +275,11 @@ private BitSet computeBitSet(Query query, LeafReaderContext context) throws IOEx final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); final IndexSearcher searcher = new IndexSearcher(topLevelContext); searcher.setQueryCache(null); - final Weight weight = 
searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); + final Query rewrittenQuery = searcher.rewrite(query); + if (isEffectiveMatchAllDocsQuery(rewrittenQuery)) { + return new MatchAllRoleBitSet(context.reader().maxDoc()); + } + final Weight weight = searcher.createWeight(rewrittenQuery, ScoreMode.COMPLETE_NO_SCORES, 1f); final Scorer s = weight.scorer(context); if (s == null) { return null; @@ -282,6 +288,17 @@ private BitSet computeBitSet(Query query, LeafReaderContext context) throws IOEx } } + // Package private for testing + static boolean isEffectiveMatchAllDocsQuery(Query rewrittenQuery) { + if (rewrittenQuery instanceof ConstantScoreQuery && ((ConstantScoreQuery) rewrittenQuery).getQuery() instanceof MatchAllDocsQuery) { + return true; + } + if (rewrittenQuery instanceof MatchAllDocsQuery) { + return true; + } + return false; + } + private void maybeLogCacheFullWarning() { final long nextLogTime = cacheFullWarningTime.get(); final long now = System.currentTimeMillis(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java index ac99e492088a2..286d1785b5aad 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java @@ -19,9 +19,13 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; +import 
org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BitSetIterator; @@ -515,6 +519,12 @@ public void testMatchAllRoleBitSet() throws Exception { } } + public void testEquivalentMatchAllDocsQuery() { + assertTrue(DocumentSubsetBitsetCache.isEffectiveMatchAllDocsQuery(new MatchAllDocsQuery())); + assertTrue(DocumentSubsetBitsetCache.isEffectiveMatchAllDocsQuery(new ConstantScoreQuery(new MatchAllDocsQuery()))); + assertFalse(DocumentSubsetBitsetCache.isEffectiveMatchAllDocsQuery(new TermQuery(new Term("term")))); + } + private void runTestOnIndex(CheckedBiConsumer body) throws Exception { runTestOnIndices(1, ctx -> { final TestIndexContext indexContext = ctx.get(0); From 2629c32efd53809f1792f093567e19cfcf44bf29 Mon Sep 17 00:00:00 2001 From: weizijun Date: Tue, 30 Nov 2021 07:24:51 +0800 Subject: [PATCH 55/55] Fix ComposableIndexTemplate equals when composed_of is null (#80864) when composed_of is null, the ComposableIndexTemplate will return an empty list. it will cause the input and output ComposableIndexTemplate not equals. reproduce: in ComposableIndexTemplateTests.randomInstance method, make `List componentTemplates = null;`, there are the failed tests: ``` Tests with failures: - org.elasticsearch.cluster.metadata.MetadataIndexTemplateServiceTests.testAddIndexTemplateV2 - org.elasticsearch.cluster.metadata.MetadataIndexTemplateServiceTests.testRemoveMultipleIndexTemplateV2Wildcards - org.elasticsearch.cluster.metadata.MetadataIndexTemplateServiceTests.testUpdateIndexTemplateV2 - org.elasticsearch.cluster.metadata.MetadataIndexTemplateServiceTests.testRemoveMultipleIndexTemplateV2 ``` the PR add a `componentTemplatesEquals` method to make null and empty list equals . 
--- .../cluster/metadata/ComposableIndexTemplate.java | 15 ++++++++++++++- .../metadata/ComposableIndexTemplateTests.java | 14 ++++++++++++-- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index a7d1f8f9580bb..f7f5f84ab93b4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -275,7 +275,7 @@ public boolean equals(Object obj) { ComposableIndexTemplate other = (ComposableIndexTemplate) obj; return Objects.equals(this.indexPatterns, other.indexPatterns) && Objects.equals(this.template, other.template) - && Objects.equals(this.componentTemplates, other.componentTemplates) + && componentTemplatesEquals(this.componentTemplates, other.componentTemplates) && Objects.equals(this.priority, other.priority) && Objects.equals(this.version, other.version) && Objects.equals(this.metadata, other.metadata) @@ -283,6 +283,19 @@ public boolean equals(Object obj) { && Objects.equals(this.allowAutoCreate, other.allowAutoCreate); } + static boolean componentTemplatesEquals(List c1, List c2) { + if (Objects.equals(c1, c2)) { + return true; + } + if (c1 == null && c2.isEmpty()) { + return true; + } + if (c2 == null && c1.isEmpty()) { + return true; + } + return false; + } + @Override public String toString() { return Strings.toString(this); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java index 090d36e3cec26..fed508df0ba41 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java @@ 
-21,6 +21,8 @@ import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; + public class ComposableIndexTemplateTests extends AbstractDiffableSerializationTestCase { @Override protected ComposableIndexTemplate makeTestChanges(ComposableIndexTemplate testInstance) { @@ -79,11 +81,10 @@ public static ComposableIndexTemplate randomInstance() { } List indexPatterns = randomList(1, 4, () -> randomAlphaOfLength(4)); - List componentTemplates = randomList(0, 10, () -> randomAlphaOfLength(5)); return new ComposableIndexTemplate( indexPatterns, template, - componentTemplates, + randomBoolean() ? null : randomList(0, 10, () -> randomAlphaOfLength(5)), randomBoolean() ? null : randomNonNegativeLong(), randomBoolean() ? null : randomNonNegativeLong(), meta, @@ -242,4 +243,13 @@ public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate ori throw new IllegalStateException("illegal randomization branch"); } } + + public void testComponentTemplatesEquals() { + assertThat(ComposableIndexTemplate.componentTemplatesEquals(null, null), equalTo(true)); + assertThat(ComposableIndexTemplate.componentTemplatesEquals(null, List.of()), equalTo(true)); + assertThat(ComposableIndexTemplate.componentTemplatesEquals(List.of(), null), equalTo(true)); + assertThat(ComposableIndexTemplate.componentTemplatesEquals(List.of(), List.of()), equalTo(true)); + assertThat(ComposableIndexTemplate.componentTemplatesEquals(List.of(randomAlphaOfLength(5)), List.of()), equalTo(false)); + assertThat(ComposableIndexTemplate.componentTemplatesEquals(List.of(), List.of(randomAlphaOfLength(5))), equalTo(false)); + } }