From 944972a2494c7c2f9f8a09f30dc72914b8e1f435 Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Wed, 23 Jan 2019 22:13:16 -0600 Subject: [PATCH 01/20] Deprecate HLRC EmptyResponse used by security (#37540) The EmptyResponse is essentially the same as returning a boolean, which is done in other places. This commit deprecates all the existing EmptyResponse methods and creates new boolean methods that have method params reordered so they can exist with the deprecated methods. A followup PR in master will remove the existing deprecated methods, fix the parameter ordering and deprecate the incorrectly ordered parameter methods. Relates #36938 --- .../client/RestHighLevelClient.java | 2 +- .../elasticsearch/client/SecurityClient.java | 103 ++++++++++++++++++ .../client/security/EmptyResponse.java | 2 + .../CustomRestHighLevelClientTests.java | 3 +- .../client/RestHighLevelClientTests.java | 43 ++++++-- .../SecurityDocumentationIT.java | 32 +++--- .../security/change-password.asciidoc | 5 +- .../high-level/security/disable-user.asciidoc | 5 +- .../high-level/security/enable-user.asciidoc | 5 +- 9 files changed, 161 insertions(+), 39 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index a9c6901d9820a..e82c2dc620494 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -1704,7 +1704,7 @@ protected final Resp parseEntity(final HttpEntity entity, } } - static boolean convertExistsResponse(Response response) { + protected static boolean convertExistsResponse(Response response) { return response.getStatusLine().getStatusCode() == 200; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java index 48a1cdb778243..4d8d1d5db43aa 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java @@ -237,12 +237,29 @@ public void getRoleMappingsAsync(final GetRoleMappingsRequest request, final Req * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response from the enable user call * @throws IOException in case there is a problem sending the request or parsing back the response + * @deprecated use {@link #enableUser(RequestOptions, EnableUserRequest)} instead */ + @Deprecated public EmptyResponse enableUser(EnableUserRequest request, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::enableUser, options, EmptyResponse::fromXContent, emptySet()); } + /** + * Enable a native realm or built-in user synchronously. + * See + * the docs for more. + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param request the request with the user to enable + * @return {@code true} if the request succeeded (the user is enabled) + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public boolean enableUser(RequestOptions options, EnableUserRequest request) throws IOException { + return restHighLevelClient.performRequest(request, SecurityRequestConverters::enableUser, options, + RestHighLevelClient::convertExistsResponse, emptySet()); + } + /** * Enable a native realm or built-in user asynchronously. 
* See @@ -251,13 +268,30 @@ public EmptyResponse enableUser(EnableUserRequest request, RequestOptions option * @param request the request with the user to enable * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @deprecated use {@link #enableUserAsync(RequestOptions, EnableUserRequest, ActionListener)} instead */ + @Deprecated public void enableUserAsync(EnableUserRequest request, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::enableUser, options, EmptyResponse::fromXContent, listener, emptySet()); } + /** + * Enable a native realm or built-in user asynchronously. + * See + * the docs for more. + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param request the request with the user to enable + * @param listener the listener to be notified upon request completion + */ + public void enableUserAsync(RequestOptions options, EnableUserRequest request, + ActionListener listener) { + restHighLevelClient.performRequestAsync(request, SecurityRequestConverters::enableUser, options, + RestHighLevelClient::convertExistsResponse, listener, emptySet()); + } + /** * Disable a native realm or built-in user synchronously. * See @@ -267,12 +301,29 @@ public void enableUserAsync(EnableUserRequest request, RequestOptions options, * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response from the enable user call * @throws IOException in case there is a problem sending the request or parsing back the response + * @deprecated use {@link #disableUser(RequestOptions, DisableUserRequest)} instead */ + @Deprecated public EmptyResponse disableUser(DisableUserRequest request, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::disableUser, options, EmptyResponse::fromXContent, emptySet()); } + /** + * Disable a native realm or built-in user synchronously. + * See + * the docs for more. + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param request the request with the user to disable + * @return {@code true} if the request succeeded (the user is disabled) + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public boolean disableUser(RequestOptions options, DisableUserRequest request) throws IOException { + return restHighLevelClient.performRequest(request, SecurityRequestConverters::disableUser, options, + RestHighLevelClient::convertExistsResponse, emptySet()); + } + /** * Disable a native realm or built-in user asynchronously. * See @@ -281,13 +332,30 @@ public EmptyResponse disableUser(DisableUserRequest request, RequestOptions opti * @param request the request with the user to disable * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @deprecated use {@link #disableUserAsync(RequestOptions, DisableUserRequest, ActionListener)} instead */ + @Deprecated public void disableUserAsync(DisableUserRequest request, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::disableUser, options, EmptyResponse::fromXContent, listener, emptySet()); } + /** + * Disable a native realm or built-in user asynchronously. + * See + * the docs for more. + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param request the request with the user to disable + * @param listener the listener to be notified upon request completion + */ + public void disableUserAsync(RequestOptions options, DisableUserRequest request, + ActionListener listener) { + restHighLevelClient.performRequestAsync(request, SecurityRequestConverters::disableUser, options, + RestHighLevelClient::convertExistsResponse, listener, emptySet()); + } + /** * Authenticate the current user and return all the information about the authenticated user. * See @@ -457,12 +525,29 @@ public void getSslCertificatesAsync(RequestOptions options, ActionListener + * the docs for more. + * + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param request the request with the user's new password + * @return {@code true} if the request succeeded (the new password was set) + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public boolean changePassword(RequestOptions options, ChangePasswordRequest request) throws IOException { + return restHighLevelClient.performRequest(request, SecurityRequestConverters::changePassword, options, + RestHighLevelClient::convertExistsResponse, emptySet()); + } + /** * Change the password of a user of a native realm or built-in user asynchronously. * See @@ -471,13 +556,31 @@ public EmptyResponse changePassword(ChangePasswordRequest request, RequestOption * @param request the request with the user's new password * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @deprecated use {@link #changePasswordAsync(RequestOptions, ChangePasswordRequest, ActionListener)} instead */ + @Deprecated public void changePasswordAsync(ChangePasswordRequest request, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::changePassword, options, EmptyResponse::fromXContent, listener, emptySet()); } + /** + * Change the password of a user of a native realm or built-in user asynchronously. + * See + * the docs for more. + * + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param request the request with the user's new password + * @param listener the listener to be notified upon request completion + */ + public void changePasswordAsync(RequestOptions options, ChangePasswordRequest request, + ActionListener listener) { + restHighLevelClient.performRequestAsync(request, SecurityRequestConverters::changePassword, options, + RestHighLevelClient::convertExistsResponse, listener, emptySet()); + } + + /** * Delete a role mapping. * See diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/EmptyResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/EmptyResponse.java index 62fea88e52356..961a9cb3cdfb4 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/EmptyResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/EmptyResponse.java @@ -26,7 +26,9 @@ /** * Response for a request which simply returns an empty object. 
+ @deprecated Use a boolean instead of this class */ +@Deprecated public final class EmptyResponse { private static final ObjectParser PARSER = new ObjectParser<>("empty_response", false, EmptyResponse::new); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java index 316de885fa136..afaf09755386c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java @@ -122,7 +122,8 @@ private static RequestOptions optionsForNodeName(String nodeName) { */ @SuppressForbidden(reason = "We're forced to uses Class#getDeclaredMethods() here because this test checks protected methods") public void testMethodsVisibility() { - final String[] methodNames = new String[]{"parseEntity", + final String[] methodNames = new String[]{"convertExistsResponse", + "parseEntity", "parseResponseException", "performRequest", "performRequestAndParseEntity", diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 2fa4283971b40..a20d78f939da5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -715,6 +715,10 @@ public void testApiNamingConventions() throws Exception { "nodes.reload_secure_settings", "search_shards", }; + List booleanReturnMethods = Arrays.asList( + "security.enable_user", + "security.disable_user", + "security.change_password"); Set deprecatedMethods = new HashSet<>(); deprecatedMethods.add("indices.force_merge"); deprecatedMethods.add("multi_get"); @@ -736,6 +740,7 @@ public void 
testApiNamingConventions() throws Exception { .map(method -> Tuple.tuple(toSnakeCase(method.getName()), method)) .flatMap(tuple -> tuple.v2().getReturnType().getName().endsWith("Client") ? getSubClientMethods(tuple.v1(), tuple.v2().getReturnType()) : Stream.of(tuple)) + .filter(tuple -> tuple.v2().getAnnotation(Deprecated.class) == null) .collect(Collectors.groupingBy(Tuple::v1, Collectors.mapping(Tuple::v2, Collectors.toSet()))); @@ -753,7 +758,7 @@ public void testApiNamingConventions() throws Exception { } else if (isSubmitTaskMethod(apiName)) { assertSubmitTaskMethod(methods, method, apiName, restSpec); } else { - assertSyncMethod(method, apiName); + assertSyncMethod(method, apiName, booleanReturnMethods); apiUnsupported.remove(apiName); if (apiSpec.contains(apiName) == false) { if (deprecatedMethods.contains(apiName)) { @@ -790,9 +795,9 @@ public void testApiNamingConventions() throws Exception { assertThat("Some API are not supported but they should be: " + apiUnsupported, apiUnsupported.size(), equalTo(0)); } - private static void assertSyncMethod(Method method, String apiName) { + private static void assertSyncMethod(Method method, String apiName, List booleanReturnMethods) { //A few methods return a boolean rather than a response object - if (apiName.equals("ping") || apiName.contains("exist")) { + if (apiName.equals("ping") || apiName.contains("exist") || booleanReturnMethods.contains(apiName)) { assertThat("the return type for method [" + method + "] is incorrect", method.getReturnType().getSimpleName(), equalTo("boolean")); } else { @@ -811,10 +816,18 @@ private static void assertSyncMethod(Method method, String apiName) { method.getParameterTypes()[0], equalTo(RequestOptions.class)); } else { assertEquals("incorrect number of arguments for method [" + method + "]", 2, method.getParameterTypes().length); - assertThat("the first parameter to method [" + method + "] is the wrong type", - method.getParameterTypes()[0].getSimpleName(), endsWith("Request")); 
- assertThat("the second parameter to method [" + method + "] is the wrong type", - method.getParameterTypes()[1], equalTo(RequestOptions.class)); + // This is no longer true for all methods. Some methods can contain these 2 args backwards because of deprecation + if (method.getParameterTypes()[0].equals(RequestOptions.class)) { + assertThat("the first parameter to method [" + method + "] is the wrong type", + method.getParameterTypes()[0], equalTo(RequestOptions.class)); + assertThat("the second parameter to method [" + method + "] is the wrong type", + method.getParameterTypes()[1].getSimpleName(), endsWith("Request")); + } else { + assertThat("the first parameter to method [" + method + "] is the wrong type", + method.getParameterTypes()[0].getSimpleName(), endsWith("Request")); + assertThat("the second parameter to method [" + method + "] is the wrong type", + method.getParameterTypes()[1], equalTo(RequestOptions.class)); + } } } @@ -829,10 +842,18 @@ private static void assertAsyncMethod(Map> methods, Method m assertThat(method.getParameterTypes()[1], equalTo(ActionListener.class)); } else { assertEquals("async method [" + method + "] has the wrong number of arguments", 3, method.getParameterTypes().length); - assertThat("the first parameter to async method [" + method + "] should be a request type", - method.getParameterTypes()[0].getSimpleName(), endsWith("Request")); - assertThat("the second parameter to async method [" + method + "] is the wrong type", - method.getParameterTypes()[1], equalTo(RequestOptions.class)); + // This is no longer true for all methods. 
Some methods can contain these 2 args backwards because of deprecation + if (method.getParameterTypes()[0].equals(RequestOptions.class)) { + assertThat("the first parameter to async method [" + method + "] should be a request type", + method.getParameterTypes()[0], equalTo(RequestOptions.class)); + assertThat("the second parameter to async method [" + method + "] is the wrong type", + method.getParameterTypes()[1].getSimpleName(), endsWith("Request")); + } else { + assertThat("the first parameter to async method [" + method + "] should be a request type", + method.getParameterTypes()[0].getSimpleName(), endsWith("Request")); + assertThat("the second parameter to async method [" + method + "] is the wrong type", + method.getParameterTypes()[1], equalTo(RequestOptions.class)); + } assertThat("the third parameter to async method [" + method + "] is the wrong type", method.getParameterTypes()[2], equalTo(ActionListener.class)); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index f5ec0e2c885b4..183a8942a7b2b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -44,7 +44,6 @@ import org.elasticsearch.client.security.DeleteUserRequest; import org.elasticsearch.client.security.DeleteUserResponse; import org.elasticsearch.client.security.DisableUserRequest; -import org.elasticsearch.client.security.EmptyResponse; import org.elasticsearch.client.security.EnableUserRequest; import org.elasticsearch.client.security.ExpressionRoleMapping; import org.elasticsearch.client.security.GetPrivilegesRequest; @@ -85,7 +84,6 @@ import javax.crypto.SecretKeyFactory; import javax.crypto.spec.PBEKeySpec; - import java.io.IOException; 
import java.util.ArrayList; import java.util.Arrays; @@ -519,18 +517,18 @@ public void testEnableUser() throws Exception { { //tag::enable-user-execute EnableUserRequest request = new EnableUserRequest("enable_user", RefreshPolicy.NONE); - EmptyResponse response = client.security().enableUser(request, RequestOptions.DEFAULT); + boolean response = client.security().enableUser(RequestOptions.DEFAULT, request); //end::enable-user-execute - assertNotNull(response); + assertTrue(response); } { //tag::enable-user-execute-listener EnableUserRequest request = new EnableUserRequest("enable_user", RefreshPolicy.NONE); - ActionListener listener = new ActionListener() { + ActionListener listener = new ActionListener() { @Override - public void onResponse(EmptyResponse setUserEnabledResponse) { + public void onResponse(Boolean response) { // <1> } @@ -546,7 +544,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::enable-user-execute-async - client.security().enableUserAsync(request, RequestOptions.DEFAULT, listener); // <1> + client.security().enableUserAsync(RequestOptions.DEFAULT, request, listener); // <1> // end::enable-user-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -563,18 +561,18 @@ public void testDisableUser() throws Exception { { //tag::disable-user-execute DisableUserRequest request = new DisableUserRequest("disable_user", RefreshPolicy.NONE); - EmptyResponse response = client.security().disableUser(request, RequestOptions.DEFAULT); + boolean response = client.security().disableUser(RequestOptions.DEFAULT, request); //end::disable-user-execute - assertNotNull(response); + assertTrue(response); } { //tag::disable-user-execute-listener DisableUserRequest request = new DisableUserRequest("disable_user", RefreshPolicy.NONE); - ActionListener listener = new ActionListener() { + ActionListener listener = new ActionListener() { @Override - public void onResponse(EmptyResponse 
setUserEnabledResponse) { + public void onResponse(Boolean response) { // <1> } @@ -590,7 +588,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::disable-user-execute-async - client.security().disableUserAsync(request, RequestOptions.DEFAULT, listener); // <1> + client.security().disableUserAsync(RequestOptions.DEFAULT, request, listener); // <1> // end::disable-user-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1040,17 +1038,17 @@ public void testChangePassword() throws Exception { { //tag::change-password-execute ChangePasswordRequest request = new ChangePasswordRequest("change_password_user", newPassword, RefreshPolicy.NONE); - EmptyResponse response = client.security().changePassword(request, RequestOptions.DEFAULT); + boolean response = client.security().changePassword(RequestOptions.DEFAULT, request); //end::change-password-execute - assertNotNull(response); + assertTrue(response); } { //tag::change-password-execute-listener ChangePasswordRequest request = new ChangePasswordRequest("change_password_user", password, RefreshPolicy.NONE); - ActionListener listener = new ActionListener() { + ActionListener listener = new ActionListener() { @Override - public void onResponse(EmptyResponse response) { + public void onResponse(Boolean response) { // <1> } @@ -1066,7 +1064,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); //tag::change-password-execute-async - client.security().changePasswordAsync(request, RequestOptions.DEFAULT, listener); // <1> + client.security().changePasswordAsync(RequestOptions.DEFAULT, request, listener); // <1> //end::change-password-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); diff --git a/docs/java-rest/high-level/security/change-password.asciidoc b/docs/java-rest/high-level/security/change-password.asciidoc index 40490ad6a83b3..36d66b194cfea 100644 --- 
a/docs/java-rest/high-level/security/change-password.asciidoc +++ b/docs/java-rest/high-level/security/change-password.asciidoc @@ -15,8 +15,7 @@ include-tagged::{doc-tests}/SecurityDocumentationIT.java[change-password-execute [[java-rest-high-change-password-response]] ==== Response -The returned `EmptyResponse` does not contain any fields. The return of this -response indicates a successful request. +The returned `Boolean` indicates the request status. [[java-rest-high-x-pack-security-change-password-async]] ==== Asynchronous Execution @@ -35,7 +34,7 @@ has completed the `ActionListener` is called back using the `onResponse` method if the execution successfully completed or using the `onFailure` method if it failed. -A typical listener for a `EmptyResponse` looks like: +A typical listener for a `Boolean` looks like: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/java-rest/high-level/security/disable-user.asciidoc b/docs/java-rest/high-level/security/disable-user.asciidoc index 8bb2299946c42..564b8699ebb8d 100644 --- a/docs/java-rest/high-level/security/disable-user.asciidoc +++ b/docs/java-rest/high-level/security/disable-user.asciidoc @@ -15,8 +15,7 @@ include-tagged::{doc-tests}/SecurityDocumentationIT.java[disable-user-execute] [[java-rest-high-security-disable-user-response]] ==== Response -The returned `EmptyResponse` does not contain any fields. The return of this -response indicates a successful request. +The returned `Boolean` indicates the request status. [[java-rest-high-security-disable-user-async]] ==== Asynchronous Execution @@ -35,7 +34,7 @@ has completed the `ActionListener` is called back using the `onResponse` method if the execution successfully completed or using the `onFailure` method if it failed. 
-A typical listener for a `EmptyResponse` looks like: +A typical listener for a `Boolean` looks like: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/java-rest/high-level/security/enable-user.asciidoc b/docs/java-rest/high-level/security/enable-user.asciidoc index 7601653269789..4be0f38e39fa6 100644 --- a/docs/java-rest/high-level/security/enable-user.asciidoc +++ b/docs/java-rest/high-level/security/enable-user.asciidoc @@ -15,8 +15,7 @@ include-tagged::{doc-tests}/SecurityDocumentationIT.java[enable-user-execute] [[java-rest-high-security-enable-user-response]] ==== Response -The returned `EmptyResponse` does not contain any fields. The return of this -response indicates a successful request. +The returned `Boolean` indicates the request status. [[java-rest-high-security-enable-user-async]] ==== Asynchronous Execution @@ -35,7 +34,7 @@ has completed the `ActionListener` is called back using the `onResponse` method if the execution successfully completed or using the `onFailure` method if it failed. -A typical listener for a `EmptyResponse` looks like: +A typical listener for a `Boolean` looks like: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- From 04c64147bdd4137fb3d1804b810bf64a95969f9f Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Wed, 23 Jan 2019 22:14:01 -0600 Subject: [PATCH 02/20] Update authenticate to allow unknown fields (#37713) AuthenticateResponse did not allow unknown fields. This commit fixes the test and ConstructingObjectParser such that it does now allow unknown fields. 
Relates #36938 --- .../elasticsearch/client/security/AuthenticateResponse.java | 4 ++-- .../client/security/AuthenticateResponseTests.java | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/AuthenticateResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/AuthenticateResponse.java index b3b8fc2c23591..9bf770d89dbb3 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/AuthenticateResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/AuthenticateResponse.java @@ -53,11 +53,11 @@ public final class AuthenticateResponse { @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "client_security_authenticate_response", + "client_security_authenticate_response", true, a -> new AuthenticateResponse(new User((String) a[0], ((List) a[1]), (Map) a[2], (String) a[3], (String) a[4]), (Boolean) a[5], (RealmInfo) a[6], (RealmInfo) a[7])); static { - final ConstructingObjectParser realmInfoParser = new ConstructingObjectParser<>("realm_info", + final ConstructingObjectParser realmInfoParser = new ConstructingObjectParser<>("realm_info", true, a -> new RealmInfo((String) a[0], (String) a[1])); realmInfoParser.declareString(constructorArg(), REALM_NAME); realmInfoParser.declareString(constructorArg(), REALM_TYPE); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java index f59038af55af7..e348cb4d46c7e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java @@ -42,7 +42,9 @@ public void testFromXContent() 
throws IOException { this::createTestInstance, this::toXContent, AuthenticateResponse::fromXContent) - .supportsUnknownFields(false) + .supportsUnknownFields(true) + //metadata is a series of kv pairs, so we dont want to add random fields here for test equality + .randomFieldsExcludeFilter(f -> f.startsWith("metadata")) .test(); } From d9d13f34146a2ee29656b52a440fa3be5b8c2cf6 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 23 Jan 2019 23:41:21 -0800 Subject: [PATCH 03/20] Use project dependency instead of substitutions for distributions (#37730) Currently integration tests which use either bwc snapshot versions or the current version of elasticsearch depend on project substitutions to link to the build of those artifacts. Likewise, vagrant tests use dependency substitutions to get to bwc snapshots of rpm and debs. This commit changes those to depend on the relevant project/configuration and removes the dependency substitutions for distributions we do not publish. --- build.gradle | 21 ----------- .../gradle/test/ClusterFormationTasks.groovy | 34 +++++++++++++----- .../gradle/vagrant/VagrantTestPlugin.groovy | 35 ++++++++++++++----- .../gradle/VersionCollection.java | 20 +++++++++++ distribution/bwc/build.gradle | 31 +++++++++------- 5 files changed, 89 insertions(+), 52 deletions(-) diff --git a/build.gradle b/build.gradle index dade656f78b82..c5611e8b453fb 100644 --- a/build.gradle +++ b/build.gradle @@ -221,14 +221,6 @@ allprojects { "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${version}": ':modules:lang-painless:spi', "org.elasticsearch.test:framework:${version}": ':test:framework', "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:archives:integ-test-zip', - "downloads.zip:elasticsearch:${version}": ':distribution:archives:zip', - "downloads.zip:elasticsearch-oss:${version}": ':distribution:archives:oss-zip', - "downloads.tar:elasticsearch:${version}": ':distribution:archives:tar', - 
"downloads.tar:elasticsearch-oss:${version}": ':distribution:archives:oss-tar', - "downloads.rpm:elasticsearch:${version}": ':distribution:packages:rpm', - "downloads.rpm:elasticsearch-oss:${version}": ':distribution:packages:oss-rpm', - "downloads.deb:elasticsearch:${version}": ':distribution:packages:deb', - "downloads.deb:elasticsearch-oss:${version}": ':distribution:packages:oss-deb', "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage', "org.elasticsearch.xpack.test:feature-aware:${version}": ':x-pack:test:feature-aware', // for transport client @@ -240,19 +232,6 @@ allprojects { "org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator', "org.elasticsearch.plugin:rank-eval-client:${version}": ':modules:rank-eval', ] - // substitute unreleased versions with projects that check out and build locally - bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unreleasedVersion -> - Version unreleased = unreleasedVersion.version - String snapshotProject = ":distribution:bwc:${unreleasedVersion.gradleProjectName}" - ext.projectSubstitutions["downloads.deb:elasticsearch:${unreleased}"] = snapshotProject - ext.projectSubstitutions["downloads.rpm:elasticsearch:${unreleased}"] = snapshotProject - ext.projectSubstitutions["downloads.zip:elasticsearch:${unreleased}"] = snapshotProject - if (unreleased.onOrAfter('6.3.0')) { - ext.projectSubstitutions["downloads.deb:elasticsearch-oss:${unreleased}"] = snapshotProject - ext.projectSubstitutions["downloads.rpm:elasticsearch-oss:${unreleased}"] = snapshotProject - ext.projectSubstitutions["downloads.zip:elasticsearch-oss:${unreleased}"] = snapshotProject - } - } /* * Gradle only resolve project substitutions during dependency resolution but diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 72041b4e628d3..e38cb854a10b0 100644 --- 
a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -23,6 +23,7 @@ import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.VersionCollection import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.elasticsearch.gradle.plugin.PluginPropertiesExtension @@ -171,6 +172,12 @@ class ClusterFormationTasks { /** Adds a dependency on the given distribution */ static void configureDistributionDependency(Project project, String distro, Configuration configuration, String elasticsearchVersion) { + if (distro.equals("integ-test-zip")) { + // short circuit integ test so it doesn't complicate the rest of the distribution setup below + project.dependencies.add(configuration.name, + "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${elasticsearchVersion}@zip") + return + } // TEMP HACK // The oss docs CI build overrides the distro on the command line. This hack handles backcompat until CI is updated. 
if (distro.equals('oss-zip')) { @@ -180,22 +187,31 @@ class ClusterFormationTasks { distro = 'default' } // END TEMP HACK - if (['integ-test-zip', 'oss', 'default'].contains(distro) == false) { + if (['oss', 'default'].contains(distro) == false) { throw new GradleException("Unknown distribution: ${distro} in project ${project.path}") } Version version = Version.fromString(elasticsearchVersion) - if (version.before('6.3.0') && distro.startsWith('oss-')) { - distro = distro.substring('oss-'.length()) - } - String group = "downloads.zip" - if (distro.equals("integ-test-zip")) { - group = "org.elasticsearch.distribution.integ-test-zip" - } + String group = "downloads.zip" // dummy group, does not matter except for integ-test-zip, it is ignored by the fake ivy repo String artifactName = 'elasticsearch' if (distro.equals('oss') && Version.fromString(elasticsearchVersion).onOrAfter('6.3.0')) { artifactName += '-oss' } - project.dependencies.add(configuration.name, "${group}:${artifactName}:${elasticsearchVersion}@zip") + String snapshotProject = distro == 'oss' ? 'oss-zip' : 'zip' + Object dependency + boolean internalBuild = project.hasProperty('bwcVersions') + VersionCollection.UnreleasedVersionInfo unreleasedInfo = null + if (project.hasProperty('bwcVersions')) { + // NOTE: leniency is needed for external plugin authors using build-tools. maybe build the version compat info into build-tools? 
+ unreleasedInfo = project.bwcVersions.unreleasedInfo(version) + } + if (unreleasedInfo != null) { + dependency = project.dependencies.project(path: ":distribution:bwc:${unreleasedInfo.gradleProjectName}", configuration: snapshotProject) + } else if (internalBuild && elasticsearchVersion.equals(VersionProperties.elasticsearch)) { + dependency = project.dependencies.project(path: ":distribution:archives:${snapshotProject}") + } else { + dependency = "${group}:${artifactName}:${elasticsearchVersion}@zip" + } + project.dependencies.add(configuration.name, dependency) } /** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */ diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index dfb825afea255..fec6b2eab7405 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -4,6 +4,7 @@ import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.FileContentsTask import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.VersionCollection import org.gradle.api.* import org.gradle.api.artifacts.dsl.RepositoryHandler import org.gradle.api.execution.TaskExecutionAdapter @@ -184,22 +185,38 @@ class VagrantTestPlugin implements Plugin { upgradeFromVersion = Version.fromString(upgradeFromVersionRaw) } + List dependencies = new ArrayList<>() DISTRIBUTIONS.each { // Adds a dependency for the current version - project.dependencies.add(PACKAGING_CONFIGURATION, - project.dependencies.project(path: ":distribution:${it}", configuration: 'default')) + dependencies.add(project.dependencies.project(path: ":distribution:${it}", configuration: 'default')) } - UPGRADE_FROM_ARCHIVES.each { - 
// The version of elasticsearch that we upgrade *from* - project.dependencies.add(PACKAGING_CONFIGURATION, - "downloads.${it}:elasticsearch:${upgradeFromVersion}@${it}") - if (upgradeFromVersion.onOrAfter('6.3.0')) { - project.dependencies.add(PACKAGING_CONFIGURATION, - "downloads.${it}:elasticsearch-oss:${upgradeFromVersion}@${it}") + // The version of elasticsearch that we upgrade *from* + VersionCollection.UnreleasedVersionInfo unreleasedInfo = project.bwcVersions.unreleasedInfo(upgradeFromVersion) + if (unreleasedInfo != null) { + // handle snapshots pointing to bwc build + UPGRADE_FROM_ARCHIVES.each { + dependencies.add(project.dependencies.project( + path: ":distribution:bwc:${unreleasedInfo.gradleProjectName}", configuration: it)) + if (upgradeFromVersion.onOrAfter('6.3.0')) { + dependencies.add(project.dependencies.project( + path: ":distribution:bwc:${unreleasedInfo.gradleProjectName}", configuration: "oss-${it}")) + } + } + } else { + UPGRADE_FROM_ARCHIVES.each { + // The version of elasticsearch that we upgrade *from* + dependencies.add("downloads.${it}:elasticsearch:${upgradeFromVersion}@${it}") + if (upgradeFromVersion.onOrAfter('6.3.0')) { + dependencies.add("downloads.${it}:elasticsearch-oss:${upgradeFromVersion}@${it}") + } } } + for (Object dependency : dependencies) { + project.dependencies.add(PACKAGING_CONFIGURATION, dependency) + } + project.extensions.esvagrant.upgradeFromVersion = upgradeFromVersion } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java index 1cf2fd9e1037c..e2af34dbabdc0 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java @@ -20,6 +20,8 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import 
java.util.Map; @@ -86,6 +88,7 @@ public class VersionCollection { private final Version currentVersion; private final Map> groupByMajor; + private final Map unreleased; public class UnreleasedVersionInfo { public final Version version; @@ -129,6 +132,16 @@ protected VersionCollection(List versionLines, Version currentVersionPro assertCurrentVersionMatchesParsed(currentVersionProperty); assertNoOlderThanTwoMajors(); + + Map unreleased = new HashMap<>(); + for (Version unreleasedVersion : getUnreleased()) { + if (unreleasedVersion.equals(currentVersion)) { + continue; + } + unreleased.put(unreleasedVersion, + new UnreleasedVersionInfo(unreleasedVersion, getBranchFor(unreleasedVersion), getGradleProjectNameFor(unreleasedVersion))); + } + this.unreleased = Collections.unmodifiableMap(unreleased); } private void assertNoOlderThanTwoMajors() { @@ -150,6 +163,13 @@ private void assertCurrentVersionMatchesParsed(Version currentVersionProperty) { } } + /** + * Returns info about the unreleased version, or {@code null} if the version is released. + */ + public UnreleasedVersionInfo unreleasedInfo(Version version) { + return unreleased.get(version); + } + public void forPreviousUnreleased(Consumer consumer) { getUnreleased().stream() .filter(version -> version.equals(currentVersion) == false) diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 47f3b78c43f5d..a0acce1ef64eb 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -120,18 +120,19 @@ bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unre } } - List artifactFiles = [] + Map artifactFiles = [:] List projectDirs = [] - for (String project : ['zip', 'deb', 'rpm']) { + List projects = ['zip', 'deb', 'rpm'] + for (String projectName : projects) { String baseDir = "distribution" if (bwcVersion.onOrAfter('6.3.0')) { - baseDir += project == 'zip' ? '/archives' : '/packages' + baseDir += projectName == 'zip' ? 
'/archives' : '/packages' // add oss variant first - projectDirs.add("${baseDir}/oss-${project}") - artifactFiles.add(file("${checkoutDir}/${baseDir}/oss-${project}/build/distributions/elasticsearch-oss-${bwcVersion}-SNAPSHOT.${project}")) + projectDirs.add("${baseDir}/oss-${projectName}") + artifactFiles.put("oss-" + projectName, file("${checkoutDir}/${baseDir}/oss-${projectName}/build/distributions/elasticsearch-oss-${bwcVersion}-SNAPSHOT.${projectName}")) } - projectDirs.add("${baseDir}/${project}") - artifactFiles.add(file("${checkoutDir}/${baseDir}/${project}/build/distributions/elasticsearch-${bwcVersion}-SNAPSHOT.${project}")) + projectDirs.add("${baseDir}/${projectName}") + artifactFiles.put(projectName, file("${checkoutDir}/${baseDir}/${projectName}/build/distributions/elasticsearch-${bwcVersion}-SNAPSHOT.${projectName}")) } task buildBwcVersion(type: Exec) { @@ -187,7 +188,7 @@ bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unre standardOutput = new IndentingOutputStream(System.out, bwcVersion) errorOutput = new IndentingOutputStream(System.err, bwcVersion) doLast { - List missing = artifactFiles.grep { file -> + List missing = artifactFiles.values().grep { file -> false == file.exists() } if (false == missing.empty) { @@ -197,12 +198,16 @@ bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unre } } - artifacts { - for (File artifactFile : artifactFiles) { - String artifactName = artifactFile.name.contains('oss') ? 'elasticsearch-oss' : 'elasticsearch' + for (e in artifactFiles) { + String projectName = e.key + File artifactFile = e.value + String artifactFileName = artifactFile.name + String artifactName = artifactFileName.contains('oss') ? 
'elasticsearch-oss' : 'elasticsearch' String suffix = artifactFile.toString()[-3..-1] - 'default' file: artifactFile, name: artifactName, type: suffix, builtBy: buildBwcVersion - } + configurations.create(projectName) + artifacts { + it.add(projectName, [file: artifactFile, name: artifactName, type: suffix, builtBy: buildBwcVersion]) + } } // make sure no dependencies were added to assemble; we want it to be a no-op assemble.dependsOn = [] From 2908ca1b3509ed755cf9d0242d54349c60d4fa8f Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 24 Jan 2019 08:50:23 +0100 Subject: [PATCH 04/20] Fix index filtering in follow info api. (#37752) The filtering by follower index was completely broken. Also the wrong persistent tasks were selected, causing the wrong status to be reported. Closes #37738 --- .../ccr/action/TransportFollowInfoAction.java | 26 ++-- .../elasticsearch/xpack/ccr/FollowInfoIT.java | 139 ++++++++++++++++++ .../TransportFollowInfoActionTests.java | 73 +++++++++ .../TransportFollowStatsActionTests.java | 2 +- 4 files changed, 228 insertions(+), 12 deletions(-) create mode 100644 x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowInfoIT.java create mode 100644 x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoActionTests.java diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java index 3e9c0ecbef881..df227639137ae 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java @@ -65,19 +65,28 @@ protected void masterOperation(FollowInfoAction.Request request, List concreteFollowerIndices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(state, 
IndicesOptions.STRICT_EXPAND_OPEN_CLOSED, request.getFollowerIndices())); + List followerInfos = getFollowInfos(concreteFollowerIndices, state); + listener.onResponse(new FollowInfoAction.Response(followerInfos)); + } + + @Override + protected ClusterBlockException checkBlock(FollowInfoAction.Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + static List getFollowInfos(List concreteFollowerIndices, ClusterState state) { List followerInfos = new ArrayList<>(); PersistentTasksCustomMetaData persistentTasks = state.metaData().custom(PersistentTasksCustomMetaData.TYPE); - for (IndexMetaData indexMetaData : state.metaData()) { + for (String index : concreteFollowerIndices) { + IndexMetaData indexMetaData = state.metaData().index(index); Map ccrCustomData = indexMetaData.getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY); if (ccrCustomData != null) { Optional result; if (persistentTasks != null) { - result = persistentTasks.taskMap().values().stream() - .map(persistentTask -> (ShardFollowTask) persistentTask.getParams()) - .filter(shardFollowTask -> concreteFollowerIndices.isEmpty() || - concreteFollowerIndices.contains(shardFollowTask.getFollowShardId().getIndexName())) + result = persistentTasks.findTasks(ShardFollowTask.NAME, task -> true).stream() + .map(task -> (ShardFollowTask) task.getParams()) + .filter(shardFollowTask -> index.equals(shardFollowTask.getFollowShardId().getIndexName())) .findAny(); } else { result = Optional.empty(); @@ -107,11 +116,6 @@ protected void masterOperation(FollowInfoAction.Request request, } } - listener.onResponse(new FollowInfoAction.Response(followerInfos)); - } - - @Override - protected ClusterBlockException checkBlock(FollowInfoAction.Request request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + return followerInfos; } } diff --git 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowInfoIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowInfoIT.java new file mode 100644 index 0000000000000..478043a862b38 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowInfoIT.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.xpack.CcrSingleNodeTestCase; +import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction; +import org.elasticsearch.xpack.core.ccr.action.PauseFollowAction; +import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; + +import java.util.Comparator; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.ccr.LocalIndexFollowingIT.getIndexSettings; +import static org.elasticsearch.xpack.core.ccr.action.FollowInfoAction.Response.Status; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class FollowInfoIT extends CcrSingleNodeTestCase { + + public void testFollowInfoApiFollowerIndexFiltering() throws Exception { + final String leaderIndexSettings = getIndexSettings(1, 0, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("leader1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureGreen("leader1"); + 
assertAcked(client().admin().indices().prepareCreate("leader2").setSource(leaderIndexSettings, XContentType.JSON)); + ensureGreen("leader2"); + + PutFollowAction.Request followRequest = getPutFollowRequest("leader1", "follower1"); + client().execute(PutFollowAction.INSTANCE, followRequest).get(); + + followRequest = getPutFollowRequest("leader2", "follower2"); + client().execute(PutFollowAction.INSTANCE, followRequest).get(); + + FollowInfoAction.Request request = new FollowInfoAction.Request(); + request.setFollowerIndices("follower1"); + FollowInfoAction.Response response = client().execute(FollowInfoAction.INSTANCE, request).actionGet(); + assertThat(response.getFollowInfos().size(), equalTo(1)); + assertThat(response.getFollowInfos().get(0).getFollowerIndex(), equalTo("follower1")); + assertThat(response.getFollowInfos().get(0).getLeaderIndex(), equalTo("leader1")); + assertThat(response.getFollowInfos().get(0).getStatus(), equalTo(Status.ACTIVE)); + assertThat(response.getFollowInfos().get(0).getParameters(), notNullValue()); + + request = new FollowInfoAction.Request(); + request.setFollowerIndices("follower2"); + response = client().execute(FollowInfoAction.INSTANCE, request).actionGet(); + assertThat(response.getFollowInfos().size(), equalTo(1)); + assertThat(response.getFollowInfos().get(0).getFollowerIndex(), equalTo("follower2")); + assertThat(response.getFollowInfos().get(0).getLeaderIndex(), equalTo("leader2")); + assertThat(response.getFollowInfos().get(0).getStatus(), equalTo(Status.ACTIVE)); + assertThat(response.getFollowInfos().get(0).getParameters(), notNullValue()); + + request = new FollowInfoAction.Request(); + request.setFollowerIndices("_all"); + response = client().execute(FollowInfoAction.INSTANCE, request).actionGet(); + response.getFollowInfos().sort(Comparator.comparing(FollowInfoAction.Response.FollowerInfo::getFollowerIndex)); + assertThat(response.getFollowInfos().size(), equalTo(2)); + 
assertThat(response.getFollowInfos().get(0).getFollowerIndex(), equalTo("follower1")); + assertThat(response.getFollowInfos().get(0).getLeaderIndex(), equalTo("leader1")); + assertThat(response.getFollowInfos().get(0).getStatus(), equalTo(Status.ACTIVE)); + assertThat(response.getFollowInfos().get(0).getParameters(), notNullValue()); + assertThat(response.getFollowInfos().get(1).getFollowerIndex(), equalTo("follower2")); + assertThat(response.getFollowInfos().get(1).getLeaderIndex(), equalTo("leader2")); + assertThat(response.getFollowInfos().get(1).getStatus(), equalTo(Status.ACTIVE)); + assertThat(response.getFollowInfos().get(1).getParameters(), notNullValue()); + + // Pause follower1 index and check the follower info api: + assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower1")).actionGet()); + + request = new FollowInfoAction.Request(); + request.setFollowerIndices("follower1"); + response = client().execute(FollowInfoAction.INSTANCE, request).actionGet(); + assertThat(response.getFollowInfos().size(), equalTo(1)); + assertThat(response.getFollowInfos().get(0).getFollowerIndex(), equalTo("follower1")); + assertThat(response.getFollowInfos().get(0).getLeaderIndex(), equalTo("leader1")); + assertThat(response.getFollowInfos().get(0).getStatus(), equalTo(Status.PAUSED)); + assertThat(response.getFollowInfos().get(0).getParameters(), nullValue()); + + request = new FollowInfoAction.Request(); + request.setFollowerIndices("follower2"); + response = client().execute(FollowInfoAction.INSTANCE, request).actionGet(); + assertThat(response.getFollowInfos().size(), equalTo(1)); + assertThat(response.getFollowInfos().get(0).getFollowerIndex(), equalTo("follower2")); + assertThat(response.getFollowInfos().get(0).getLeaderIndex(), equalTo("leader2")); + assertThat(response.getFollowInfos().get(0).getStatus(), equalTo(Status.ACTIVE)); + assertThat(response.getFollowInfos().get(0).getParameters(), notNullValue()); + + request = new 
FollowInfoAction.Request(); + request.setFollowerIndices("_all"); + response = client().execute(FollowInfoAction.INSTANCE, request).actionGet(); + response.getFollowInfos().sort(Comparator.comparing(FollowInfoAction.Response.FollowerInfo::getFollowerIndex)); + assertThat(response.getFollowInfos().size(), equalTo(2)); + assertThat(response.getFollowInfos().get(0).getFollowerIndex(), equalTo("follower1")); + assertThat(response.getFollowInfos().get(0).getLeaderIndex(), equalTo("leader1")); + assertThat(response.getFollowInfos().get(0).getStatus(), equalTo(Status.PAUSED)); + assertThat(response.getFollowInfos().get(0).getParameters(), nullValue()); + assertThat(response.getFollowInfos().get(1).getFollowerIndex(), equalTo("follower2")); + assertThat(response.getFollowInfos().get(1).getLeaderIndex(), equalTo("leader2")); + assertThat(response.getFollowInfos().get(1).getStatus(), equalTo(Status.ACTIVE)); + assertThat(response.getFollowInfos().get(1).getParameters(), notNullValue()); + + assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower2")).actionGet()); + } + + public void testFollowInfoApiIndexMissing() throws Exception { + final String leaderIndexSettings = getIndexSettings(1, 0, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("leader1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureGreen("leader1"); + assertAcked(client().admin().indices().prepareCreate("leader2").setSource(leaderIndexSettings, XContentType.JSON)); + ensureGreen("leader2"); + + PutFollowAction.Request followRequest = getPutFollowRequest("leader1", "follower1"); + client().execute(PutFollowAction.INSTANCE, followRequest).get(); + + followRequest = getPutFollowRequest("leader2", "follower2"); + client().execute(PutFollowAction.INSTANCE, followRequest).get(); + + FollowInfoAction.Request request1 = new FollowInfoAction.Request(); + 
request1.setFollowerIndices("follower3"); + expectThrows(IndexNotFoundException.class, () -> client().execute(FollowInfoAction.INSTANCE, request1).actionGet()); + + FollowInfoAction.Request request2 = new FollowInfoAction.Request(); + request2.setFollowerIndices("follower2", "follower3"); + expectThrows(IndexNotFoundException.class, () -> client().execute(FollowInfoAction.INSTANCE, request2).actionGet()); + + assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower1")).actionGet()); + assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower2")).actionGet()); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoActionTests.java new file mode 100644 index 0000000000000..4a023e6ee3ed2 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoActionTests.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ccr.Ccr; +import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction.Response; +import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction.Response.FollowerInfo; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; + +import static org.elasticsearch.xpack.ccr.action.TransportFollowStatsActionTests.createShardFollowTask; +import static org.hamcrest.Matchers.equalTo; + +public class TransportFollowInfoActionTests extends ESTestCase { + + public void testGetFollowInfos() { + ClusterState state = createCS( + new String[] {"follower1", "follower2", "follower3", "index4"}, + new boolean[]{true, true, true, false}, + new boolean[]{true, true, false, false} + ); + List concreteIndices = Arrays.asList("follower1", "follower3"); + + List result = TransportFollowInfoAction.getFollowInfos(concreteIndices, state); + assertThat(result.size(), equalTo(2)); + assertThat(result.get(0).getFollowerIndex(), equalTo("follower1")); + assertThat(result.get(0).getStatus(), equalTo(Response.Status.ACTIVE)); + assertThat(result.get(1).getFollowerIndex(), equalTo("follower3")); + assertThat(result.get(1).getStatus(), equalTo(Response.Status.PAUSED)); + } + + private static ClusterState createCS(String[] indices, boolean[] followerIndices, boolean[] statuses) { + PersistentTasksCustomMetaData.Builder persistentTasks = PersistentTasksCustomMetaData.builder(); + MetaData.Builder mdBuilder = MetaData.builder(); + for (int i = 0; i < indices.length; i++) { + String index = indices[i]; + boolean isFollowIndex = followerIndices[i]; + 
boolean active = statuses[i]; + + IndexMetaData.Builder imdBuilder = IndexMetaData.builder(index) + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0); + + if (isFollowIndex) { + imdBuilder.putCustom(Ccr.CCR_CUSTOM_METADATA_KEY, new HashMap<>()); + if (active) { + persistentTasks.addTask(Integer.toString(i), ShardFollowTask.NAME, createShardFollowTask(index), null); + } + } + mdBuilder.put(imdBuilder); + } + + mdBuilder.putCustom(PersistentTasksCustomMetaData.TYPE, persistentTasks.build()); + return ClusterState.builder(new ClusterName("_cluster")) + .metaData(mdBuilder.build()) + .build(); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java index bc8c58f1de7de..b8f570e4ef4f6 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java @@ -44,7 +44,7 @@ public void testFindFollowerIndicesFromShardFollowTasks() { assertThat(result.size(), equalTo(0)); } - private static ShardFollowTask createShardFollowTask(String followerIndex) { + static ShardFollowTask createShardFollowTask(String followerIndex) { return new ShardFollowTask( null, new ShardId(followerIndex, "", 0), From bdef2ab8c047e31d84e7fdbe9c843e57802a6291 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 24 Jan 2019 08:57:40 +0000 Subject: [PATCH 05/20] Use m_m_nodes from Zen1 master for Zen2 bootstrap (#37701) Today we support a smooth rolling upgrade from Zen1 to Zen2 by automatically bootstrapping the cluster once all the Zen1 nodes have left, as long as the `minimum_master_nodes` count is satisfied. However this means that Zen2 nodes also require the `minimum_master_nodes` setting for this one specific and transient situation. 
Since nodes only perform this automatic bootstrapping if they previously belonged to a Zen1 cluster, they can keep track of the `minimum_master_nodes` setting from the previous master instead of requiring it to be set on the Zen2 node. --- .../state/TransportClusterStateAction.java | 1 + .../elasticsearch/cluster/ClusterState.java | 43 ++++++++++-- .../cluster/coordination/Coordinator.java | 4 +- .../coordination/DiscoveryUpgradeService.java | 69 +++++++++++-------- .../cluster/coordination/JoinHelper.java | 2 +- .../coordination/JoinTaskExecutor.java | 11 ++- .../discovery/zen/NodeJoinController.java | 6 +- .../discovery/zen/ZenDiscovery.java | 2 +- .../ClusterSerializationTests.java | 6 +- .../MinimumMasterNodesInClusterStateIT.java | 66 ++++++++++++++++++ .../zen/NodeJoinControllerTests.java | 2 +- .../indices/cluster/ClusterStateChanges.java | 2 +- 12 files changed, 168 insertions(+), 46 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/discovery/zen/MinimumMasterNodesInClusterStateIT.java diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 9fc35dc7be38a..75dc811f37db6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -127,6 +127,7 @@ private void buildResponse(final ClusterStateRequest request, ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName()); builder.version(currentState.version()); builder.stateUUID(currentState.stateUUID()); + builder.minimumMasterNodesOnPublishingMaster(currentState.getMinimumMasterNodesOnPublishingMaster()); if (request.nodes()) { builder.nodes(currentState.nodes()); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java 
b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 8dd7291410ecc..200f5b59d5416 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.Version; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -178,17 +179,19 @@ default boolean isPrivate() { private final boolean wasReadFromDiff; + private final int minimumMasterNodesOnPublishingMaster; + // built on demand private volatile RoutingNodes routingNodes; public ClusterState(long version, String stateUUID, ClusterState state) { this(state.clusterName, version, stateUUID, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), - state.customs(), false); + state.customs(), -1, false); } public ClusterState(ClusterName clusterName, long version, String stateUUID, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap customs, - boolean wasReadFromDiff) { + int minimumMasterNodesOnPublishingMaster, boolean wasReadFromDiff) { this.version = version; this.stateUUID = stateUUID; this.clusterName = clusterName; @@ -197,6 +200,7 @@ public ClusterState(ClusterName clusterName, long version, String stateUUID, Met this.nodes = nodes; this.blocks = blocks; this.customs = customs; + this.minimumMasterNodesOnPublishingMaster = minimumMasterNodesOnPublishingMaster; this.wasReadFromDiff = wasReadFromDiff; } @@ -290,6 +294,17 @@ public Set getVotingConfigExclusions() { return coordinationMetaData().getVotingConfigExclusions(); } + /** + * The node-level `discovery.zen.minimum_master_nodes` setting on the master node that published this cluster state, for use in rolling + * upgrades from 6.x 
to 7.x. Once all the 6.x master-eligible nodes have left the cluster, the 7.x nodes use this value to determine how + * many master-eligible nodes must be discovered before the cluster can be bootstrapped. Note that this method returns the node-level + * value of this setting, and ignores any cluster-level override that was set via the API. Callers are expected to combine this value + * with any value set in the cluster-level settings. This should be removed once we no longer need support for {@link Version#V_6_7_0}. + */ + public int getMinimumMasterNodesOnPublishingMaster() { + return minimumMasterNodesOnPublishingMaster; + } + // Used for testing and logging to determine how this cluster state was send over the wire public boolean wasReadFromDiff() { return wasReadFromDiff; @@ -644,7 +659,7 @@ public static class Builder { private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK; private final ImmutableOpenMap.Builder customs; private boolean fromDiff; - + private int minimumMasterNodesOnPublishingMaster = -1; public Builder(ClusterState state) { this.clusterName = state.clusterName; @@ -655,6 +670,7 @@ public Builder(ClusterState state) { this.metaData = state.metaData(); this.blocks = state.blocks(); this.customs = ImmutableOpenMap.builder(state.customs()); + this.minimumMasterNodesOnPublishingMaster = state.minimumMasterNodesOnPublishingMaster; this.fromDiff = false; } @@ -715,6 +731,11 @@ public Builder stateUUID(String uuid) { return this; } + public Builder minimumMasterNodesOnPublishingMaster(int minimumMasterNodesOnPublishingMaster) { + this.minimumMasterNodesOnPublishingMaster = minimumMasterNodesOnPublishingMaster; + return this; + } + public Builder putCustom(String type, Custom custom) { customs.put(type, custom); return this; @@ -739,7 +760,8 @@ public ClusterState build() { if (UNKNOWN_UUID.equals(uuid)) { uuid = UUIDs.randomBase64UUID(); } - return new ClusterState(clusterName, version, uuid, metaData, routingTable, nodes, blocks, 
customs.build(), fromDiff); + return new ClusterState(clusterName, version, uuid, metaData, routingTable, nodes, blocks, customs.build(), + minimumMasterNodesOnPublishingMaster, fromDiff); } public static byte[] toBytes(ClusterState state) throws IOException { @@ -782,6 +804,7 @@ public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) thr Custom customIndexMetaData = in.readNamedWriteable(Custom.class); builder.putCustom(customIndexMetaData.getWriteableName(), customIndexMetaData); } + builder.minimumMasterNodesOnPublishingMaster = in.getVersion().onOrAfter(Version.V_7_0_0) ? in.readVInt() : -1; return builder.build(); } @@ -807,6 +830,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeNamedWriteable(cursor.value); } } + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeVInt(minimumMasterNodesOnPublishingMaster); + } } private static class ClusterStateDiff implements Diff { @@ -829,6 +855,8 @@ private static class ClusterStateDiff implements Diff { private final Diff> customs; + private final int minimumMasterNodesOnPublishingMaster; + ClusterStateDiff(ClusterState before, ClusterState after) { fromUuid = before.stateUUID; toUuid = after.stateUUID; @@ -839,6 +867,7 @@ private static class ClusterStateDiff implements Diff { metaData = after.metaData.diff(before.metaData); blocks = after.blocks.diff(before.blocks); customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); + minimumMasterNodesOnPublishingMaster = after.minimumMasterNodesOnPublishingMaster; } ClusterStateDiff(StreamInput in, DiscoveryNode localNode) throws IOException { @@ -851,6 +880,7 @@ private static class ClusterStateDiff implements Diff { metaData = MetaData.readDiffFrom(in); blocks = ClusterBlocks.readDiffFrom(in); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); + minimumMasterNodesOnPublishingMaster = 
in.getVersion().onOrAfter(Version.V_7_0_0) ? in.readVInt() : -1; } @Override @@ -864,6 +894,9 @@ public void writeTo(StreamOutput out) throws IOException { metaData.writeTo(out); blocks.writeTo(out); customs.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeVInt(minimumMasterNodesOnPublishingMaster); + } } @Override @@ -883,9 +916,9 @@ public ClusterState apply(ClusterState state) { builder.metaData(metaData.apply(state.metaData)); builder.blocks(blocks.apply(state.blocks)); builder.customs(customs.apply(state.customs)); + builder.minimumMasterNodesOnPublishingMaster(minimumMasterNodesOnPublishingMaster); builder.fromDiff(true); return builder.build(); } - } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 4a018c1f78f91..a4e1d3ed8c990 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -168,7 +168,7 @@ public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSe this.reconfigurator = new Reconfigurator(settings, clusterSettings); this.clusterBootstrapService = new ClusterBootstrapService(settings, transportService, this::getFoundPeers, this::isInitialConfigurationSet, this::setInitialConfiguration); - this.discoveryUpgradeService = new DiscoveryUpgradeService(settings, clusterSettings, transportService, + this.discoveryUpgradeService = new DiscoveryUpgradeService(settings, transportService, this::isInitialConfigurationSet, joinHelper, peerFinder::getFoundPeers, this::setInitialConfiguration); this.lagDetector = new LagDetector(settings, transportService.getThreadPool(), n -> removeNode(n, "lagging"), transportService::getLocalNode); @@ -467,7 +467,7 @@ void becomeCandidate(String method) { clusterFormationFailureHelper.start(); if (getCurrentTerm() == ZEN1_BWC_TERM) { - 
discoveryUpgradeService.activate(lastKnownLeader); + discoveryUpgradeService.activate(lastKnownLeader, coordinationState.get().getLastAcceptedState()); } leaderChecker.setCurrentNodes(DiscoveryNodes.EMPTY_NODES); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/DiscoveryUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/DiscoveryUpgradeService.java index 496adb65bb6f0..56102704848c8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/DiscoveryUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/DiscoveryUpgradeService.java @@ -24,11 +24,11 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -60,6 +60,7 @@ import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING; import static org.elasticsearch.cluster.ClusterState.UNKNOWN_VERSION; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentSet; +import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING; import static org.elasticsearch.discovery.zen.ZenDiscovery.PING_TIMEOUT_SETTING; /** @@ -80,7 +81,12 @@ public class DiscoveryUpgradeService { public static final Setting ENABLE_UNSAFE_BOOTSTRAPPING_ON_UPGRADE_SETTING = Setting.boolSetting("discovery.zen.unsafe_rolling_upgrades_enabled", true, Setting.Property.NodeScope); - private final 
ElectMasterService electMasterService; + /** + * Dummy {@link ElectMasterService} that is only used to choose the best 6.x master from the discovered nodes, ignoring the + * `minimum_master_nodes` setting. + */ + private static final ElectMasterService electMasterService = new ElectMasterService(Settings.EMPTY); + private final TransportService transportService; private final BooleanSupplier isBootstrappedSupplier; private final JoinHelper joinHelper; @@ -93,12 +99,11 @@ public class DiscoveryUpgradeService { @Nullable // null if no active joining round private volatile JoiningRound joiningRound; - public DiscoveryUpgradeService(Settings settings, ClusterSettings clusterSettings, TransportService transportService, + public DiscoveryUpgradeService(Settings settings, TransportService transportService, BooleanSupplier isBootstrappedSupplier, JoinHelper joinHelper, Supplier> peersSupplier, Consumer initialConfigurationConsumer) { assert Version.CURRENT.major == Version.V_6_6_0.major + 1 : "remove this service once unsafe upgrades are no longer needed"; - electMasterService = new ElectMasterService(settings); this.transportService = transportService; this.isBootstrappedSupplier = isBootstrappedSupplier; this.joinHelper = joinHelper; @@ -107,12 +112,9 @@ public DiscoveryUpgradeService(Settings settings, ClusterSettings clusterSetting this.bwcPingTimeout = BWC_PING_TIMEOUT_SETTING.get(settings); this.enableUnsafeBootstrappingOnUpgrade = ENABLE_UNSAFE_BOOTSTRAPPING_ON_UPGRADE_SETTING.get(settings); this.clusterName = CLUSTER_NAME_SETTING.get(settings); - - clusterSettings.addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, - electMasterService::minimumMasterNodes); // TODO reject update if the new value is too large } - public void activate(Optional lastKnownLeader) { + public void activate(Optional lastKnownLeader, ClusterState lastAcceptedClusterState) { // called under coordinator mutex if (isBootstrappedSupplier.getAsBoolean()) { @@ 
-122,8 +124,13 @@ public void activate(Optional lastKnownLeader) { assert lastKnownLeader.isPresent() == false || Coordinator.isZen1Node(lastKnownLeader.get()) : lastKnownLeader; // if there was a leader and it's not a old node then we must have been bootstrapped + final Settings dynamicSettings = lastAcceptedClusterState.metaData().settings(); + final int minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(dynamicSettings) + ? DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(dynamicSettings) + : lastAcceptedClusterState.getMinimumMasterNodesOnPublishingMaster(); + assert joiningRound == null : joiningRound; - joiningRound = new JoiningRound(lastKnownLeader.isPresent()); + joiningRound = new JoiningRound(enableUnsafeBootstrappingOnUpgrade && lastKnownLeader.isPresent(), minimumMasterNodes); joiningRound.scheduleNextAttempt(); } @@ -160,15 +167,21 @@ void countDown() { private class JoiningRound { private final boolean upgrading; + private final int minimumMasterNodes; - JoiningRound(boolean upgrading) { + JoiningRound(boolean upgrading, int minimumMasterNodes) { this.upgrading = upgrading; + this.minimumMasterNodes = minimumMasterNodes; } private boolean isRunning() { return joiningRound == this && isBootstrappedSupplier.getAsBoolean() == false; } + private boolean canBootstrap(Set discoveryNodes) { + return upgrading && minimumMasterNodes <= discoveryNodes.stream().filter(DiscoveryNode::isMasterNode).count(); + } + void scheduleNextAttempt() { if (isRunning() == false) { return; @@ -189,26 +202,22 @@ public void run() { // this set of nodes is reasonably fresh - the PeerFinder cleans up nodes to which the transport service is not // connected each time it wakes up (every second by default) - logger.debug("nodes: {}", discoveryNodes); - - if (electMasterService.hasEnoughMasterNodes(discoveryNodes)) { - if (discoveryNodes.stream().anyMatch(Coordinator::isZen1Node)) { - electBestOldMaster(discoveryNodes); - } else if (upgrading && 
enableUnsafeBootstrappingOnUpgrade) { - // no Zen1 nodes found, but the last-known master was a Zen1 node, so this is a rolling upgrade - transportService.getThreadPool().generic().execute(() -> { - try { - initialConfigurationConsumer.accept(new VotingConfiguration(discoveryNodes.stream() - .map(DiscoveryNode::getId).collect(Collectors.toSet()))); - } catch (Exception e) { - logger.debug("exception during bootstrapping upgrade, retrying", e); - } finally { - scheduleNextAttempt(); - } - }); - } else { - scheduleNextAttempt(); - } + logger.debug("upgrading={}, minimumMasterNodes={}, nodes={}", upgrading, minimumMasterNodes, discoveryNodes); + + if (discoveryNodes.stream().anyMatch(Coordinator::isZen1Node)) { + electBestOldMaster(discoveryNodes); + } else if (canBootstrap(discoveryNodes)) { + // no Zen1 nodes found, but the last-known master was a Zen1 node, so this is a rolling upgrade + transportService.getThreadPool().generic().execute(() -> { + try { + initialConfigurationConsumer.accept(new VotingConfiguration(discoveryNodes.stream() + .map(DiscoveryNode::getId).collect(Collectors.toSet()))); + } catch (Exception e) { + logger.debug("exception during bootstrapping upgrade, retrying", e); + } finally { + scheduleNextAttempt(); + } + }); } else { scheduleNextAttempt(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java index 8c41d7b2eaa52..53fada396fcef 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java @@ -90,7 +90,7 @@ public JoinHelper(Settings settings, AllocationService allocationService, Master this.masterService = masterService; this.transportService = transportService; this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings); - this.joinTaskExecutor = new JoinTaskExecutor(allocationService, logger) { + this.joinTaskExecutor 
= new JoinTaskExecutor(settings, allocationService, logger) { @Override public ClusterTasksResult execute(ClusterState currentState, List joiningTasks) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java index b754e50a945c1..2dcc1022f8d46 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java @@ -29,7 +29,9 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import java.util.ArrayList; @@ -46,6 +48,8 @@ public class JoinTaskExecutor implements ClusterStateTaskExecutor secondThirdNodes = internalCluster().startNodes(2); + assertThat(internalCluster().getMasterName(), equalTo(firstNode)); + + final List allNodes = Stream.concat(Stream.of(firstNode), secondThirdNodes.stream()).collect(Collectors.toList()); + for (final String node : allNodes) { + final ClusterState localState = client(node).admin().cluster().state(new ClusterStateRequest().local(true)).get().getState(); + assertThat(localState.getMinimumMasterNodesOnPublishingMaster(), equalTo(1)); + assertThat(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(localState.metaData().settings()), equalTo(2)); + } + + internalCluster().stopRandomNode(nameFilter(firstNode)); + assertThat(internalCluster().getMasterName(), isIn(secondThirdNodes)); + + for (final String node : secondThirdNodes) { + final ClusterState localState = client(node).admin().cluster().state(new ClusterStateRequest().local(true)).get().getState(); 
+ assertThat(localState.getMinimumMasterNodesOnPublishingMaster(), equalTo(2)); + assertThat(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(localState.metaData().settings()), equalTo(2)); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index a3ae6b07b19c9..35a2173e0aea0 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -141,7 +141,7 @@ private void setupMasterServiceAndNodeJoinController(ClusterState initialState) throw new IllegalStateException("method setupMasterServiceAndNodeJoinController can only be called once"); } masterService = ClusterServiceUtils.createMasterService(threadPool, initialState); - nodeJoinController = new NodeJoinController(masterService, createAllocationService(Settings.EMPTY), + nodeJoinController = new NodeJoinController(Settings.EMPTY, masterService, createAllocationService(Settings.EMPTY), new ElectMasterService(Settings.EMPTY)); } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 387ba1c3d9653..8a00be28f5eb2 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -213,7 +213,7 @@ allocationService, new AliasValidator(), environment, transportService, clusterService, threadPool, createIndexService, actionFilters, indexNameExpressionResolver); nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, logger); - joinTaskExecutor = new JoinTaskExecutor(allocationService, logger); + joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger); } public ClusterState 
createIndex(ClusterState state, CreateIndexRequest request) { From 36889e8a2f8320a4e8a7528b7247f087346b1a85 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 24 Jan 2019 10:11:18 +0100 Subject: [PATCH 06/20] Remove Custom Listeners from SnapshotsService (#37629) * Remove Custom Listeners from SnapshotsService Motivations: * Shorten the code some more * Use ActionListener#wrap to get easy to reason about behavior in failure scenarios * Remove duplication in the logic of handling snapshot completion listeners (listeners removing themselves and comparing snapshots to their targets) * Also here, move all listener handling into `SnapshotsService` and remove custom listener class by putting listeners in a map --- .../create/TransportCreateSnapshotAction.java | 42 +---- .../delete/TransportDeleteSnapshotAction.java | 13 +- .../snapshots/SnapshotsService.java | 178 ++++++------------ 3 files changed, 72 insertions(+), 161 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index abd50f0785a83..fe4089df945ac 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -28,8 +28,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.snapshots.Snapshot; -import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -72,37 +70,13 @@ protected ClusterBlockException checkBlock(CreateSnapshotRequest request, 
Cluste @Override protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, - final ActionListener listener) { - snapshotsService.createSnapshot(request, new SnapshotsService.CreateSnapshotListener() { - @Override - public void onResponse(Snapshot snapshotCreated) { - if (request.waitForCompletion()) { - snapshotsService.addListener(new SnapshotsService.SnapshotCompletionListener() { - @Override - public void onSnapshotCompletion(Snapshot snapshot, SnapshotInfo snapshotInfo) { - if (snapshotCreated.equals(snapshot)) { - listener.onResponse(new CreateSnapshotResponse(snapshotInfo)); - snapshotsService.removeListener(this); - } - } - - @Override - public void onSnapshotFailure(Snapshot snapshot, Exception e) { - if (snapshotCreated.equals(snapshot)) { - listener.onFailure(e); - snapshotsService.removeListener(this); - } - } - }); - } else { - listener.onResponse(new CreateSnapshotResponse()); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + final ActionListener listener) { + if (request.waitForCompletion()) { + snapshotsService.executeSnapshot(request, + ActionListener.wrap(snapshotInfo-> listener.onResponse(new CreateSnapshotResponse(snapshotInfo)), listener::onFailure)); + } else { + snapshotsService.createSnapshot(request, + ActionListener.wrap(snapshot -> listener.onResponse(new CreateSnapshotResponse()), listener::onFailure)); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index e53330349b3bb..dfb38aaed2106 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -67,16 +67,7 @@ protected 
ClusterBlockException checkBlock(DeleteSnapshotRequest request, Cluste @Override protected void masterOperation(final DeleteSnapshotRequest request, ClusterState state, final ActionListener listener) { - snapshotsService.deleteSnapshot(request.repository(), request.snapshot(), new SnapshotsService.DeleteSnapshotListener() { - @Override - public void onResponse() { - listener.onResponse(new AcknowledgedResponse(true)); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }, false); + snapshotsService.deleteSnapshot(request.repository(), request.snapshot(), + ActionListener.wrap(v -> listener.onResponse(new AcknowledgedResponse(true)), listener::onFailure), false); } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 4e8c26ea593d6..af6d7055e533a 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -80,6 +80,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.stream.Collectors; @@ -91,10 +92,10 @@ *

* A typical snapshot creating process looks like this: *

    - *
  • On the master node the {@link #createSnapshot(CreateSnapshotRequest, CreateSnapshotListener)} is called and makes sure that + *
  • On the master node the {@link #createSnapshot(CreateSnapshotRequest, ActionListener)} is called and makes sure that * no snapshot is currently running and registers the new snapshot in cluster state
  • *
  • When cluster state is updated - * the {@link #beginSnapshot(ClusterState, SnapshotsInProgress.Entry, boolean, CreateSnapshotListener)} method kicks in and initializes + * the {@link #beginSnapshot(ClusterState, SnapshotsInProgress.Entry, boolean, ActionListener)} method kicks in and initializes * the snapshot in the repository and then populates list of shards that needs to be snapshotted in cluster state
  • *
  • Each data node is watching for these shards and when new shards scheduled for snapshotting appear in the cluster state, data nodes * start processing them through {@link SnapshotShardsService#processIndexShardSnapshots(ClusterChangedEvent)} method
  • @@ -118,7 +119,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus private final ThreadPool threadPool; - private final CopyOnWriteArrayList snapshotCompletionListeners = new CopyOnWriteArrayList<>(); + private final Map>> snapshotCompletionListeners = new ConcurrentHashMap<>(); @Inject public SnapshotsService(Settings settings, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, @@ -225,6 +226,17 @@ public List currentSnapshots(final String repositoryName) { return Collections.unmodifiableList(snapshotList); } + /** + * Same as {@link #createSnapshot(CreateSnapshotRequest, ActionListener)} but invokes its callback on completion of + * the snapshot. + * + * @param request snapshot request + * @param listener snapshot completion listener + */ + public void executeSnapshot(final CreateSnapshotRequest request, final ActionListener listener) { + createSnapshot(request, ActionListener.wrap(snapshot -> addListener(snapshot, listener), listener::onFailure)); + } + /** * Initializes the snapshotting process. *

    @@ -234,7 +246,7 @@ public List currentSnapshots(final String repositoryName) { * @param request snapshot request * @param listener snapshot creation listener */ - public void createSnapshot(final CreateSnapshotRequest request, final CreateSnapshotListener listener) { + public void createSnapshot(final CreateSnapshotRequest request, final ActionListener listener) { final String repositoryName = request.repository(); final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); validate(repositoryName, snapshotName); @@ -351,7 +363,7 @@ private static void validate(final String repositoryName, final String snapshotN private void beginSnapshot(final ClusterState clusterState, final SnapshotsInProgress.Entry snapshot, final boolean partial, - final CreateSnapshotListener userCreateSnapshotListener) { + final ActionListener userCreateSnapshotListener) { threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() { boolean snapshotCreated; @@ -491,11 +503,11 @@ private class CleanupAfterErrorListener implements ActionListener private final SnapshotsInProgress.Entry snapshot; private final boolean snapshotCreated; - private final CreateSnapshotListener userCreateSnapshotListener; + private final ActionListener userCreateSnapshotListener; private final Exception e; CleanupAfterErrorListener(SnapshotsInProgress.Entry snapshot, boolean snapshotCreated, - CreateSnapshotListener userCreateSnapshotListener, Exception e) { + ActionListener userCreateSnapshotListener, Exception e) { this.snapshot = snapshot; this.snapshotCreated = snapshotCreated; this.userCreateSnapshotListener = userCreateSnapshotListener; @@ -781,9 +793,9 @@ public ClusterState execute(ClusterState currentState) throws Exception { entries.add(updatedSnapshot); // Clean up the snapshot that failed to start from the old master - deleteSnapshot(snapshot.snapshot(), new DeleteSnapshotListener() { + deleteSnapshot(snapshot.snapshot(), new 
ActionListener() { @Override - public void onResponse() { + public void onResponse(Void aVoid) { logger.debug("cleaned up abandoned snapshot {} in INIT state", snapshot.snapshot()); } @@ -1077,15 +1089,16 @@ public void onNoLongerMaster(String source) { @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - for (SnapshotCompletionListener listener : snapshotCompletionListeners) { + final List> completionListeners = snapshotCompletionListeners.remove(snapshot); + if (completionListeners != null) { try { - if (snapshotInfo != null) { - listener.onSnapshotCompletion(snapshot, snapshotInfo); + if (snapshotInfo == null) { + ActionListener.onFailure(completionListeners, failure); } else { - listener.onSnapshotFailure(snapshot, failure); + ActionListener.onResponse(completionListeners, snapshotInfo); } - } catch (Exception t) { - logger.warn(() -> new ParameterizedMessage("failed to notify listener [{}]", listener), t); + } catch (Exception e) { + logger.warn("Failed to notify listeners", e); } } if (listener != null) { @@ -1103,7 +1116,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS * @param snapshotName snapshotName * @param listener listener */ - public void deleteSnapshot(final String repositoryName, final String snapshotName, final DeleteSnapshotListener listener, + public void deleteSnapshot(final String repositoryName, final String snapshotName, final ActionListener listener, final boolean immediatePriority) { // First, look for the snapshot in the repository final Repository repository = repositoriesService.repository(repositoryName); @@ -1143,7 +1156,7 @@ public void deleteSnapshot(final String repositoryName, final String snapshotNam * @param listener listener * @param repositoryStateId the unique id for the state of the repository */ - private void deleteSnapshot(final Snapshot snapshot, final DeleteSnapshotListener listener, final long repositoryStateId, + private void 
deleteSnapshot(final Snapshot snapshot, final ActionListener listener, final long repositoryStateId, final boolean immediatePriority) { Priority priority = immediatePriority ? Priority.IMMEDIATE : Priority.NORMAL; clusterService.submitStateUpdateTask("delete snapshot", new ClusterStateUpdateTask(priority) { @@ -1234,8 +1247,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { } } SnapshotsInProgress.Entry newSnapshot = new SnapshotsInProgress.Entry(snapshotEntry, State.ABORTED, shards); - snapshots = new SnapshotsInProgress(newSnapshot); - clusterStateBuilder.putCustom(SnapshotsInProgress.TYPE, snapshots); + clusterStateBuilder.putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(newSnapshot)); } return clusterStateBuilder.build(); } @@ -1249,50 +1261,34 @@ public void onFailure(String source, Exception e) { public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { if (waitForSnapshot) { logger.trace("adding snapshot completion listener to wait for deleted snapshot to finish"); - addListener(new SnapshotCompletionListener() { - @Override - public void onSnapshotCompletion(Snapshot completedSnapshot, SnapshotInfo snapshotInfo) { - if (completedSnapshot.equals(snapshot)) { - logger.debug("deleted snapshot completed - deleting files"); - removeListener(this); - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { - try { - deleteSnapshot(completedSnapshot.getRepository(), completedSnapshot.getSnapshotId().getName(), - listener, true); - - } catch (Exception ex) { - logger.warn(() -> - new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex); - } - } - ); - } - } - - @Override - public void onSnapshotFailure(Snapshot failedSnapshot, Exception e) { - if (failedSnapshot.equals(snapshot)) { - logger.warn("deleted snapshot failed - deleting files", e); - removeListener(this); - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { + addListener(snapshot, 
ActionListener.wrap( + snapshotInfo -> { + logger.debug("deleted snapshot completed - deleting files"); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { try { - deleteSnapshot(failedSnapshot.getRepository(), - failedSnapshot.getSnapshotId().getName(), - listener, - true); - } catch (SnapshotMissingException smex) { - logger.info(() -> new ParameterizedMessage( - "Tried deleting in-progress snapshot [{}], but it " + - "could not be found after failing to abort.", - smex.getSnapshotName()), e); - listener.onFailure(new SnapshotException(snapshot, - "Tried deleting in-progress snapshot [" + smex.getSnapshotName() + "], but it " + - "could not be found after failing to abort.", smex)); + deleteSnapshot(snapshot.getRepository(), snapshot.getSnapshotId().getName(), listener, true); + } catch (Exception ex) { + logger.warn(() -> new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex); } - }); - } + } + ); + }, + e -> { + logger.warn("deleted snapshot failed - deleting files", e); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { + try { + deleteSnapshot(snapshot.getRepository(), snapshot.getSnapshotId().getName(), listener, true); + } catch (SnapshotMissingException smex) { + logger.info(() -> new ParameterizedMessage( + "Tried deleting in-progress snapshot [{}], but it could not be found after failing to abort.", + smex.getSnapshotName()), e); + listener.onFailure(new SnapshotException(snapshot, + "Tried deleting in-progress snapshot [" + smex.getSnapshotName() + "], but it " + + "could not be found after failing to abort.", smex)); + } + }); } - }); + )); } else { logger.debug("deleted snapshot is not running - deleting files"); deleteSnapshotFromRepository(snapshot, listener, repositoryStateId); @@ -1335,8 +1331,7 @@ public static boolean isRepositoryInUse(ClusterState clusterState, String reposi * @param listener listener * @param repositoryStateId the unique id representing the state of the repository at the time the 
deletion began */ - private void deleteSnapshotFromRepository(final Snapshot snapshot, @Nullable final DeleteSnapshotListener listener, - long repositoryStateId) { + private void deleteSnapshotFromRepository(Snapshot snapshot, @Nullable ActionListener listener, long repositoryStateId) { threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { try { Repository repository = repositoriesService.repository(snapshot.getRepository()); @@ -1354,7 +1349,7 @@ private void deleteSnapshotFromRepository(final Snapshot snapshot, @Nullable fin * Removes the snapshot deletion from {@link SnapshotDeletionsInProgress} in the cluster state. */ private void removeSnapshotDeletionFromClusterState(final Snapshot snapshot, @Nullable final Exception failure, - @Nullable final DeleteSnapshotListener listener) { + @Nullable final ActionListener listener) { clusterService.submitStateUpdateTask("remove snapshot deletion metadata", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { @@ -1388,7 +1383,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS if (failure != null) { listener.onFailure(failure); } else { - listener.onResponse(); + listener.onResponse(null); } } } @@ -1508,19 +1503,11 @@ private static Set indicesToFailForCloseOrDeletion(ClusterState currentSt /** * Adds snapshot completion listener * + * @param snapshot Snapshot to listen for * @param listener listener */ - public void addListener(SnapshotCompletionListener listener) { - this.snapshotCompletionListeners.add(listener); - } - - /** - * Removes snapshot completion listener - * - * @param listener listener - */ - public void removeListener(SnapshotCompletionListener listener) { - this.snapshotCompletionListeners.remove(listener); + private void addListener(Snapshot snapshot, ActionListener listener) { + snapshotCompletionListeners.computeIfAbsent(snapshot, k -> new CopyOnWriteArrayList<>()).add(listener); } @Override @@ -1541,45 +1528,4 @@ 
protected void doClose() { public RepositoriesService getRepositoriesService() { return repositoriesService; } - - /** - * Listener for create snapshot operation - */ - public interface CreateSnapshotListener { - - /** - * Called when snapshot has successfully started - * - * @param snapshot snapshot that was created - */ - void onResponse(Snapshot snapshot); - - /** - * Called if a snapshot operation couldn't start - */ - void onFailure(Exception e); - } - - /** - * Listener for delete snapshot operation - */ - public interface DeleteSnapshotListener { - - /** - * Called if delete operation was successful - */ - void onResponse(); - - /** - * Called if delete operation failed - */ - void onFailure(Exception e); - } - - public interface SnapshotCompletionListener { - - void onSnapshotCompletion(Snapshot snapshot, SnapshotInfo snapshotInfo); - - void onSnapshotFailure(Snapshot snapshot, Exception e); - } } From e1226f69b7648dc50204afd641969a3b9f283a83 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 24 Jan 2019 09:18:48 +0000 Subject: [PATCH 07/20] [ML] Increase close job timeout and lower the max number (#37770) --- .../org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java | 4 ++-- .../elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java index c4150d633a8f0..f79e9f1e4e945 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java @@ -124,11 +124,11 @@ public void testLazyNodeValidation() throws Exception { } public void testSingleNode() throws Exception { - verifyMaxNumberOfJobsLimit(1, randomIntBetween(1, 100)); + verifyMaxNumberOfJobsLimit(1, randomIntBetween(1, 20)); } 
public void testMultipleNodes() throws Exception { - verifyMaxNumberOfJobsLimit(3, randomIntBetween(1, 100)); + verifyMaxNumberOfJobsLimit(3, randomIntBetween(1, 20)); } private void verifyMaxNumberOfJobsLimit(int numNodes, int maxNumberOfJobsPerNode) throws Exception { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index 44cdd2434aec7..e5627fdd41b81 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -318,7 +318,7 @@ public static void deleteAllJobs(Logger logger, Client client) throws Exception try { CloseJobAction.Request closeRequest = new CloseJobAction.Request(MetaData.ALL); - closeRequest.setCloseTimeout(TimeValue.timeValueSeconds(20L)); + closeRequest.setCloseTimeout(TimeValue.timeValueSeconds(30L)); logger.info("Closing jobs using [{}]", MetaData.ALL); CloseJobAction.Response response = client.execute(CloseJobAction.INSTANCE, closeRequest) .get(); @@ -327,7 +327,7 @@ public static void deleteAllJobs(Logger logger, Client client) throws Exception try { CloseJobAction.Request closeRequest = new CloseJobAction.Request(MetaData.ALL); closeRequest.setForce(true); - closeRequest.setCloseTimeout(TimeValue.timeValueSeconds(20L)); + closeRequest.setCloseTimeout(TimeValue.timeValueSeconds(30L)); CloseJobAction.Response response = client.execute(CloseJobAction.INSTANCE, closeRequest).get(); assertTrue(response.isClosed()); From f12bfb4684a1ec9cfc4d99f5e7becaaa920022c6 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 24 Jan 2019 09:58:50 +0000 Subject: [PATCH 08/20] Mute FollowerFailOverIT testReadRequestsReturnsLatestMappingVersion Due to https://github.com/elastic/elasticsearch/issues/37807 --- .../java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java | 1 + 1 
file changed, 1 insertion(+) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java index f03eeaaa03648..b49e7c9ced7ce 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java @@ -230,6 +230,7 @@ public void testAddNewReplicasOnFollower() throws Exception { pauseFollow("follower-index"); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37807") public void testReadRequestsReturnsLatestMappingVersion() throws Exception { InternalTestCluster leaderCluster = getLeaderCluster(); Settings nodeAttributes = Settings.builder().put("node.attr.box", "large").build(); From b6936e3c1e00af50393c9be15f966cde333f46f4 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Thu, 24 Jan 2019 12:36:10 +0200 Subject: [PATCH 09/20] Remove index audit output type (#37707) This commit removes the Index Audit Output type, following its deprecation in 6.7 by 8765a31d4e6770. It also adds the migration notice (settings notice). In general, the problem with the index audit output is that event indexing can be slower than the rate with which audit events are generated, especially during the daily rollovers or the rolling cluster upgrades. In this situation audit events will be lost which is a terrible failure situation for an audit system. Besides the settings under the `xpack.security.audit.index` namespace, the `xpack.security.audit.outputs` setting has also been deprecated and will be removed in 7. Although explicitly configuring the logfile output does not touch any deprecation bits, this setting is made redundant in 7 so this PR deprecates it as well.
Relates #29881 --- .../migration/migrate_7_0/settings.asciidoc | 15 + .../settings/audit-settings.asciidoc | 126 +- .../settings/security-settings.asciidoc | 10 - .../auditing/auditing-search-queries.asciidoc | 23 +- .../en/security/auditing/event-types.asciidoc | 9 +- .../auditing/forwarding-logs.asciidoc | 99 -- .../docs/en/security/auditing/index.asciidoc | 6 - .../security/auditing/output-index.asciidoc | 49 - .../en/security/auditing/overview.asciidoc | 25 +- .../docs/en/security/configuring-es.asciidoc | 5 +- .../xpack/security/Security.java | 163 +-- .../xpack/security/SecurityFeatureSet.java | 7 +- .../security/audit/index/IndexAuditTrail.java | 1240 ----------------- .../security/SecurityFeatureSetTests.java | 13 +- .../xpack/security/SecuritySettingsTests.java | 51 - .../xpack/security/SecurityTests.java | 32 - .../security/audit/index/AuditTrailTests.java | 195 --- .../index/IndexAuditTrailMutedTests.java | 324 ----- .../audit/index/IndexAuditTrailTests.java | 996 ------------- .../RemoteIndexAuditTrailStartingTests.java | 169 --- .../AuditTrailSettingsUpdateTests.java | 1 - x-pack/plugin/sql/qa/security/build.gradle | 2 - x-pack/qa/audit-tests/build.gradle | 40 - .../xpack/security/audit/IndexAuditIT.java | 226 --- x-pack/qa/rolling-upgrade/build.gradle | 2 - .../upgrades/IndexAuditUpgradeIT.java | 97 -- 26 files changed, 51 insertions(+), 3874 deletions(-) delete mode 100644 x-pack/docs/en/security/auditing/forwarding-logs.asciidoc delete mode 100644 x-pack/docs/en/security/auditing/output-index.asciidoc delete mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java delete mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecuritySettingsTests.java delete mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/AuditTrailTests.java delete mode 100644 
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java delete mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java delete mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java delete mode 100644 x-pack/qa/audit-tests/build.gradle delete mode 100644 x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java delete mode 100644 x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc index b5ae85eb7dff6..a005f80c1663a 100644 --- a/docs/reference/migration/migrate_7_0/settings.asciidoc +++ b/docs/reference/migration/migrate_7_0/settings.asciidoc @@ -154,3 +154,18 @@ node's keystore, i.e., they are not to be specified via the cluster settings API `xpack.notification.pagerduty.account..secure_service_api_key` - `xpack.notification.slack.account..url`, instead use `xpack.notification.slack.account..secure_url` + +[float] +[[remove-audit-index-output]] +==== Audit index output type removed + +All the settings under the `xpack.security.audit.index` namespace have been +removed. In addition, the `xpack.security.audit.outputs` setting has been +removed as well. + +These settings enabled and configured the audit index output type. This output +type has been removed because it was unreliable in certain scenarios and this +could have led to dropping audit events while the operations on the system +were allowed to continue as usual. The recommended replacement is the +use of the `logfile` audit output type and using other components from the +Elastic Stack to handle the indexing part.
diff --git a/docs/reference/settings/audit-settings.asciidoc b/docs/reference/settings/audit-settings.asciidoc index d551516984052..483c889ce5898 100644 --- a/docs/reference/settings/audit-settings.asciidoc +++ b/docs/reference/settings/audit-settings.asciidoc @@ -14,38 +14,19 @@ file. For more information, see `xpack.security.audit.enabled`:: Set to `true` to enable auditing on the node. The default value is `false`. - -`xpack.security.audit.outputs`:: -Specifies where audit logs are output. For example: `[ index, logfile ]`. The -default value is `logfile`, which puts the auditing events in a dedicated -file named `_audit.log` on each node. -You can also specify `index`, which puts the auditing events in an {es} index -that is prefixed with `.security_audit_log`. The index can reside on the same -cluster or a separate cluster. -+ -For backwards compatibility reasons, if you use the logfile output type, a -`_access.log` file is also created. It contains the same -information, but it uses the older (pre-6.5.0) formatting style. -If the backwards compatible format is not required, it should be disabled. -To do that, change its logger level to `off` in the `log4j2.properties` file. -For more information, see <>. -+ --- -TIP: If the index is unavailable, it is possible for auditing events to -be lost. The `index` output type should therefore be used in conjunction with -the `logfile` output type and the latter should be the official record of events. - --- +This puts the auditing events in a dedicated file named `_audit.log` +on each node. For more information, see <>. [[event-audit-settings]] ==== Audited Event Settings -The events and some other information about what gets logged can be -controlled by using the following settings: +The events and some other information about what gets logged can be controlled +by using the following settings: `xpack.security.audit.logfile.events.include`:: Specifies which events to include in the auditing output. 
The default value is: -`access_denied, access_granted, anonymous_access_denied, authentication_failed, connection_denied, tampered_request, run_as_denied, run_as_granted`. +`access_denied, access_granted, anonymous_access_denied, authentication_failed, +connection_denied, tampered_request, run_as_denied, run_as_granted`. `xpack.security.audit.logfile.events.exclude`:: Excludes the specified events from the output. By default, no events are @@ -113,98 +94,3 @@ A list of index names or wildcards. The specified policy will not print audit events when all the indices in the event match these values. If the event concerns several indices, some of which are *not* covered by the policy, the policy will *not* cover this event. - -[[index-audit-settings]] -==== Audit Log Indexing Configuration Settings - -`xpack.security.audit.index.bulk_size`:: -Controls how many audit events are batched into a single write. The default -value is `1000`. - -`xpack.security.audit.index.flush_interval`:: -Controls how often buffered events are flushed to the index. The default value -is `1s`. - -`xpack.security.audit.index.rollover`:: -Controls how often to roll over to a new index: `hourly`, `daily`, `weekly`, or -`monthly`. The default value is `daily`. - -`xpack.security.audit.index.events.include`:: -Specifies the audit events to be indexed. The default value is -`anonymous_access_denied, authentication_failed, realm_authentication_failed, access_granted, access_denied, tampered_request, connection_granted, connection_denied, run_as_granted, run_as_denied`. -See {xpack-ref}/audit-event-types.html[Audit Entry Types] for the -complete list. - -`xpack.security.audit.index.events.exclude`:: -Excludes the specified auditing events from indexing. By default, no events are -excluded. - -`xpack.security.audit.index.events.emit_request_body`:: -Specifies whether to include the request body from REST requests on certain -event types such as `authentication_failed`. The default value is `false`. 
- -`xpack.security.audit.index.settings`:: -Specifies settings for the indices that the events are stored in. For example, -the following configuration sets the number of shards and replicas to 1 for the -audit indices: -+ --- -[source,yaml] ----------------------------- -xpack.security.audit.index.settings: - index: - number_of_shards: 1 - number_of_replicas: 1 ----------------------------- --- -+ --- -NOTE: These settings apply to the local audit indices, as well as to the -<>, but only if the remote cluster -does *not* have {security-features} enabled or the {es} versions are different. -If the remote cluster has {security-features} enabled and the versions coincide, -the settings for the audit indices there will take precedence, -even if they are unspecified (i.e. left to defaults). --- - -[[remote-audit-settings]] -==== Remote Audit Log Indexing Configuration Settings - -To index audit events to a remote {es} cluster, you configure the following -`xpack.security.audit.index.client` settings: - -`xpack.security.audit.index.client.hosts`:: -Specifies a comma-separated list of `host:port` pairs. These hosts should be -nodes in the remote cluster. If you are using default values for the -<> setting, you can omit the -`port` value. Otherwise, it must match the `transport.port` setting. - -`xpack.security.audit.index.client.cluster.name`:: -Specifies the name of the remote cluster. - -`xpack.security.audit.index.client.xpack.security.user`:: -Specifies the `username:password` pair that is used to authenticate with the -remote cluster. This user must have authority to create the `.security-audit` -index on the remote cluster. - -If the remote {es} cluster has Transport Layer Security (TLS/SSL) enabled, you -must set the following setting to `true`: - -`xpack.security.audit.index.client.xpack.security.transport.ssl.enabled`:: -Used to enable or disable TLS/SSL for the transport client that forwards audit -logs to the remote cluster. The default is `false`. 
- -You must also specify the information necessary to access certificates. See -<>. - -You can pass additional settings to the remote client by specifying them in the -`xpack.security.audit.index.client` namespace. For example, you can add -<> and -<> in that namespace. To allow the remote -client to discover all of the nodes in the remote cluster you can specify the -`client.transport.sniff` setting: - -[source,yaml] ----------------------------- -xpack.security.audit.index.client.transport.sniff: true ----------------------------- diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 0a88a19f6f050..8a7144c0a1388 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -1303,16 +1303,6 @@ transport profile, use the prefix `transport.profiles.$PROFILE.xpack.security.` append the portion of the setting after `xpack.security.transport.`. For the key setting, this would be `transport.profiles.$PROFILE.xpack.security.ssl.key`. -[[auditing-tls-ssl-settings]] -:ssl-prefix: xpack.security.audit.index.client.xpack.security.transport -:component: Auditing -:client-auth-default!: -:server!: - -include::ssl-settings.asciidoc[] - -See also <>. - [float] [[ip-filtering-settings]] ==== IP filtering settings diff --git a/x-pack/docs/en/security/auditing/auditing-search-queries.asciidoc b/x-pack/docs/en/security/auditing/auditing-search-queries.asciidoc index fa00f8eeeef52..77680f8cb5d10 100644 --- a/x-pack/docs/en/security/auditing/auditing-search-queries.asciidoc +++ b/x-pack/docs/en/security/auditing/auditing-search-queries.asciidoc @@ -13,26 +13,13 @@ Search queries are contained inside HTTP request bodies, however, and some audit events that are generated by the REST layer can be toggled to output the request body to the audit log. 
-To make certain audit events include the request body, edit the following -settings in the `elasticsearch.yml` file: +To make certain audit events include the request body, edit the following +setting in the `elasticsearch.yml` file: -* For the `logfile` audit output: -+ --- [source,yaml] ---------------------------- xpack.security.audit.logfile.events.emit_request_body: true ---------------------------- --- - -* For the `index` output: -+ --- -[source,yaml] ----------------------------- -xpack.security.audit.index.events.emit_request_body: true ----------------------------- --- IMPORTANT: No filtering is performed when auditing, so sensitive data might be audited in plain text when audit events include the request body. Also, the @@ -44,10 +31,8 @@ generated in the REST layer and can access the request body. Most of them are no included by default. A good practical piece of advice is to add `authentication_success` to the event -types that are audited. Add it to the list in the -`xpack.security.audit.logfile.events.include` or -`xpack.security.audit.index.events.include` settings. This type is not audited -by default. +types that are audited (add it to the list in the `xpack.security.audit.logfile.events.include`), +as this event type is not audited by default. NOTE: Typically, the include list contains other event types as well, such as `access_granted` or `access_denied`. diff --git a/x-pack/docs/en/security/auditing/event-types.asciidoc b/x-pack/docs/en/security/auditing/event-types.asciidoc index 417b26cbd09b3..ace4d7d4c09c8 100644 --- a/x-pack/docs/en/security/auditing/event-types.asciidoc +++ b/x-pack/docs/en/security/auditing/event-types.asciidoc @@ -210,8 +210,7 @@ that have been previously described: === Audit event attributes for the deprecated formats The following table shows the common attributes that can be associated with -every event, when it is output to the `_access.log` file or to the -<>. 
+every event, when it is output to the `_access.log` file. .Common attributes [cols="2,7",options="header"] @@ -229,10 +228,8 @@ every event, when it is output to the `_access.log` file or to the `run_as_denied`, `run_as_granted`. |====== -For an event in the <>, these are -positional attributes, which are printed at the beginning of each log line and -are not adjoined by the attribute name. As a matter of course, the names are -present for each attribute when the event is forwarded to the <>. +These are positional attributes, which are printed at the beginning of each log line and +are not adjoined by the attribute name. The attribute `origin_address` is also common to every audit event. It is always named, that is, it is not positional. It denotes the source IP address of the diff --git a/x-pack/docs/en/security/auditing/forwarding-logs.asciidoc b/x-pack/docs/en/security/auditing/forwarding-logs.asciidoc deleted file mode 100644 index 5bdc25238ad2e..0000000000000 --- a/x-pack/docs/en/security/auditing/forwarding-logs.asciidoc +++ /dev/null @@ -1,99 +0,0 @@ -[role="xpack"] -[[forwarding-audit-logfiles]] -=== Forwarding audit logs to a remote cluster - -When you are auditing security events, you can optionally store the logs in an -{es} index on a remote cluster. The logs are sent to the remote cluster by -using the {javaclient}/transport-client.html[transport client]. - -. Configure auditing such that the logs are stored in {es} rolling indices. -See <>. - -. Establish a connection to the remote cluster by configuring the following -`xpack.security.audit.index.client` settings: -+ --- -[source, yaml] --------------------------------------------------- -xpack.security.audit.index.client.hosts: 192.168.0.1, 192.168.0.2 <1> -xpack.security.audit.index.client.cluster.name: logging-prod <2> -xpack.security.audit.index.client.xpack.security.user: myuser:mypassword <3> --------------------------------------------------- -<1> A list of hosts in the remote cluster. 
If you are not using the default -value for the `transport.port` setting on the remote cluster, you must -specify the appropriate port number (prefixed by a colon) after each host. -<2> The remote cluster name. -<3> A valid user and password, which must have authority to create the -`.security-audit` index on the remote cluster. - -For more information about these settings, see -{ref}/auditing-settings.html#remote-audit-settings[Remote audit log indexing configuration settings]. - --- - -. If the remote cluster has Transport Layer Security (TLS/SSL) enabled, you -must specify extra security settings: - -.. {ref}/configuring-tls.html#node-certificates[Generate a node certificate on -the remote cluster], then copy that certificate to the client. - -.. Enable TLS and specify the information required to access the node certificate. - -*** If the signed certificate is in PKCS#12 format, add the following information -to the `elasticsearch.yml` file: -+ --- -[source,yaml] ------------------------------------------------------------ -xpack.security.audit.index.client.xpack.security.transport.ssl.enabled: true -xpack.security.audit.index.client.xpack.security.transport.ssl.keystore.path: certs/remote-elastic-certificates.p12 -xpack.security.audit.index.client.xpack.security.transport.ssl.truststore.path: certs/remote-elastic-certificates.p12 ------------------------------------------------------------ - -For more information about these settings, see -{ref}/security-settings.html#auditing-tls-ssl-settings[Auditing TLS settings]. 
--- - -*** If the certificate is in PEM format, add the following information to the -`elasticsearch.yml` file: -+ --- -[source, yaml] --------------------------------------------------- -xpack.security.audit.index.client.xpack.security.transport.ssl.enabled: true -xpack.security.audit.index.client.xpack.security.transport.ssl.key: /home/es/config/audit-client.key -xpack.security.audit.index.client.xpack.security.transport.ssl.certificate: /home/es/config/audit-client.crt -xpack.security.audit.index.client.xpack.security.transport.ssl.certificate_authorities: [ "/home/es/config/remote-ca.crt" ] --------------------------------------------------- - -For more information about these settings, see -{ref}/security-settings.html#auditing-tls-ssl-settings[Auditing TLS settings]. --- - -.. If you secured the certificate with a password, add the password to -your {es} keystore: - -*** If the signed certificate is in PKCS#12 format, use the following commands: -+ --- -[source,shell] ------------------------------------------------------------ -bin/elasticsearch-keystore add xpack.security.audit.index.client.xpack.security.transport.ssl.keystore.secure_password - -bin/elasticsearch-keystore add xpack.security.audit.index.client.xpack.security.transport.ssl.truststore.secure_password ------------------------------------------------------------ --- - -*** If the certificate is in PEM format, use the following commands: -+ --- -[source,shell] ------------------------------------------------------------ -bin/elasticsearch-keystore add xpack.security.audit.index.client.xpack.security.transport.ssl.secure_key_passphrase ------------------------------------------------------------ --- - -. Restart {es}. - -When these steps are complete, your audit logs are stored in {es} rolling -indices on the remote cluster. 
\ No newline at end of file diff --git a/x-pack/docs/en/security/auditing/index.asciidoc b/x-pack/docs/en/security/auditing/index.asciidoc index 027482df75fd3..ba79779629a44 100644 --- a/x-pack/docs/en/security/auditing/index.asciidoc +++ b/x-pack/docs/en/security/auditing/index.asciidoc @@ -8,11 +8,5 @@ include::event-types.asciidoc[] :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/output-logfile.asciidoc include::output-logfile.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/output-index.asciidoc -include::output-index.asciidoc[] - :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/auditing-search-queries.asciidoc include::auditing-search-queries.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/forwarding-logs.asciidoc -include::forwarding-logs.asciidoc[] diff --git a/x-pack/docs/en/security/auditing/output-index.asciidoc b/x-pack/docs/en/security/auditing/output-index.asciidoc deleted file mode 100644 index 0d4ea3cc6fc60..0000000000000 --- a/x-pack/docs/en/security/auditing/output-index.asciidoc +++ /dev/null @@ -1,49 +0,0 @@ -[role="xpack"] -[[audit-index]] -=== Index audit output - -In addition to logging to a file, you can store audit logs in Elasticsearch -rolling indices. These indices can be either on the same cluster, or on a -remote cluster. You configure the following settings in -`elasticsearch.yml` to control how audit entries are indexed. To enable -this output, you need to configure the setting `xpack.security.audit.outputs` -in the `elasticsearch.yml` file: - -[source,yaml] ----------------------------- -xpack.security.audit.outputs: [ index, logfile ] ----------------------------- - -For more configuration options, see -{ref}/auditing-settings.html#index-audit-settings[Audit log indexing configuration settings]. 
- -IMPORTANT: No filtering is performed when auditing, so sensitive data may be -audited in plain text when including the request body in audit events. - -[float] -==== Audit index settings - -You can also configure settings for the indices that the events are stored in. -These settings are configured in the `xpack.security.audit.index.settings` namespace -in `elasticsearch.yml`. For example, the following configuration sets the -number of shards and replicas to 1 for the audit indices: - -[source,yaml] ----------------------------- -xpack.security.audit.index.settings: - index: - number_of_shards: 1 - number_of_replicas: 1 ----------------------------- - -These settings apply to the local audit indices, as well as to the -<>, but only if the remote cluster -does *not* have {security-features} enabled or the {es} versions are different. -If the remote cluster has {security-features} enabled and the versions coincide, -the settings for the audit indices there will take precedence, -even if they are unspecified (i.e. left to defaults). - -NOTE: Audit events are batched for indexing so there is a lag before -events appear in the index. You can control how frequently batches of -events are pushed to the index by setting -`xpack.security.audit.index.flush_interval` in `elasticsearch.yml`. diff --git a/x-pack/docs/en/security/auditing/overview.asciidoc b/x-pack/docs/en/security/auditing/overview.asciidoc index 2bd66190fdb27..f0b58684e078f 100644 --- a/x-pack/docs/en/security/auditing/overview.asciidoc +++ b/x-pack/docs/en/security/auditing/overview.asciidoc @@ -13,26 +13,5 @@ Audit logs are **disabled** by default. To enable this functionality, you must set `xpack.security.audit.enabled` to `true` in `elasticsearch.yml`. ============================================================================ -The {es} {security-features} provide two ways to persist audit logs: - -* The <> output, which persists events to - a dedicated `_audit.log` file on the host's file system. 
- For backwards compatibility reasons, a file named `_access.log` - is also generated. -* The <> output, which persists events to an Elasticsearch - index. The audit index can reside on the same cluster, or a separate cluster. - -By default, only the `logfile` output is used when enabling auditing, -implicitly outputting to both `_audit.log` and `_access.log`. -To facilitate browsing and analyzing the events, you can also enable -indexing by setting `xpack.security.audit.outputs` in `elasticsearch.yml`: - -[source,yaml] ----------------------------- -xpack.security.audit.outputs: [ index, logfile ] ----------------------------- - -TIP: If you choose to enable the `index` output type, we strongly recommend that -you still use the `logfile` output as the official record of events. If the -target index is unavailable (for example, during a rolling upgrade), the `index` -output can lose messages. +The audit log persists events to a dedicated `_audit.log` file on +the host's file system (on each node). diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index d9be6129347ec..db2c8c664b9d2 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -131,9 +131,8 @@ and <>. .. Restart {es}. -By default, events are logged to a dedicated `elasticsearch-access.log` file in -`ES_HOME/logs`. You can also store the events in an {es} index for -easier analysis and control what events are logged. +Events are logged to a dedicated `_audit.log` file in +`ES_HOME/logs`, on each cluster node. 
-- :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 86f615a259ff0..665f3ed6d35c0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -23,15 +23,12 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.inject.util.Providers; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; @@ -42,12 +39,7 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContent; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.DiscoveryModule; import 
org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -126,7 +118,6 @@ import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; -import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; import org.elasticsearch.xpack.core.security.support.Automatons; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.ssl.SSLConfiguration; @@ -136,7 +127,6 @@ import org.elasticsearch.xpack.core.ssl.action.GetCertificateInfoAction; import org.elasticsearch.xpack.core.ssl.action.TransportGetCertificateInfoAction; import org.elasticsearch.xpack.core.ssl.rest.RestGetCertificateInfoAction; -import org.elasticsearch.xpack.core.template.TemplateUtils; import org.elasticsearch.xpack.security.action.filter.SecurityActionFilter; import org.elasticsearch.xpack.security.action.interceptor.BulkShardRequestInterceptor; import org.elasticsearch.xpack.security.action.interceptor.IndicesAliasesRequestInterceptor; @@ -172,8 +162,6 @@ import org.elasticsearch.xpack.security.action.user.TransportSetEnabledAction; import org.elasticsearch.xpack.security.audit.AuditTrail; import org.elasticsearch.xpack.security.audit.AuditTrailService; -import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail; -import org.elasticsearch.xpack.security.audit.index.IndexNameResolver; import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authc.InternalRealms; @@ -224,11 +212,7 @@ import org.elasticsearch.xpack.security.transport.netty4.SecurityNetty4ServerTransport; import org.elasticsearch.xpack.security.transport.nio.SecurityNioHttpServerTransport; import 
org.elasticsearch.xpack.security.transport.nio.SecurityNioTransport; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.nio.file.Path; import java.time.Clock; import java.util.ArrayList; @@ -237,7 +221,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -262,14 +245,6 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw private static final Logger logger = LogManager.getLogger(Security.class); - static final Setting> AUDIT_OUTPUTS_SETTING = - Setting.listSetting(SecurityField.setting("audit.outputs"), - Function.identity(), - s -> s.keySet().contains(SecurityField.setting("audit.outputs")) - ? Collections.emptyList() - : Collections.singletonList(LoggingAuditTrail.NAME), - Property.NodeScope); - private final Settings settings; private final Environment env; private final boolean enabled; @@ -286,7 +261,6 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw private final SetOnce tokenService = new SetOnce<>(); private final SetOnce securityActionFilter = new SetOnce<>(); private final SetOnce securityIndex = new SetOnce<>(); - private final SetOnce indexAuditTrail = new SetOnce<>(); private final SetOnce groupFactory = new SetOnce<>(); private final List bootstrapChecks; private final List securityExtensions = new ArrayList<>(); @@ -324,7 +298,6 @@ public Security(Settings settings, final Path configPath) { } private static void runStartupChecks(Settings settings) { - validateAutoCreateIndex(settings); validateRealmSettings(settings); } @@ -402,31 +375,11 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste securityContext.set(new SecurityContext(settings, threadPool.getThreadContext())); components.add(securityContext.get()); - // audit trails 
construction - Set auditTrails = new LinkedHashSet<>(); - if (XPackSettings.AUDIT_ENABLED.get(settings)) { - List outputs = AUDIT_OUTPUTS_SETTING.get(settings); - if (outputs.isEmpty()) { - throw new IllegalArgumentException("Audit logging is enabled but there are zero output types in " - + XPackSettings.AUDIT_ENABLED.getKey()); - } - - for (String output : outputs) { - switch (output) { - case LoggingAuditTrail.NAME: - auditTrails.add(new LoggingAuditTrail(settings, clusterService, threadPool)); - break; - case IndexAuditTrail.NAME: - indexAuditTrail.set(new IndexAuditTrail(settings, client, threadPool, clusterService)); - auditTrails.add(indexAuditTrail.get()); - break; - default: - throw new IllegalArgumentException("Unknown audit trail output [" + output + "]"); - } - } - } - final AuditTrailService auditTrailService = - new AuditTrailService(new ArrayList<>(auditTrails), getLicenseState()); + // audit trail service construction + final List auditTrails = XPackSettings.AUDIT_ENABLED.get(settings) + ? 
Collections.singletonList(new LoggingAuditTrail(settings, clusterService, threadPool)) + : Collections.emptyList(); + final AuditTrailService auditTrailService = new AuditTrailService(auditTrails, getLicenseState()); components.add(auditTrailService); this.auditTrailService.set(auditTrailService); @@ -613,9 +566,7 @@ public static List> getSettings(boolean transportClientMode, List outputs = AUDIT_OUTPUTS_SETTING.get(settings); - for (String output : outputs) { - if (output.equals(IndexAuditTrail.NAME)) { - return true; - } - } - } - return false; - } - - static void validateAutoCreateIndex(Settings settings) { - String value = settings.get("action.auto_create_index"); - if (value == null) { - return; - } - - final boolean indexAuditingEnabled = Security.indexAuditLoggingEnabled(settings); - if (indexAuditingEnabled) { - String auditIndex = IndexAuditTrailField.INDEX_NAME_PREFIX + "*"; - String errorMessage = LoggerMessageFormat.format( - "the [action.auto_create_index] setting value [{}] is too" + - " restrictive. 
disable [action.auto_create_index] or set it to include " + - "[{}]", (Object) value, auditIndex); - if (Booleans.isFalse(value)) { - throw new IllegalArgumentException(errorMessage); - } - - if (Booleans.isTrue(value)) { - return; - } - - String[] matches = Strings.commaDelimitedListToStringArray(value); - List indices = new ArrayList<>(); - DateTime now = new DateTime(DateTimeZone.UTC); - // just use daily rollover - - indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now, IndexNameResolver.Rollover.DAILY)); - indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now.plusDays(1), - IndexNameResolver.Rollover.DAILY)); - indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now.plusMonths(1), - IndexNameResolver.Rollover.DAILY)); - indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now.plusMonths(2), - IndexNameResolver.Rollover.DAILY)); - indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now.plusMonths(3), - IndexNameResolver.Rollover.DAILY)); - indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now.plusMonths(4), - IndexNameResolver.Rollover.DAILY)); - indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now.plusMonths(5), - IndexNameResolver.Rollover.DAILY)); - indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now.plusMonths(6), - IndexNameResolver.Rollover.DAILY)); - - for (String index : indices) { - boolean matched = false; - for (String match : matches) { - char c = match.charAt(0); - if (c == '-') { - if (Regex.simpleMatch(match.substring(1), index)) { - throw new IllegalArgumentException(errorMessage); - } - } else if (c == '+') { - if (Regex.simpleMatch(match.substring(1), index)) { - matched = true; - break; - } - } else { - if (Regex.simpleMatch(match, index)) { - matched = true; - break; - } - } - } - if (!matched) { - throw new 
IllegalArgumentException(errorMessage); - } - } - - logger.warn("the [action.auto_create_index] setting is configured to be restrictive [{}]. " + - " for the next 6 months audit indices are allowed to be created, but please make sure" + - " that any future history indices after 6 months with the pattern " + - "[.security_audit_log*] are allowed to be created", value); - } - } - @Override public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, ThreadContext threadContext) { if (transportClientMode || enabled == false) { // don't register anything if we are not enabled @@ -998,23 +863,9 @@ public List> getExecutorBuilders(final Settings settings) { @Override public UnaryOperator> getIndexTemplateMetaDataUpgrader() { return templates -> { + // .security index is not managed by using templates anymore templates.remove(SECURITY_TEMPLATE_NAME); - final XContent xContent = XContentFactory.xContent(XContentType.JSON); - final byte[] auditTemplate = TemplateUtils.loadTemplate("/" + IndexAuditTrail.INDEX_TEMPLATE_NAME + ".json", - Version.CURRENT.toString(), SecurityIndexManager.TEMPLATE_VERSION_PATTERN).getBytes(StandardCharsets.UTF_8); - - try (XContentParser parser = xContent - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, auditTemplate)) { - IndexTemplateMetaData auditMetadata = new IndexTemplateMetaData.Builder( - IndexTemplateMetaData.Builder.fromXContent(parser, IndexAuditTrail.INDEX_TEMPLATE_NAME)) - .settings(IndexAuditTrail.customAuditIndexSettings(settings, logger)) - .build(); - templates.put(IndexAuditTrail.INDEX_TEMPLATE_NAME, auditMetadata); - } catch (IOException e) { - // TODO: should we handle this with a thrown exception? 
- logger.error("Error loading template [{}] as part of metadata upgrading", IndexAuditTrail.INDEX_TEMPLATE_NAME); - } - + templates.remove("security_audit_log"); return templates; }; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java index 6f357790d2f6e..bc79fab0043aa 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java @@ -16,11 +16,13 @@ import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.SecurityFeatureSetUsage; import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; import org.elasticsearch.xpack.security.transport.filter.IPFilter; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -153,7 +155,10 @@ static Map sslUsage(Settings settings) { static Map auditUsage(Settings settings) { Map map = new HashMap<>(2); map.put("enabled", XPackSettings.AUDIT_ENABLED.get(settings)); - map.put("outputs", Security.AUDIT_OUTPUTS_SETTING.get(settings)); + if (XPackSettings.AUDIT_ENABLED.get(settings)) { + // the only available output type is "logfile", but the "outputs" key is kept for compatibility with the previous reporting format + map.put("outputs", Arrays.asList(LoggingAuditTrail.NAME)); + } return map; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java deleted file mode 100644 index 914a029c0c434..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java +++ /dev/null @@ -1,1240 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security.audit.index; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; -import org.elasticsearch.action.bulk.BulkProcessor; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.metadata.AliasOrIndex; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Tuple; -import 
org.elasticsearch.common.component.LifecycleListener; -import org.elasticsearch.common.network.NetworkAddress; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.gateway.GatewayService; -import org.elasticsearch.node.Node; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportMessage; -import org.elasticsearch.xpack.core.XPackClientPlugin; -import org.elasticsearch.xpack.core.security.authc.Authentication; -import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; -import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; -import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.core.security.user.XPackUser; -import org.elasticsearch.xpack.core.template.TemplateUtils; -import org.elasticsearch.xpack.security.audit.AuditLevel; -import org.elasticsearch.xpack.security.audit.AuditTrail; -import org.elasticsearch.xpack.security.rest.RemoteHostHeader; -import org.elasticsearch.xpack.security.support.SecurityIndexManager; -import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; - -import 
java.io.Closeable; -import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.EnumSet; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; -import java.util.function.Function; -import java.util.stream.Collectors; - -import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.clientWithOrigin; -import static org.elasticsearch.xpack.core.security.SecurityField.setting; -import static org.elasticsearch.xpack.security.audit.AuditLevel.ACCESS_DENIED; -import static org.elasticsearch.xpack.security.audit.AuditLevel.ACCESS_GRANTED; -import static org.elasticsearch.xpack.security.audit.AuditLevel.ANONYMOUS_ACCESS_DENIED; -import static org.elasticsearch.xpack.security.audit.AuditLevel.AUTHENTICATION_FAILED; -import static org.elasticsearch.xpack.security.audit.AuditLevel.AUTHENTICATION_SUCCESS; -import static org.elasticsearch.xpack.security.audit.AuditLevel.CONNECTION_DENIED; -import static org.elasticsearch.xpack.security.audit.AuditLevel.CONNECTION_GRANTED; -import static org.elasticsearch.xpack.security.audit.AuditLevel.REALM_AUTHENTICATION_FAILED; -import static org.elasticsearch.xpack.security.audit.AuditLevel.RUN_AS_DENIED; -import static org.elasticsearch.xpack.security.audit.AuditLevel.RUN_AS_GRANTED; -import static org.elasticsearch.xpack.security.audit.AuditLevel.SYSTEM_ACCESS_GRANTED; -import static 
org.elasticsearch.xpack.security.audit.AuditLevel.TAMPERED_REQUEST; -import static org.elasticsearch.xpack.security.audit.AuditLevel.parse; -import static org.elasticsearch.xpack.security.audit.AuditUtil.indices; -import static org.elasticsearch.xpack.security.audit.AuditUtil.restRequestContent; -import static org.elasticsearch.xpack.security.audit.index.IndexNameResolver.resolve; -import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_VERSION_STRING; - -/** - * Audit trail implementation that writes events into an index. - */ -public class IndexAuditTrail implements AuditTrail, ClusterStateListener { - - public static final String NAME = "index"; - public static final String DOC_TYPE = "doc"; - public static final String INDEX_TEMPLATE_NAME = "security_audit_log"; - - private static final int DEFAULT_BULK_SIZE = 1000; - private static final int MAX_BULK_SIZE = 10000; - private static final int DEFAULT_MAX_QUEUE_SIZE = 10000; - private static final TimeValue DEFAULT_FLUSH_INTERVAL = TimeValue.timeValueSeconds(1); - private static final IndexNameResolver.Rollover DEFAULT_ROLLOVER = IndexNameResolver.Rollover.DAILY; - private static final Setting ROLLOVER_SETTING = - new Setting<>(setting("audit.index.rollover"), (s) -> DEFAULT_ROLLOVER.name(), - s -> IndexNameResolver.Rollover.valueOf(s.toUpperCase(Locale.ENGLISH)), Property.NodeScope); - private static final Setting QUEUE_SIZE_SETTING = - Setting.intSetting(setting("audit.index.queue_max_size"), DEFAULT_MAX_QUEUE_SIZE, 1, Property.NodeScope); - private static final String DEFAULT_CLIENT_NAME = "security-audit-client"; - - private static final List DEFAULT_EVENT_INCLUDES = Arrays.asList( - ACCESS_DENIED.toString(), - ACCESS_GRANTED.toString(), - ANONYMOUS_ACCESS_DENIED.toString(), - AUTHENTICATION_FAILED.toString(), - REALM_AUTHENTICATION_FAILED.toString(), - CONNECTION_DENIED.toString(), - CONNECTION_GRANTED.toString(), - TAMPERED_REQUEST.toString(), - RUN_AS_DENIED.toString(), - 
RUN_AS_GRANTED.toString(), - AUTHENTICATION_SUCCESS.toString() - ); - private static final String FORBIDDEN_INDEX_SETTING = "index.mapper.dynamic"; - - private static final Setting INDEX_SETTINGS = - Setting.groupSetting(setting("audit.index.settings.index."), Property.NodeScope); - private static final Setting> INCLUDE_EVENT_SETTINGS = - Setting.listSetting(setting("audit.index.events.include"), DEFAULT_EVENT_INCLUDES, Function.identity(), - Property.NodeScope); - private static final Setting> EXCLUDE_EVENT_SETTINGS = - Setting.listSetting(setting("audit.index.events.exclude"), Collections.emptyList(), - Function.identity(), Property.NodeScope); - private static final Setting INCLUDE_REQUEST_BODY = - Setting.boolSetting(setting("audit.index.events.emit_request_body"), false, Property.NodeScope); - private static final Setting REMOTE_CLIENT_SETTINGS = - Setting.groupSetting(setting("audit.index.client."), Property.NodeScope); - private static final Setting BULK_SIZE_SETTING = - Setting.intSetting(setting("audit.index.bulk_size"), DEFAULT_BULK_SIZE, 1, MAX_BULK_SIZE, Property.NodeScope); - private static final Setting FLUSH_TIMEOUT_SETTING = - Setting.timeSetting(setting("audit.index.flush_interval"), DEFAULT_FLUSH_INTERVAL, - TimeValue.timeValueMillis(1L), Property.NodeScope); - private static final Logger logger = LogManager.getLogger(IndexAuditTrail.class); - - private final AtomicReference state = new AtomicReference<>(State.INITIALIZED); - private final Settings settings; - private final String nodeName; - private final Client client; - private final QueueConsumer queueConsumer; - private final ThreadPool threadPool; - private final ClusterService clusterService; - private final boolean indexToRemoteCluster; - private final EnumSet events; - private final IndexNameResolver.Rollover rollover; - private final boolean includeRequestBody; - - private BulkProcessor bulkProcessor; - private String nodeHostName; - private String nodeHostAddress; - - @Override - public 
String name() { - return NAME; - } - - public IndexAuditTrail(Settings settings, Client client, ThreadPool threadPool, ClusterService clusterService) { - this.settings = settings; - this.threadPool = threadPool; - this.clusterService = clusterService; - this.nodeName = Node.NODE_NAME_SETTING.get(settings); - final int maxQueueSize = QUEUE_SIZE_SETTING.get(settings); - this.queueConsumer = new QueueConsumer(EsExecutors.threadName(settings, "audit-queue-consumer"), createQueue(maxQueueSize)); - this.rollover = ROLLOVER_SETTING.get(settings); - this.events = parse(INCLUDE_EVENT_SETTINGS.get(settings), EXCLUDE_EVENT_SETTINGS.get(settings)); - this.indexToRemoteCluster = REMOTE_CLIENT_SETTINGS.get(settings).names().size() > 0; - this.includeRequestBody = INCLUDE_REQUEST_BODY.get(settings); - - if (indexToRemoteCluster == false) { - // in the absence of client settings for remote indexing, fall back to the client that was passed in. - this.client = clientWithOrigin(client, SECURITY_ORIGIN); - } else { - this.client = initializeRemoteClient(settings, logger); - } - clusterService.addListener(this); - clusterService.addLifecycleListener(new LifecycleListener() { - @Override - public void beforeStop() { - stop(); - } - }); - - } - - public State state() { - return state.get(); - } - - @Override - public void clusterChanged(ClusterChangedEvent event) { - try { - if (state() == IndexAuditTrail.State.INITIALIZED && canStart(event)) { - threadPool.generic().execute(new AbstractRunnable() { - - @Override - public void onFailure(Exception throwable) { - logger.error("failed to start index audit trail services", throwable); - assert false : "security lifecycle services startup failed"; - } - - @Override - public void doRun() { - start(); - } - }); - } - } catch (Exception e) { - logger.error("failed to start index audit trail", e); - } - } - - /** - * This method determines if this service can be started based on the state in the {@link ClusterChangedEvent} and - * if the node is 
the master or not. When using remote indexing, a call to the remote cluster will be made to retrieve - * the state and the same rules will be applied. In order for the service to start, the following must be true: - *
      - *
    1. The cluster must not have a {@link GatewayService#STATE_NOT_RECOVERED_BLOCK}; in other words the gateway - * must have recovered from disk already.
    2. - *
    3. The current node must be the master OR the security_audit_log index template must exist
    4. - *
    5. The current audit index must not exist or have all primary shards active. The current audit index name - * is determined by the rollover settings and current time
    6. - *
    - * - * @param event the {@link ClusterChangedEvent} containing the up to date cluster state - * @return true if all requirements are met and the service can be started - */ - public boolean canStart(ClusterChangedEvent event) { - if (indexToRemoteCluster) { - // just return true as we do not determine whether we can start or not based on the local cluster state, but must base it off - // of the remote cluster state and this method is called on the cluster state update thread, so we do not really want to - // execute remote calls on this thread - return true; - } - synchronized (this) { - return canStart(event.state()); - } - } - - private boolean canStart(ClusterState clusterState) { - if (clusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { - // wait until the gateway has recovered from disk, otherwise we think may not have audit indices - // but they may not have been restored from the cluster state on disk - logger.debug("index audit trail waiting until gateway has recovered from disk"); - return false; - } - - if (TemplateUtils.checkTemplateExistsAndVersionMatches(INDEX_TEMPLATE_NAME, SECURITY_VERSION_STRING, - clusterState, logger, Version.CURRENT::onOrBefore) == false) { - logger.debug("security audit index template [{}] is not up to date", INDEX_TEMPLATE_NAME); - return false; - } - - String index = getIndexName(); - IndexMetaData metaData = clusterState.metaData().index(index); - if (metaData == null) { - logger.debug("security audit index [{}] does not exist, so service can start", index); - return true; - } - - if (clusterState.routingTable().index(index).allPrimaryShardsActive()) { - logger.debug("security audit index [{}] all primary shards started, so service can start", index); - return true; - } - logger.debug("security audit index [{}] does not have all primary shards started, so service cannot start", index); - return false; - } - - private String getIndexName() { - final Message first = peek(); - final String 
index; - if (first == null) { - index = resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, DateTime.now(DateTimeZone.UTC), rollover); - } else { - index = resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, first.timestamp, rollover); - } - return index; - } - - private boolean hasStaleMessage() { - final Message first = peek(); - if (first == null) { - return false; - } - return false == IndexNameResolver.resolve(first.timestamp, rollover) - .equals(IndexNameResolver.resolve(DateTime.now(DateTimeZone.UTC), rollover)); - } - - /** - * Starts the service. The state is moved to {@link org.elasticsearch.xpack.security.audit.index.IndexAuditTrail.State#STARTING} - * at the beginning of the method. The service's components are initialized and if the current node is the master, the index - * template will be stored. The state is moved {@link org.elasticsearch.xpack.security.audit.index.IndexAuditTrail.State#STARTED} - * and before returning the queue of messages that came before the service started is drained. 
- */ - public void start() { - if (state.compareAndSet(State.INITIALIZED, State.STARTING)) { - this.nodeHostName = clusterService.localNode().getHostName(); - this.nodeHostAddress = clusterService.localNode().getHostAddress(); - if (indexToRemoteCluster) { - client.admin().cluster().prepareState().execute(new ActionListener() { - @Override - public void onResponse(ClusterStateResponse clusterStateResponse) { - logger.trace("remote cluster state is [{}] [{}]", - clusterStateResponse.getClusterName(), clusterStateResponse.getState()); - if (canStart(clusterStateResponse.getState())) { - updateCurrentIndexMappingsIfNecessary(clusterStateResponse.getState()); - } else if (TemplateUtils.checkTemplateExistsAndVersionMatches(INDEX_TEMPLATE_NAME, - SECURITY_VERSION_STRING, clusterStateResponse.getState(), logger, - Version.CURRENT::onOrAfter) == false) { - putTemplate(customAuditIndexSettings(settings, logger), - e -> { - logger.error("failed to put audit trail template", e); - transitionStartingToInitialized(); - }); - } else { - // for some reason we can't start up since the remote cluster is not fully setup. in this case - // we try to wait for yellow status (all primaries started up) this will also wait for - // state recovery etc. - String indexName = getIndexName(); - // if this index doesn't exists the call will fail with a not_found exception... 
- client.admin().cluster().prepareHealth().setIndices(indexName).setWaitForYellowStatus().execute( - ActionListener.wrap( - (x) -> { - logger.debug("have yellow status on remote index [{}] ", indexName); - transitionStartingToInitialized(); - start(); - }, - (e) -> { - logger.error("failed to get wait for yellow status on remote index [" + indexName + "]", e); - transitionStartingToInitialized(); - })); - } - } - - @Override - public void onFailure(Exception e) { - transitionStartingToInitialized(); - logger.error("failed to get remote cluster state", e); - } - }); - } else { - updateCurrentIndexMappingsIfNecessary(clusterService.state()); - } - } - } - - // pkg private for tests - void updateCurrentIndexMappingsIfNecessary(ClusterState state) { - final String index = getIndexName(); - - AliasOrIndex aliasOrIndex = state.getMetaData().getAliasAndIndexLookup().get(index); - if (aliasOrIndex != null) { - // check mappings - final List indices = aliasOrIndex.getIndices(); - if (aliasOrIndex.isAlias() && indices.size() > 1) { - throw new IllegalStateException("Alias [" + index + "] points to more than one index: " + - indices.stream().map(imd -> imd.getIndex().getName()).collect(Collectors.toList())); - } - IndexMetaData indexMetaData = indices.get(0); - MappingMetaData docMapping = indexMetaData.getMappings().get("doc"); - if (docMapping == null) { - if (indexToRemoteCluster || state.nodes().isLocalNodeElectedMaster() || hasStaleMessage()) { - putAuditIndexMappingsAndStart(index); - } else { - logger.trace("audit index [{}] is missing mapping for type [{}]", index, DOC_TYPE); - transitionStartingToInitialized(); - } - } else { - @SuppressWarnings("unchecked") - Map meta = (Map) docMapping.sourceAsMap().get("_meta"); - if (meta == null) { - logger.info("Missing _meta field in mapping [{}] of index [{}]", docMapping.type(), index); - throw new IllegalStateException("Cannot read security-version string in index " + index); - } - - final String versionString = (String) 
meta.get(SECURITY_VERSION_STRING); - if (versionString != null && Version.fromString(versionString).onOrAfter(Version.CURRENT)) { - innerStart(); - } else { - if (indexToRemoteCluster || state.nodes().isLocalNodeElectedMaster() || hasStaleMessage()) { - putAuditIndexMappingsAndStart(index); - } else if (versionString == null) { - logger.trace("audit index [{}] mapping is missing meta field [{}]", index, SECURITY_VERSION_STRING); - transitionStartingToInitialized(); - } else { - logger.trace("audit index [{}] has the incorrect version [{}]", index, versionString); - transitionStartingToInitialized(); - } - } - } - } else { - innerStart(); - } - } - - private void putAuditIndexMappingsAndStart(String index) { - putAuditIndexMappings(index, getPutIndexTemplateRequest(Settings.EMPTY).mappings().get(DOC_TYPE), - ActionListener.wrap(ignore -> { - logger.trace("updated mappings on audit index [{}]", index); - innerStart(); - }, e -> { - logger.error(new ParameterizedMessage("failed to update mappings on audit index [{}]", index), e); - transitionStartingToInitialized(); // reset to initialized so we can retry - })); - } - - private void transitionStartingToInitialized() { - if (state.compareAndSet(State.STARTING, State.INITIALIZED) == false) { - final String message = "state transition from starting to initialized failed, current value: " + state.get(); - assert false : message; - logger.error(message); - } - } - - void innerStart() { - initializeBulkProcessor(); - queueConsumer.start(); - if (state.compareAndSet(State.STARTING, State.STARTED) == false) { - final String message = "state transition from starting to started failed, current value: " + state.get(); - assert false : message; - logger.error(message); - } else { - logger.trace("successful state transition from starting to started, current value: [{}]", state.get()); - } - } - - public synchronized void stop() { - if (state.compareAndSet(State.STARTED, State.STOPPING)) { - queueConsumer.close(); - } - - if 
(state() != State.STOPPED) { - try { - if (bulkProcessor != null) { - if (bulkProcessor.awaitClose(10, TimeUnit.SECONDS) == false) { - logger.warn("index audit trail failed to store all pending events after waiting for 10s"); - } - } - } catch (InterruptedException exc) { - Thread.currentThread().interrupt(); - } finally { - if (indexToRemoteCluster) { - client.close(); - } - state.set(State.STOPPED); - } - } - } - - @Override - public void authenticationSuccess(String requestId, String realm, User user, RestRequest request) { - if (events.contains(AUTHENTICATION_SUCCESS)) { - try { - enqueue(message("authentication_success", new Tuple<>(realm, realm), user, null, request), "authentication_success"); - } catch (final Exception e) { - logger.warn("failed to index audit event: [authentication_success]", e); - } - } - } - - @Override - public void authenticationSuccess(String requestId, String realm, User user, String action, TransportMessage message) { - if (events.contains(AUTHENTICATION_SUCCESS)) { - try { - enqueue(message("authentication_success", action, user, null, new Tuple<>(realm, realm), null, message), - "authentication_success"); - } catch (final Exception e) { - logger.warn("failed to index audit event: [authentication_success]", e); - } - } - } - - @Override - public void anonymousAccessDenied(String requestId, String action, TransportMessage message) { - if (events.contains(ANONYMOUS_ACCESS_DENIED)) { - try { - enqueue(message("anonymous_access_denied", action, (User) null, null, null, indices(message), message), - "anonymous_access_denied"); - } catch (Exception e) { - logger.warn("failed to index audit event: [anonymous_access_denied]", e); - } - } - } - - @Override - public void anonymousAccessDenied(String requestId, RestRequest request) { - if (events.contains(ANONYMOUS_ACCESS_DENIED)) { - try { - enqueue(message("anonymous_access_denied", null, null, null, null, request), "anonymous_access_denied"); - } catch (Exception e) { - logger.warn("failed 
to index audit event: [anonymous_access_denied]", e); - } - } - } - - @Override - public void authenticationFailed(String requestId, String action, TransportMessage message) { - if (events.contains(AUTHENTICATION_FAILED)) { - try { - enqueue(message("authentication_failed", action, (User) null, null, null, indices(message), message), - "authentication_failed"); - } catch (Exception e) { - logger.warn("failed to index audit event: [authentication_failed]", e); - } - } - } - - @Override - public void authenticationFailed(String requestId, RestRequest request) { - if (events.contains(AUTHENTICATION_FAILED)) { - try { - enqueue(message("authentication_failed", null, null, null, null, request), "authentication_failed"); - } catch (Exception e) { - logger.warn("failed to index audit event: [authentication_failed]", e); - } - } - } - - @Override - public void authenticationFailed(String requestId, AuthenticationToken token, String action, TransportMessage message) { - if (events.contains(AUTHENTICATION_FAILED)) { - if (XPackUser.is(token.principal()) == false) { - try { - enqueue(message("authentication_failed", action, token, null, indices(message), message), "authentication_failed"); - } catch (Exception e) { - logger.warn("failed to index audit event: [authentication_failed]", e); - } - } - } - } - - @Override - public void authenticationFailed(String requestId, AuthenticationToken token, RestRequest request) { - if (events.contains(AUTHENTICATION_FAILED)) { - if (XPackUser.is(token.principal()) == false) { - try { - enqueue(message("authentication_failed", null, token, null, null, request), "authentication_failed"); - } catch (Exception e) { - logger.warn("failed to index audit event: [authentication_failed]", e); - } - } - } - } - - @Override - public void authenticationFailed(String requestId, String realm, AuthenticationToken token, String action, TransportMessage message) { - if (events.contains(REALM_AUTHENTICATION_FAILED)) { - if (XPackUser.is(token.principal()) 
== false) { - try { - enqueue(message("realm_authentication_failed", action, token, realm, indices(message), message), - "realm_authentication_failed"); - } catch (Exception e) { - logger.warn("failed to index audit event: [authentication_failed]", e); - } - } - } - } - - @Override - public void authenticationFailed(String requestId, String realm, AuthenticationToken token, RestRequest request) { - if (events.contains(REALM_AUTHENTICATION_FAILED)) { - if (XPackUser.is(token.principal()) == false) { - try { - enqueue(message("realm_authentication_failed", null, token, realm, null, request), "realm_authentication_failed"); - } catch (Exception e) { - logger.warn("failed to index audit event: [authentication_failed]", e); - } - } - } - } - - @Override - public void accessGranted(String requestId, Authentication authentication, String action, TransportMessage msg, String[] roleNames) { - final User user = authentication.getUser(); - final boolean isSystem = SystemUser.is(user) || XPackUser.is(user); - final boolean logSystemAccessGranted = isSystem && events.contains(SYSTEM_ACCESS_GRANTED); - final boolean shouldLog = logSystemAccessGranted || (isSystem == false && events.contains(ACCESS_GRANTED)); - if (shouldLog) { - try { - assert authentication.getAuthenticatedBy() != null; - final String authRealmName = authentication.getAuthenticatedBy().getName(); - final String lookRealmName = authentication.getLookedUpBy() == null ? 
null : authentication.getLookedUpBy().getName(); - enqueue(message("access_granted", action, user, roleNames, new Tuple(authRealmName, lookRealmName), indices(msg), - msg), "access_granted"); - } catch (final Exception e) { - logger.warn("failed to index audit event: [access_granted]", e); - } - } - } - - @Override - public void accessDenied(String requestId, Authentication authentication, String action, TransportMessage message, String[] roleNames) { - if (events.contains(ACCESS_DENIED) && (XPackUser.is(authentication.getUser()) == false)) { - try { - assert authentication.getAuthenticatedBy() != null; - final String authRealmName = authentication.getAuthenticatedBy().getName(); - final String lookRealmName = authentication.getLookedUpBy() == null ? null : authentication.getLookedUpBy().getName(); - enqueue(message("access_denied", action, authentication.getUser(), roleNames, new Tuple(authRealmName, lookRealmName), - indices(message), message), "access_denied"); - } catch (final Exception e) { - logger.warn("failed to index audit event: [access_denied]", e); - } - } - } - - @Override - public void tamperedRequest(String requestId, RestRequest request) { - if (events.contains(TAMPERED_REQUEST)) { - try { - enqueue(message("tampered_request", null, null, null, null, request), "tampered_request"); - } catch (Exception e) { - logger.warn("failed to index audit event: [tampered_request]", e); - } - } - } - - @Override - public void tamperedRequest(String requestId, String action, TransportMessage message) { - if (events.contains(TAMPERED_REQUEST)) { - try { - enqueue(message("tampered_request", action, (User) null, null, null, indices(message), message), "tampered_request"); - } catch (Exception e) { - logger.warn("failed to index audit event: [tampered_request]", e); - } - } - } - - @Override - public void tamperedRequest(String requestId, User user, String action, TransportMessage request) { - if (events.contains(TAMPERED_REQUEST) && XPackUser.is(user) == false) { - 
try { - enqueue(message("tampered_request", action, user, null, null, indices(request), request), "tampered_request"); - } catch (Exception e) { - logger.warn("failed to index audit event: [tampered_request]", e); - } - } - } - - @Override - public void connectionGranted(InetAddress inetAddress, String profile, SecurityIpFilterRule rule) { - if (events.contains(CONNECTION_GRANTED)) { - try { - enqueue(message("ip_filter", "connection_granted", inetAddress, profile, rule), "connection_granted"); - } catch (Exception e) { - logger.warn("failed to index audit event: [connection_granted]", e); - } - } - } - - @Override - public void connectionDenied(InetAddress inetAddress, String profile, SecurityIpFilterRule rule) { - if (events.contains(CONNECTION_DENIED)) { - try { - enqueue(message("ip_filter", "connection_denied", inetAddress, profile, rule), "connection_denied"); - } catch (Exception e) { - logger.warn("failed to index audit event: [connection_denied]", e); - } - } - } - - @Override - public void runAsGranted(String requestId, Authentication authentication, String action, TransportMessage message, String[] roleNames) { - if (events.contains(RUN_AS_GRANTED)) { - try { - assert authentication.getAuthenticatedBy() != null; - final String authRealmName = authentication.getAuthenticatedBy().getName(); - final String lookRealmName = authentication.getLookedUpBy() == null ? 
null : authentication.getLookedUpBy().getName(); - enqueue(message("run_as_granted", action, authentication.getUser(), roleNames, new Tuple<>(authRealmName, lookRealmName), - null, message), "run_as_granted"); - } catch (final Exception e) { - logger.warn("failed to index audit event: [run_as_granted]", e); - } - } - } - - @Override - public void runAsDenied(String requestId, Authentication authentication, String action, TransportMessage message, String[] roleNames) { - if (events.contains(RUN_AS_DENIED)) { - try { - assert authentication.getAuthenticatedBy() != null; - final String authRealmName = authentication.getAuthenticatedBy().getName(); - final String lookRealmName = authentication.getLookedUpBy() == null ? null : authentication.getLookedUpBy().getName(); - enqueue(message("run_as_denied", action, authentication.getUser(), roleNames, new Tuple<>(authRealmName, lookRealmName), - null, message), "run_as_denied"); - } catch (final Exception e) { - logger.warn("failed to index audit event: [run_as_denied]", e); - } - } - } - - @Override - public void runAsDenied(String requestId, Authentication authentication, RestRequest request, String[] roleNames) { - if (events.contains(RUN_AS_DENIED)) { - try { - assert authentication.getAuthenticatedBy() != null; - final String authRealmName = authentication.getAuthenticatedBy().getName(); - final String lookRealmName = authentication.getLookedUpBy() == null ? 
null : authentication.getLookedUpBy().getName(); - enqueue(message("run_as_denied", new Tuple<>(authRealmName, lookRealmName), authentication.getUser(), roleNames, request), - "run_as_denied"); - } catch (final Exception e) { - logger.warn("failed to index audit event: [run_as_denied]", e); - } - } - } - - private Message message(String type, @Nullable String action, @Nullable User user, @Nullable String[] roleNames, - @Nullable Tuple realms, @Nullable Set indices, TransportMessage message) - throws Exception { - - Message msg = new Message().start(); - common("transport", type, msg.builder); - originAttributes(message, msg.builder, clusterService.localNode(), threadPool.getThreadContext()); - - if (action != null) { - msg.builder.field(Field.ACTION, action); - } - addUserAndRealmFields(msg.builder, type, user, realms); - if (roleNames != null) { - msg.builder.array(Field.ROLE_NAMES, roleNames); - } - if (indices != null) { - msg.builder.array(Field.INDICES, indices.toArray(Strings.EMPTY_ARRAY)); - } - msg.builder.field(Field.REQUEST, message.getClass().getSimpleName()); - - return msg.end(); - } - - private void addUserAndRealmFields(XContentBuilder builder, String type, @Nullable User user, @Nullable Tuple realms) - throws IOException { - if (user != null) { - if (user.isRunAs()) { - if ("run_as_granted".equals(type) || "run_as_denied".equals(type)) { - builder.field(Field.PRINCIPAL, user.authenticatedUser().principal()); - builder.field(Field.RUN_AS_PRINCIPAL, user.principal()); - if (realms != null) { - // realms.v1() is the authenticating realm - builder.field(Field.REALM, realms.v1()); - // realms.v2() is the lookup realm - builder.field(Field.RUN_AS_REALM, realms.v2()); - } - } else { - // TODO: this doesn't make sense... 
- builder.field(Field.PRINCIPAL, user.principal()); - builder.field(Field.RUN_BY_PRINCIPAL, user.authenticatedUser().principal()); - if (realms != null) { - // realms.v2() is the lookup realm - builder.field(Field.REALM, realms.v2()); - // realms.v1() is the authenticating realm - builder.field(Field.RUN_BY_REALM, realms.v1()); - } - } - } else { - builder.field(Field.PRINCIPAL, user.principal()); - if (realms != null) { - // realms.v1() is the authenticating realm - builder.field(Field.REALM, realms.v1()); - } - } - } - } - - // FIXME - clean up the message generation - private Message message(String type, @Nullable String action, @Nullable AuthenticationToken token, - @Nullable String realm, @Nullable Set indices, TransportMessage message) throws Exception { - - Message msg = new Message().start(); - common("transport", type, msg.builder); - originAttributes(message, msg.builder, clusterService.localNode(), threadPool.getThreadContext()); - - if (action != null) { - msg.builder.field(Field.ACTION, action); - } - if (token != null) { - msg.builder.field(Field.PRINCIPAL, token.principal()); - } - if (realm != null) { - msg.builder.field(Field.REALM, realm); - } - if (indices != null) { - msg.builder.array(Field.INDICES, indices.toArray(Strings.EMPTY_ARRAY)); - } - msg.builder.field(Field.REQUEST, message.getClass().getSimpleName()); - - return msg.end(); - } - - private Message message(String type, @Nullable String action, @Nullable AuthenticationToken token, - @Nullable String realm, @Nullable Set indices, RestRequest request) throws Exception { - - Message msg = new Message().start(); - common("rest", type, msg.builder); - - if (action != null) { - msg.builder.field(Field.ACTION, action); - } - - if (token != null) { - msg.builder.field(Field.PRINCIPAL, token.principal()); - } - - if (realm != null) { - msg.builder.field(Field.REALM, realm); - } - if (indices != null) { - msg.builder.array(Field.INDICES, indices.toArray(Strings.EMPTY_ARRAY)); - } - if 
(includeRequestBody) { - msg.builder.field(Field.REQUEST_BODY, restRequestContent(request)); - } - msg.builder.field(Field.ORIGIN_TYPE, "rest"); - InetSocketAddress address = request.getHttpChannel().getRemoteAddress(); - if (address != null) { - msg.builder.field(Field.ORIGIN_ADDRESS, NetworkAddress.format(address.getAddress())); - } else { - msg.builder.field(Field.ORIGIN_ADDRESS, address); - } - msg.builder.field(Field.URI, request.uri()); - return msg.end(); - } - - private Message message(String type, @Nullable Tuple realms, @Nullable User user, @Nullable String[] roleNames, - RestRequest request) throws Exception { - - Message msg = new Message().start(); - common("rest", type, msg.builder); - - addUserAndRealmFields(msg.builder, type, user, realms); - if (roleNames != null) { - msg.builder.array(Field.ROLE_NAMES, roleNames); - } - if (includeRequestBody) { - msg.builder.field(Field.REQUEST_BODY, restRequestContent(request)); - } - msg.builder.field(Field.ORIGIN_TYPE, "rest"); - InetSocketAddress address = request.getHttpChannel().getRemoteAddress(); - if (address != null) { - msg.builder.field(Field.ORIGIN_ADDRESS, NetworkAddress.format(address.getAddress())); - } else { - msg.builder.field(Field.ORIGIN_ADDRESS, address); - } - msg.builder.field(Field.URI, request.uri()); - - return msg.end(); - } - - private Message message(String layer, String type, InetAddress originAddress, String profile, - SecurityIpFilterRule rule) throws IOException { - - Message msg = new Message().start(); - common(layer, type, msg.builder); - - msg.builder.field(Field.ORIGIN_ADDRESS, NetworkAddress.format(originAddress)); - msg.builder.field(Field.TRANSPORT_PROFILE, profile); - msg.builder.field(Field.RULE, rule); - - return msg.end(); - } - - private XContentBuilder common(String layer, String type, XContentBuilder builder) throws IOException { - builder.field(Field.NODE_NAME, nodeName); - builder.field(Field.NODE_HOST_NAME, nodeHostName); - builder.field(Field.NODE_HOST_ADDRESS, 
nodeHostAddress); - builder.field(Field.LAYER, layer); - builder.field(Field.TYPE, type); - - String opaqueId = threadPool.getThreadContext().getHeader(Task.X_OPAQUE_ID); - if (opaqueId != null) { - builder.field("opaque_id", opaqueId); - } - - return builder; - } - - private static XContentBuilder originAttributes(TransportMessage message, XContentBuilder builder, - DiscoveryNode localNode, ThreadContext threadContext) throws IOException { - - // first checking if the message originated in a rest call - InetSocketAddress restAddress = RemoteHostHeader.restRemoteAddress(threadContext); - if (restAddress != null) { - builder.field(Field.ORIGIN_TYPE, "rest"); - builder.field(Field.ORIGIN_ADDRESS, NetworkAddress.format(restAddress.getAddress())); - return builder; - } - - // we'll see if was originated in a remote node - TransportAddress address = message.remoteAddress(); - if (address != null) { - builder.field(Field.ORIGIN_TYPE, "transport"); - builder.field(Field.ORIGIN_ADDRESS, - NetworkAddress.format(address.address().getAddress())); - return builder; - } - - // the call was originated locally on this node - builder.field(Field.ORIGIN_TYPE, "local_node"); - builder.field(Field.ORIGIN_ADDRESS, localNode.getHostAddress()); - return builder; - } - - void enqueue(Message message, String type) { - State currentState = state(); - if (currentState != State.STOPPING && currentState != State.STOPPED) { - boolean accepted = queueConsumer.offer(message); - if (!accepted) { - logger.warn("failed to index audit event: [{}]. internal queue is full, which may be caused by a high indexing rate or " + - "issue with the destination", type); - } - } - } - - // for testing to ensure we get the proper timestamp and index name... 
- Message peek() { - return queueConsumer.peek(); - } - - Client initializeRemoteClient(Settings settings, Logger logger) { - Settings clientSettings = REMOTE_CLIENT_SETTINGS.get(settings); - List hosts = clientSettings.getAsList("hosts"); - if (hosts.isEmpty()) { - throw new ElasticsearchException("missing required setting " + - "[" + REMOTE_CLIENT_SETTINGS.getKey() + ".hosts] for remote audit log indexing"); - } - - final int processors = EsExecutors.PROCESSORS_SETTING.get(settings); - if (EsExecutors.PROCESSORS_SETTING.exists(clientSettings)) { - final int clientProcessors = EsExecutors.PROCESSORS_SETTING.get(clientSettings); - if (clientProcessors != processors) { - final String message = String.format( - Locale.ROOT, - "explicit processor setting [%d] for audit trail remote client does not match inherited processor setting [%d]", - clientProcessors, - processors); - throw new IllegalStateException(message); - } - } - - if (clientSettings.get("cluster.name", "").isEmpty()) { - throw new ElasticsearchException("missing required setting " + - "[" + REMOTE_CLIENT_SETTINGS.getKey() + ".cluster.name] for remote audit log indexing"); - } - - List> hostPortPairs = new ArrayList<>(); - - for (String host : hosts) { - List hostPort = Arrays.asList(host.trim().split(":")); - if (hostPort.size() != 1 && hostPort.size() != 2) { - logger.warn("invalid host:port specified: [{}] for setting [{}.hosts]", REMOTE_CLIENT_SETTINGS.getKey(), host); - } - hostPortPairs.add(new Tuple<>(hostPort.get(0), hostPort.size() == 2 ? 
Integer.valueOf(hostPort.get(1)) : 9300)); - } - - if (hostPortPairs.size() == 0) { - throw new ElasticsearchException("no valid host:port pairs specified for setting [" - + REMOTE_CLIENT_SETTINGS.getKey() + ".hosts]"); - } - final Settings theClientSetting = - Settings.builder() - .put(clientSettings.filter((s) -> s.startsWith("hosts") == false)) // hosts is not a valid setting - .put(EsExecutors.PROCESSORS_SETTING.getKey(), processors) - .build(); - final TransportClient transportClient = new TransportClient(Settings.builder() - .put("node.name", DEFAULT_CLIENT_NAME + "-" + Node.NODE_NAME_SETTING.get(settings)) - .put(theClientSetting).build(), Settings.EMPTY, remoteTransportClientPlugins(), null) {}; - for (Tuple pair : hostPortPairs) { - try { - transportClient.addTransportAddress(new TransportAddress(InetAddress.getByName(pair.v1()), pair.v2())); - } catch (UnknownHostException e) { - throw new ElasticsearchException("could not find host {}", e, pair.v1()); - } - } - - logger.info("forwarding audit events to remote cluster [{}] using hosts [{}]", - clientSettings.get("cluster.name", ""), hostPortPairs.toString()); - return transportClient; - } - - public static Settings customAuditIndexSettings(Settings nodeSettings, Logger logger) { - final Settings newSettings = Settings.builder() - .put(INDEX_SETTINGS.get(nodeSettings), false) - .normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX) - .build(); - if (newSettings.names().isEmpty()) { - return Settings.EMPTY; - } - - // Filter out forbidden setting - return Settings.builder().put(newSettings.filter(name -> { - if (FORBIDDEN_INDEX_SETTING.equals(name)) { - logger.warn("overriding the default [{}} setting is forbidden. 
ignoring...", name); - return false; - } - return true; - })).build(); - } - - private void putTemplate(Settings customSettings, Consumer consumer) { - try { - final PutIndexTemplateRequest request = getPutIndexTemplateRequest(customSettings); - - client.admin().indices().putTemplate(request, ActionListener.wrap((response) -> { - if (response.isAcknowledged()) { - // now we may need to update the mappings of the current index - client.admin().cluster().prepareState().execute(ActionListener.wrap( - stateResponse -> updateCurrentIndexMappingsIfNecessary(stateResponse.getState()), - consumer)); - } else { - consumer.accept(new IllegalStateException("failed to put index template for audit logging")); - } - }, consumer)); - } catch (Exception e) { - logger.debug("unexpected exception while putting index template", e); - consumer.accept(e); - } - } - - private PutIndexTemplateRequest getPutIndexTemplateRequest(Settings customSettings) { - final byte[] template = TemplateUtils.loadTemplate("/" + INDEX_TEMPLATE_NAME + ".json", - Version.CURRENT.toString(), SecurityIndexManager.TEMPLATE_VERSION_PATTERN).getBytes(StandardCharsets.UTF_8); - final PutIndexTemplateRequest request = new PutIndexTemplateRequest(INDEX_TEMPLATE_NAME).source(template, XContentType.JSON); - if (customSettings != null && customSettings.names().size() > 0) { - Settings updatedSettings = Settings.builder() - .put(request.settings()) - .put(customSettings) - .build(); - request.settings(updatedSettings); - } - return request; - } - - private void putAuditIndexMappings(String index, String mappings, ActionListener listener) { - client.admin().indices().preparePutMapping(index) - .setType(DOC_TYPE) - .setSource(mappings, XContentType.JSON) - .execute(ActionListener.wrap((response) -> { - if (response.isAcknowledged()) { - listener.onResponse(null); - } else { - listener.onFailure(new IllegalStateException("failed to put mappings for audit logging index [" + index + "]")); - } - }, - listener::onFailure)); 
- } - - BlockingQueue createQueue(int maxQueueSize) { - return new LinkedBlockingQueue<>(maxQueueSize); - } - - private void initializeBulkProcessor() { - - final int bulkSize = BULK_SIZE_SETTING.get(settings); - final TimeValue interval = FLUSH_TIMEOUT_SETTING.get(settings); - - bulkProcessor = BulkProcessor.builder(client, new BulkProcessor.Listener() { - @Override - public void beforeBulk(long executionId, BulkRequest request) { - } - - @Override - public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { - if (response.hasFailures()) { - logger.info("failed to bulk index audit events: [{}]", response.buildFailureMessage()); - } - } - - @Override - public void afterBulk(long executionId, BulkRequest request, Throwable failure) { - logger.error(new ParameterizedMessage("failed to bulk index audit events: [{}]", failure.getMessage()), failure); - } - }).setBulkActions(bulkSize) - .setFlushInterval(interval) - .setConcurrentRequests(1) - .build(); - } - - // method for testing to allow different plugins such as mock transport... 
- List> remoteTransportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class); - } - - public static void registerSettings(List> settings) { - settings.add(INDEX_SETTINGS); - settings.add(EXCLUDE_EVENT_SETTINGS); - settings.add(INCLUDE_EVENT_SETTINGS); - settings.add(ROLLOVER_SETTING); - settings.add(BULK_SIZE_SETTING); - settings.add(FLUSH_TIMEOUT_SETTING); - settings.add(QUEUE_SIZE_SETTING); - settings.add(REMOTE_CLIENT_SETTINGS); - settings.add(INCLUDE_REQUEST_BODY); - } - - private final class QueueConsumer extends Thread implements Closeable { - private final AtomicBoolean open = new AtomicBoolean(true); - private final BlockingQueue eventQueue; - private final Message shutdownSentinelMessage; - - QueueConsumer(String name, BlockingQueue eventQueue) { - super(name); - this.eventQueue = eventQueue; - try { - shutdownSentinelMessage = new Message(); - } catch (IOException e) { - throw new AssertionError(e); - } - } - - @Override - public void close() { - if (open.compareAndSet(true, false)) { - try { - eventQueue.put(shutdownSentinelMessage); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - } - - @Override - public void run() { - while (open.get()) { - try { - final Message message = eventQueue.take(); - if (message == shutdownSentinelMessage || open.get() == false) { - break; - } - final IndexRequest indexRequest = client.prepareIndex() - .setIndex(resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, message.timestamp, rollover)) - .setType(DOC_TYPE).setSource(message.builder).request(); - bulkProcessor.add(indexRequest); - } catch (InterruptedException e) { - logger.debug("index audit queue consumer interrupted", e); - close(); - break; - } catch (Exception e) { - // log the exception and keep going - logger.warn("failed to index audit message from queue", e); - } - } - eventQueue.clear(); - } - - public boolean offer(Message message) { - if (open.get()) { - return eventQueue.offer(message); - } - return false; - } - - 
public Message peek() { - return eventQueue.peek(); - } - } - - static class Message { - - final DateTime timestamp; - final XContentBuilder builder; - - Message() throws IOException { - this.timestamp = DateTime.now(DateTimeZone.UTC); - this.builder = XContentFactory.jsonBuilder(); - } - - Message start() throws IOException { - builder.startObject(); - builder.timeField(Field.TIMESTAMP, timestamp); - return this; - } - - Message end() throws IOException { - builder.endObject(); - return this; - } - } - - interface Field { - String TIMESTAMP = "@timestamp"; - String NODE_NAME = "node_name"; - String NODE_HOST_NAME = "node_host_name"; - String NODE_HOST_ADDRESS = "node_host_address"; - String LAYER = "layer"; - String TYPE = "event_type"; - String ORIGIN_ADDRESS = "origin_address"; - String ORIGIN_TYPE = "origin_type"; - String PRINCIPAL = "principal"; - String ROLE_NAMES = "roles"; - String RUN_AS_PRINCIPAL = "run_as_principal"; - String RUN_AS_REALM = "run_as_realm"; - String RUN_BY_PRINCIPAL = "run_by_principal"; - String RUN_BY_REALM = "run_by_realm"; - String ACTION = "action"; - String INDICES = "indices"; - String REQUEST = "request"; - String REQUEST_BODY = "request_body"; - String URI = "uri"; - String REALM = "realm"; - String TRANSPORT_PROFILE = "transport_profile"; - String RULE = "rule"; - } - - public enum State { - INITIALIZED, - STARTING, - STARTED, - STOPPING, - STOPPED - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java index 2944cd3134a79..a8b2bf4b5350d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.xpack.core.security.SecurityFeatureSetUsage; import 
org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; @@ -97,12 +98,6 @@ public void testUsage() throws Exception { settings.put("xpack.security.transport.ssl.enabled", transportSSLEnabled); final boolean auditingEnabled = randomBoolean(); settings.put(XPackSettings.AUDIT_ENABLED.getKey(), auditingEnabled); - final String[] auditOutputs = randomFrom( - new String[] { "logfile" }, - new String[] { "index" }, - new String[] { "logfile", "index" } - ); - settings.putList(Security.AUDIT_OUTPUTS_SETTING.getKey(), auditOutputs); final boolean httpIpFilterEnabled = randomBoolean(); final boolean transportIPFilterEnabled = randomBoolean(); when(ipFilter.usageStats()) @@ -192,7 +187,11 @@ public void testUsage() throws Exception { // auditing assertThat(source.getValue("audit.enabled"), is(auditingEnabled)); - assertThat(source.getValue("audit.outputs"), contains(auditOutputs)); + if (auditingEnabled) { + assertThat(source.getValue("audit.outputs"), contains(LoggingAuditTrail.NAME)); + } else { + assertThat(source.getValue("audit.outputs"), is(nullValue())); + } // ip filter assertThat(source.getValue("ipfilter.http.enabled"), is(httpIpFilterEnabled)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecuritySettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecuritySettingsTests.java deleted file mode 100644 index c928d5b46b654..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecuritySettingsTests.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; - - -import static org.hamcrest.Matchers.containsString; - -public class SecuritySettingsTests extends ESTestCase { - - public void testValidAutoCreateIndex() { - Security.validateAutoCreateIndex(Settings.EMPTY); - Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", true).build()); - Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", false).build()); - Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".security,.security-6").build()); - Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".security*").build()); - Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", "*s*").build()); - Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".s*").build()); - Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", "foo").build()); - Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".security_audit_log*").build()); - - Security.validateAutoCreateIndex(Settings.builder() - .put("action.auto_create_index", ".security,.security-6") - .put(XPackSettings.AUDIT_ENABLED.getKey(), true) - .build()); - - try { - Security.validateAutoCreateIndex(Settings.builder() - .put("action.auto_create_index", ".security,.security-6") - .put(XPackSettings.AUDIT_ENABLED.getKey(), true) - .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), randomFrom("index", "logfile,index")) - 
.build()); - fail("IllegalArgumentException expected"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString(IndexAuditTrailField.INDEX_NAME_PREFIX)); - } - - Security.validateAutoCreateIndex(Settings.builder() - .put("action.auto_create_index", ".security_audit_log*,.security,.security-6") - .put(XPackSettings.AUDIT_ENABLED.getKey(), true) - .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), randomFrom("index", "logfile,index")) - .build()); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 8674a5b295085..1f98cd6660274 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -43,7 +43,6 @@ import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.audit.AuditTrailService; -import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail; import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail; import org.elasticsearch.xpack.security.authc.Realms; import org.hamcrest.Matchers; @@ -180,37 +179,6 @@ public void testDisabledByDefault() throws Exception { assertEquals(0, auditTrailService.getAuditTrails().size()); } - public void testIndexAuditTrail() throws Exception { - Settings settings = Settings.builder() - .put(XPackSettings.AUDIT_ENABLED.getKey(), true) - .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "index").build(); - Collection components = createComponents(settings); - AuditTrailService service = findComponent(AuditTrailService.class, components); - assertNotNull(service); - assertEquals(1, service.getAuditTrails().size()); - assertEquals(IndexAuditTrail.NAME, 
service.getAuditTrails().get(0).name()); - } - - public void testIndexAndLoggingAuditTrail() throws Exception { - Settings settings = Settings.builder() - .put(XPackSettings.AUDIT_ENABLED.getKey(), true) - .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "index,logfile").build(); - Collection components = createComponents(settings); - AuditTrailService service = findComponent(AuditTrailService.class, components); - assertNotNull(service); - assertEquals(2, service.getAuditTrails().size()); - assertEquals(IndexAuditTrail.NAME, service.getAuditTrails().get(0).name()); - assertEquals(LoggingAuditTrail.NAME, service.getAuditTrails().get(1).name()); - } - - public void testUnknownOutput() { - Settings settings = Settings.builder() - .put(XPackSettings.AUDIT_ENABLED.getKey(), true) - .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "foo").build(); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createComponents(settings)); - assertEquals("Unknown audit trail output [foo]", e.getMessage()); - } - public void testHttpSettingDefaults() throws Exception { final Settings defaultSettings = Security.additionalSettings(Settings.EMPTY, true, false); assertThat(SecurityField.NAME4, equalTo(NetworkModule.TRANSPORT_TYPE_SETTING.get(defaultSettings))); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/AuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/AuditTrailTests.java deleted file mode 100644 index 022328f426fa3..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/AuditTrailTests.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.security.audit.index; - -import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; -import org.elasticsearch.client.Requests; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.test.SecuritySettingsSource; -import org.elasticsearch.xpack.core.security.ScrollHelper; -import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; -import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; -import org.elasticsearch.xpack.security.audit.AuditTrail; -import org.elasticsearch.xpack.security.audit.AuditTrailService; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; - -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; - -import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.iterableWithSize; -import static org.hamcrest.Matchers.nullValue; - -public class AuditTrailTests extends SecurityIntegTestCase { - - private static final String 
AUTHENTICATE_USER = "http_user"; - private static final String EXECUTE_USER = "exec_user"; - private static final String ROLE_CAN_RUN_AS = "can_run_as"; - private static final String ROLES = ROLE_CAN_RUN_AS + ":\n" + " run_as: [ '" + EXECUTE_USER + "' ]\n"; - - @Override - protected boolean addMockHttpTransport() { - return false; // enable http - } - - @Override - public Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("xpack.security.audit.enabled", true) - .put("xpack.security.audit.outputs", "index") - .putList("xpack.security.audit.index.events.include", "access_denied", "authentication_failed", "run_as_denied") - .build(); - } - - @Override - public String configRoles() { - return ROLES + super.configRoles(); - } - - @Override - public String configUsers() { - return super.configUsers() - + AUTHENTICATE_USER + ":" + SecuritySettingsSource.TEST_PASSWORD_HASHED + "\n" - + EXECUTE_USER + ":xx_no_password_xx\n"; - } - - @Override - public String configUsersRoles() { - return super.configUsersRoles() - + ROLE_CAN_RUN_AS + ":" + AUTHENTICATE_USER + "\n" - + "monitoring_user:" + EXECUTE_USER; - } - - @Override - public boolean transportSSLEnabled() { - return true; - } - - public void testAuditAccessDeniedWithRunAsUser() throws Exception { - try { - Request request = new Request("GET", "/.security/_search"); - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(AUTHENTICATE_USER, TEST_PASSWORD_SECURE_STRING)); - options.addHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, EXECUTE_USER); - request.setOptions(options); - getRestClient().performRequest(request); - fail("request should have failed"); - } catch (final ResponseException e) { - assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); - } - - final Collection> events = waitForAuditEvents(); - - assertThat(events, 
iterableWithSize(1)); - final Map event = events.iterator().next(); - assertThat(event.get(IndexAuditTrail.Field.TYPE), equalTo("access_denied")); - assertThat((List) event.get(IndexAuditTrail.Field.INDICES), containsInAnyOrder(".security")); - assertThat(event.get(IndexAuditTrail.Field.PRINCIPAL), equalTo(EXECUTE_USER)); - assertThat(event.get(IndexAuditTrail.Field.RUN_BY_PRINCIPAL), equalTo(AUTHENTICATE_USER)); - } - - - public void testAuditRunAsDeniedEmptyUser() throws Exception { - try { - Request request = new Request("GET", "/.security/_search"); - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(AUTHENTICATE_USER, TEST_PASSWORD_SECURE_STRING)); - options.addHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, ""); - request.setOptions(options); - getRestClient().performRequest(request); - fail("request should have failed"); - } catch (final ResponseException e) { - assertThat(e.getResponse().getStatusLine().getStatusCode(), is(401)); - } - - final Collection> events = waitForAuditEvents(); - - assertThat(events, iterableWithSize(1)); - final Map event = events.iterator().next(); - assertThat(event.get(IndexAuditTrail.Field.TYPE), equalTo("run_as_denied")); - assertThat(event.get(IndexAuditTrail.Field.PRINCIPAL), equalTo(AUTHENTICATE_USER)); - assertThat(event.get(IndexAuditTrail.Field.RUN_AS_PRINCIPAL), equalTo("")); - assertThat(event.get(IndexAuditTrail.Field.REALM), equalTo("file")); - assertThat(event.get(IndexAuditTrail.Field.RUN_AS_REALM), nullValue()); - } - - private Collection> waitForAuditEvents() throws InterruptedException { - waitForAuditTrailToBeWritten(); - final AtomicReference>> eventsRef = new AtomicReference<>(); - awaitBusy(() -> { - try { - final Collection> events = getAuditEvents(); - eventsRef.set(events); - return events.size() > 0; - } catch (final Exception e) { - throw new RuntimeException(e); - } - }); - - return eventsRef.get(); - 
} - private Collection> getAuditEvents() throws Exception { - final Client client = client(); - final DateTime now = new DateTime(DateTimeZone.UTC); - final String indexName = IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now, IndexNameResolver.Rollover.DAILY); - - assertTrue(awaitBusy(() -> indexExists(client, indexName), 5, TimeUnit.SECONDS)); - - client.admin().indices().refresh(Requests.refreshRequest(indexName)).get(); - - final SearchRequest request = client.prepareSearch(indexName) - .setScroll(TimeValue.timeValueMinutes(10L)) - .setTypes(IndexAuditTrail.DOC_TYPE) - .setQuery(QueryBuilders.matchAllQuery()) - .setSize(1000) - .setFetchSource(true) - .request(); - request.indicesOptions().ignoreUnavailable(); - - final PlainActionFuture>> listener = new PlainActionFuture<>(); - ScrollHelper.fetchAllByEntity(client, request, listener, SearchHit::getSourceAsMap); - - return listener.get(); - } - - private boolean indexExists(Client client, String indexName) { - try { - final ActionFuture future = client.admin().indices().exists(Requests.indicesExistsRequest(indexName)); - return future.get().isExists(); - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException("Failed to check if " + indexName + " exists", e); - } - } - - private void waitForAuditTrailToBeWritten() throws InterruptedException { - final AuditTrailService auditTrailService = (AuditTrailService) internalCluster().getInstance(AuditTrail.class); - assertThat(auditTrailService.getAuditTrails(), iterableWithSize(1)); - - final IndexAuditTrail indexAuditTrail = (IndexAuditTrail) auditTrailService.getAuditTrails().get(0); - assertTrue(awaitBusy(() -> indexAuditTrail.peek() == null, 5, TimeUnit.SECONDS)); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java deleted 
file mode 100644 index 45e54cb2d5464..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security.audit.index; - -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.FilterClient; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.MockTransportClient; -import org.elasticsearch.transport.TransportMessage; -import org.elasticsearch.xpack.core.security.authc.Authentication; -import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; -import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; -import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail.State; -import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; -import org.junit.After; -import org.junit.Before; - -import java.net.InetAddress; -import java.util.concurrent.BlockingQueue; -import 
java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.hamcrest.Matchers.is; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -public class IndexAuditTrailMutedTests extends ESTestCase { - - private Client client; - private TransportClient transportClient; - private ThreadPool threadPool; - private ClusterService clusterService; - private IndexAuditTrail auditTrail; - - private AtomicBoolean messageEnqueued; - private AtomicBoolean clientCalled; - - @Before - public void setup() { - DiscoveryNode localNode = mock(DiscoveryNode.class); - when(localNode.getHostAddress()).thenReturn(buildNewFakeTransportAddress().toString()); - clusterService = mock(ClusterService.class); - when(clusterService.localNode()).thenReturn(localNode); - - threadPool = new TestThreadPool("index audit trail tests"); - transportClient = new MockTransportClient(Settings.EMPTY); - clientCalled = new AtomicBoolean(false); - class IClient extends FilterClient { - IClient(Client transportClient){ - super(Settings.EMPTY, threadPool, transportClient); - } - - @Override - protected - void doExecute(Action action, Request request, ActionListener listener) { - clientCalled.set(true); - } - } - client = new IClient(transportClient); - messageEnqueued = new AtomicBoolean(false); - } - - @After - public void stop() { - if (auditTrail != null) { - auditTrail.stop(); - } - if (transportClient != null) { - transportClient.close(); - } - threadPool.shutdown(); - } - - public void testAnonymousAccessDeniedMutedTransport() { - createAuditTrail(new String[] { "anonymous_access_denied" }); - TransportMessage message = mock(TransportMessage.class); - auditTrail.anonymousAccessDenied(randomAlphaOfLengthBetween(6, 12), "_action", message); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - verifyZeroInteractions(message); 
- } - - public void testAnonymousAccessDeniedMutedRest() { - createAuditTrail(new String[] { "anonymous_access_denied" }); - RestRequest restRequest = mock(RestRequest.class); - auditTrail.anonymousAccessDenied(randomAlphaOfLengthBetween(6, 12), restRequest); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - verifyZeroInteractions(restRequest); - } - - public void testAuthenticationFailedMutedTransport() { - createAuditTrail(new String[] { "authentication_failed" }); - TransportMessage message = mock(TransportMessage.class); - AuthenticationToken token = mock(AuthenticationToken.class); - - // without realm - auditTrail.authenticationFailed(randomAlphaOfLengthBetween(6, 12), token, "_action", message); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - // without the token - auditTrail.authenticationFailed(randomAlphaOfLengthBetween(6, 12), "_action", message); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - verifyZeroInteractions(token, message); - } - - public void testAuthenticationFailedMutedRest() { - createAuditTrail(new String[] { "authentication_failed" }); - RestRequest restRequest = mock(RestRequest.class); - AuthenticationToken token = mock(AuthenticationToken.class); - - // without the realm - auditTrail.authenticationFailed(randomAlphaOfLengthBetween(6, 12), token, restRequest); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - // without the token - auditTrail.authenticationFailed(randomAlphaOfLengthBetween(6, 12), restRequest); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - verifyZeroInteractions(token, restRequest); - } - - public void testAuthenticationFailedRealmMutedTransport() { - createAuditTrail(new String[] { "realm_authentication_failed" }); - TransportMessage message = mock(TransportMessage.class); - 
AuthenticationToken token = mock(AuthenticationToken.class); - - // with realm - auditTrail.authenticationFailed(randomAlphaOfLengthBetween(6, 12), randomAlphaOfLengthBetween(2, 10), token, "_action", message); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - verifyZeroInteractions(token, message); - } - - public void testAuthenticationFailedRealmMutedRest() { - createAuditTrail(new String[]{"realm_authentication_failed"}); - RestRequest restRequest = mock(RestRequest.class); - AuthenticationToken token = mock(AuthenticationToken.class); - - // with realm - auditTrail.authenticationFailed(randomAlphaOfLengthBetween(6, 12), randomAlphaOfLengthBetween(2, 10), token, restRequest); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - verifyZeroInteractions(token, restRequest); - } - - public void testAccessGrantedMuted() { - createAuditTrail(new String[] { "access_granted" }); - final TransportMessage message = mock(TransportMessage.class); - final Authentication authentication = mock(Authentication.class); - auditTrail.accessGranted(randomAlphaOfLengthBetween(6, 12), authentication, randomAlphaOfLengthBetween(6, 40), message, - new String[] { "role" }); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - verifyZeroInteractions(message); - } - - public void testSystemAccessGrantedMuted() { - createAuditTrail(randomFrom(new String[] { "access_granted" }, null)); - final TransportMessage message = mock(TransportMessage.class); - final Authentication authentication = new Authentication(SystemUser.INSTANCE, new RealmRef(null, null, null), null); - auditTrail.accessGranted(randomAlphaOfLengthBetween(6, 12), authentication, "internal:foo", message, new String[] { "role" }); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - verifyZeroInteractions(message); - } - - public void testAccessDeniedMuted() 
{ - createAuditTrail(new String[] { "access_denied" }); - final TransportMessage message = mock(TransportMessage.class); - final Authentication authentication = mock(Authentication.class); - auditTrail.accessDenied(randomAlphaOfLengthBetween(6, 12), authentication, randomAlphaOfLengthBetween(6, 40), message, - new String[] { "role" }); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - verifyZeroInteractions(message, authentication); - } - - public void testTamperedRequestMuted() { - createAuditTrail(new String[] { "tampered_request" }); - TransportMessage message = mock(TransportMessage.class); - User user = mock(User.class); - - // with user - auditTrail.tamperedRequest(randomAlphaOfLengthBetween(6, 12), user, randomAlphaOfLengthBetween(6, 40), message); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - // without user - auditTrail.tamperedRequest(randomAlphaOfLengthBetween(6, 12), randomAlphaOfLengthBetween(6, 40), message); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - verifyZeroInteractions(message, user); - } - - public void testConnectionGrantedMuted() { - createAuditTrail(new String[] { "connection_granted" }); - InetAddress address = mock(InetAddress.class); - SecurityIpFilterRule rule = mock(SecurityIpFilterRule.class); - - auditTrail.connectionGranted(address, randomAlphaOfLengthBetween(1, 12), rule); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - verifyZeroInteractions(address, rule); - } - - public void testConnectionDeniedMuted() { - createAuditTrail(new String[] { "connection_denied" }); - InetAddress address = mock(InetAddress.class); - SecurityIpFilterRule rule = mock(SecurityIpFilterRule.class); - - auditTrail.connectionDenied(address, randomAlphaOfLengthBetween(1, 12), rule); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), 
is(false)); - - verifyZeroInteractions(address, rule); - } - - public void testRunAsGrantedMuted() { - createAuditTrail(new String[] { "run_as_granted" }); - TransportMessage message = mock(TransportMessage.class); - Authentication authentication = mock(Authentication.class); - - auditTrail.runAsGranted(randomAlphaOfLengthBetween(6, 12), authentication, randomAlphaOfLengthBetween(6, 40), message, - new String[] { "role" }); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - verifyZeroInteractions(message, authentication); - } - - public void testRunAsDeniedMuted() { - createAuditTrail(new String[] { "run_as_denied" }); - TransportMessage message = mock(TransportMessage.class); - Authentication authentication = mock(Authentication.class); - - auditTrail.runAsDenied(randomAlphaOfLengthBetween(6, 12), authentication, randomAlphaOfLengthBetween(6, 40), message, - new String[] { "role" }); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - verifyZeroInteractions(message, authentication); - } - - public void testAuthenticationSuccessRest() { - createAuditTrail(new String[] { "authentication_success" }); - RestRequest restRequest = mock(RestRequest.class); - User user = mock(User.class); - String realm = "_realm"; - - auditTrail.authenticationSuccess(randomAlphaOfLengthBetween(6, 12), realm, user, restRequest); - assertThat(messageEnqueued.get(), is(false)); - assertThat(clientCalled.get(), is(false)); - - verifyZeroInteractions(restRequest); - } - - public void testAuthenticationSuccessTransport() { - createAuditTrail(new String[] { "authentication_success" }); - TransportMessage message = mock(TransportMessage.class); - User user = mock(User.class); - String realm = "_realm"; - auditTrail.authenticationSuccess(randomAlphaOfLengthBetween(6, 12), realm, user, randomAlphaOfLengthBetween(6, 40), message); - assertThat(messageEnqueued.get(), is(false)); - 
assertThat(clientCalled.get(), is(false)); - - verifyZeroInteractions(message, user); - } - - IndexAuditTrail createAuditTrail(String[] excludes) { - Settings settings = IndexAuditTrailTests.levelSettings(null, excludes); - auditTrail = new IndexAuditTrail(settings, client, threadPool, clusterService) { - @Override - void updateCurrentIndexMappingsIfNecessary(ClusterState state) { - // skip stuff so we don't have to stub out unnecessary client activities and cluster state - innerStart(); - } - - @Override - BlockingQueue createQueue(int maxQueueSize) { - return new LinkedBlockingQueue(maxQueueSize) { - @Override - public boolean offer(Message message) { - messageEnqueued.set(true); - return super.offer(message); - } - }; - } - }; - auditTrail.start(); - assertThat(auditTrail.state(), is(State.STARTED)); - return auditTrail; - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java deleted file mode 100644 index 2f910658b3569..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java +++ /dev/null @@ -1,996 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.security.audit.index; - -import org.apache.lucene.util.SetOnce; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; -import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest; -import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Requests; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.network.NetworkAddress; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.KeyStoreWrapper; -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.discovery.zen.SettingsBasedHostsProvider; -import org.elasticsearch.http.HttpChannel; -import org.elasticsearch.plugins.MetaDataUpgrader; -import org.elasticsearch.plugins.Plugin; -import 
org.elasticsearch.rest.RestRequest; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.test.SecuritySettingsSource; -import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.test.discovery.TestZenDiscovery; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportInfo; -import org.elasticsearch.transport.TransportMessage; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.security.SecurityField; -import org.elasticsearch.xpack.core.security.authc.Authentication; -import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; -import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; -import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; -import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.LocalStateSecurity; -import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail.Message; -import org.elasticsearch.xpack.security.support.SecurityIndexManager; -import org.elasticsearch.xpack.security.transport.filter.IPFilter; -import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.format.ISODateTimeFormat; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; - -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Set; 
-import java.util.function.Function; - -import static java.util.Collections.emptyMap; -import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; -import static org.elasticsearch.test.InternalTestCluster.clusterName; -import static org.elasticsearch.xpack.security.audit.index.IndexNameResolver.Rollover.DAILY; -import static org.elasticsearch.xpack.security.audit.index.IndexNameResolver.Rollover.HOURLY; -import static org.elasticsearch.xpack.security.audit.index.IndexNameResolver.Rollover.MONTHLY; -import static org.elasticsearch.xpack.security.audit.index.IndexNameResolver.Rollover.WEEKLY; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.hasToString; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - - -@ESIntegTestCase.ClusterScope(scope = SUITE, supportsDedicatedMasters = false, numDataNodes = 1) -public class IndexAuditTrailTests extends SecurityIntegTestCase { - public static final String SECOND_CLUSTER_NODE_PREFIX = "remote_" + SUITE_CLUSTER_NODE_PREFIX; - - private static boolean remoteIndexing; - private static boolean useSSL; - private static InternalTestCluster remoteCluster; - private static Settings remoteSettings; - private static int numShards = -1; - private static int numReplicas = -1; - - private TransportAddress remoteAddress = buildNewFakeTransportAddress(); - private TransportAddress localAddress = new TransportAddress(InetAddress.getLoopbackAddress(), 0); - private IndexNameResolver.Rollover rollover; - private IndexAuditTrail auditor; - private SetOnce enqueuedMessage; - private ThreadPool threadPool; - private boolean includeRequestBody; - - @BeforeClass - public static void 
configureBeforeClass() { - useSSL = randomBoolean(); - remoteIndexing = randomBoolean(); - if (remoteIndexing == false) { - remoteSettings = Settings.EMPTY; - } - } - - @AfterClass - public static void cleanupAfterTest() { - if (remoteCluster != null) { - remoteCluster.close(); - remoteCluster = null; - - } - remoteSettings = null; - } - - @Override - protected boolean transportSSLEnabled() { - return useSSL; - } - - @Override - public Settings nodeSettings(int nodeOrdinal) { - if (numShards == -1) { - numShards = numberOfShards(); - } - if (numReplicas == -1) { - numReplicas = numberOfReplicas(); - } - - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("xpack.security.audit.index.settings.index.number_of_shards", numShards) - .put("xpack.security.audit.index.settings.index.number_of_replicas", numReplicas) - .build(); - } - - @Before - public void initializeRemoteClusterIfNecessary() throws Exception { - if (remoteIndexing == false) { - logger.info("--> remote indexing disabled."); - return; - } - - if (remoteCluster != null) { - return; - } - - // create another cluster - String cluster2Name = clusterName(Scope.SUITE.name(), randomLong()); - - // Setup a second test cluster with randomization for number of nodes, security enabled, and SSL - final int numNodes = randomIntBetween(1, 2); - final boolean useSecurity = randomBoolean(); - final boolean remoteUseSSL = useSecurity && useSSL; - logger.info("--> remote indexing enabled. 
security enabled: [{}], SSL enabled: [{}], nodes: [{}]", useSecurity, useSSL, - numNodes); - SecuritySettingsSource cluster2SettingsSource = - new SecuritySettingsSource(useSSL, createTempDir(), Scope.SUITE) { - @Override - public Settings nodeSettings(int nodeOrdinal) { - Settings.Builder builder = Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "file") - .putList(SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey()) - .put(TestZenDiscovery.USE_ZEN2.getKey(), getUseZen2()) - .put("xpack.security.audit.index.settings.index.number_of_shards", numShards) - .put("xpack.security.audit.index.settings.index.number_of_replicas", numReplicas) - // Disable native ML autodetect_process as the c++ controller won't be available -// .put(MachineLearningField.AUTODETECT_PROCESS.getKey(), false) - .put(XPackSettings.SECURITY_ENABLED.getKey(), useSecurity); - String transport = builder.get(NetworkModule.TRANSPORT_TYPE_KEY); - if (useSecurity == false && (transport == null || SecurityField.NAME4.equals(transport) - || SecurityField.NIO.equals(transport))) { - builder.put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()); - } - return builder.build(); - } - - @Override - public Settings transportClientSettings() { - if (useSecurity) { - return super.transportClientSettings(); - } else { - Settings.Builder builder = Settings.builder() - .put(XPackSettings.SECURITY_ENABLED.getKey(), false) - .put(super.transportClientSettings()); - if (builder.get(NetworkModule.TRANSPORT_TYPE_KEY) == null) { - builder.put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()); - } - return builder.build(); - } - } - - @Override - protected void addDefaultSecurityTransportType(Settings.Builder builder, Settings settings) { - if (useSecurity) { - super.addDefaultSecurityTransportType(builder, settings); - } - } - }; - - - Set> mockPlugins = new HashSet<>(getMockPlugins()); - if (useSecurity == 
false) { - mockPlugins.add(getTestTransportPlugin()); - } - remoteCluster = new InternalTestCluster(randomLong(), createTempDir(), false, true, numNodes, numNodes, cluster2Name, - cluster2SettingsSource, 0, SECOND_CLUSTER_NODE_PREFIX, mockPlugins, - useSecurity ? getClientWrapper() : Function.identity()); - remoteCluster.beforeTest(random(), 0.5); - - NodesInfoResponse response = remoteCluster.client().admin().cluster().prepareNodesInfo().execute().actionGet(); - TransportInfo info = response.getNodes().get(0).getTransport(); - TransportAddress inet = info.address().publishAddress(); - - Settings.Builder builder = Settings.builder() - .put("xpack.security.audit.index.client." + XPackSettings.SECURITY_ENABLED.getKey(), useSecurity) - .put(remoteSettings(NetworkAddress.format(inet.address().getAddress()), inet.address().getPort(), cluster2Name)) - .put("xpack.security.audit.index.client.xpack.security.user", SecuritySettingsSource.TEST_USER_NAME + ":" + - SecuritySettingsSourceField.TEST_PASSWORD); - - if (remoteUseSSL) { - cluster2SettingsSource.addClientSSLSettings(builder, "xpack.security.audit.index.client.xpack.security.transport."); - builder.put("xpack.security.audit.index.client.xpack.security.transport.ssl.enabled", true); - } - if (useSecurity == false && builder.get(NetworkModule.TRANSPORT_TYPE_KEY) == null) { - builder.put("xpack.security.audit.index.client." 
+ NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()); - } - remoteSettings = builder.build(); - } - - @After - public void afterTest() { - if (threadPool != null) { - threadPool.shutdown(); - } - if (auditor != null) { - auditor.stop(); - } - - if (remoteCluster != null) { - remoteCluster.wipe(excludeTemplates()); - } - } - - @Override - protected Set excludeTemplates() { - return Sets.newHashSet(SecurityIndexManager.SECURITY_TEMPLATE_NAME, IndexAuditTrail.INDEX_TEMPLATE_NAME); - } - - @Override - protected int maximumNumberOfShards() { - return 3; - } - - private Settings commonSettings(IndexNameResolver.Rollover rollover) { - return Settings.builder() - .put("xpack.security.audit.enabled", true) - .put("xpack.security.audit.outputs", "index, logfile") - .put("xpack.security.audit.index.bulk_size", 1) - .put("xpack.security.audit.index.flush_interval", "1ms") - .put("xpack.security.audit.index.rollover", rollover.name().toLowerCase(Locale.ENGLISH)) - .put("xpack.security.audit.index.settings.index.number_of_shards", numShards) - .put("xpack.security.audit.index.settings.index.number_of_replicas", numReplicas) - .build(); - } - - static Settings remoteSettings(String address, int port, String clusterName) { - return Settings.builder() - .put("xpack.security.audit.index.client.hosts", address + ":" + port) - .put("xpack.security.audit.index.client.cluster.name", clusterName) - .build(); - } - - static Settings levelSettings(String[] includes, String[] excludes) { - Settings.Builder builder = Settings.builder(); - if (includes != null) { - builder.putList("xpack.security.audit.index.events.include", includes); - } - if (excludes != null) { - builder.putList("xpack.security.audit.index.events.exclude", excludes); - } - return builder.build(); - } - - private Settings settings(IndexNameResolver.Rollover rollover, String[] includes, String[] excludes) { - Settings.Builder builder = Settings.builder(); - builder.put(levelSettings(includes, excludes)); - 
builder.put(commonSettings(rollover)); - builder.put("xpack.security.audit.index.events.emit_request_body", includeRequestBody); - return builder.build(); - } - - private Client getClient() { - return remoteIndexing ? remoteCluster.client() : client(); - } - - private void initialize() throws Exception { - initialize(null, null); - } - - private void initialize(String[] includes, String[] excludes) throws Exception { - initialize(includes, excludes, Settings.EMPTY); - } - - private void initialize(final String[] includes, final String[] excludes, final Settings additionalSettings) throws Exception { - rollover = randomFrom(HOURLY, DAILY, WEEKLY, MONTHLY); - includeRequestBody = randomBoolean(); - Settings.Builder builder = Settings.builder(); - if (remoteIndexing) { - builder.put(remoteSettings); - } - builder.put(settings(rollover, includes, excludes)).put(additionalSettings).build(); - // IndexAuditTrail should ignore secure settings - // they are merged on the master node creating the audit index - if (randomBoolean()) { - MockSecureSettings ignored = new MockSecureSettings(); - if (randomBoolean()) { - ignored.setString(KeyStoreWrapper.SEED_SETTING.getKey(), "non-empty-secure-settings"); - } - builder.setSecureSettings(ignored); - } - Settings settings = builder.build(); - - logger.info("--> settings: [{}]", settings); - DiscoveryNode localNode = mock(DiscoveryNode.class); - when(localNode.getHostAddress()).thenReturn(remoteAddress.getAddress()); - when(localNode.getHostName()).thenReturn(remoteAddress.getAddress()); - ClusterService clusterService = mock(ClusterService.class); - ClusterState state = mock(ClusterState.class); - DiscoveryNodes nodes = mock(DiscoveryNodes.class); - when(clusterService.localNode()).thenReturn(localNode); - when(clusterService.state()).thenReturn(client().admin().cluster().prepareState().get().getState()); - when(state.getNodes()).thenReturn(nodes); - when(nodes.isLocalNodeElectedMaster()).thenReturn(true); - threadPool = new 
TestThreadPool("index audit trail tests"); - enqueuedMessage = new SetOnce<>(); - auditor = new IndexAuditTrail(settings, client(), threadPool, clusterService) { - - @Override - void enqueue(Message message, String type) { - enqueuedMessage.set(message); - super.enqueue(message, type); - } - - @Override - List> remoteTransportClientPlugins() { - return Arrays.asList(LocalStateSecurity.class, getTestTransportPlugin()); - } - }; - auditor.start(); - } - - public void testIndexTemplateUpgrader() throws Exception { - final MetaDataUpgrader metaDataUpgrader = internalCluster().getInstance(MetaDataUpgrader.class); - final Map updatedTemplates = metaDataUpgrader.indexTemplateMetaDataUpgraders.apply(emptyMap()); - final IndexTemplateMetaData indexAuditTrailTemplate = updatedTemplates.get(IndexAuditTrail.INDEX_TEMPLATE_NAME); - assertThat(indexAuditTrailTemplate, notNullValue()); - // test custom index settings override template - assertThat(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexAuditTrailTemplate.settings()), is(numReplicas)); - assertThat(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexAuditTrailTemplate.settings()), is(numShards)); - // test upgrade template and installed template are equal - final GetIndexTemplatesRequest request = new GetIndexTemplatesRequest(IndexAuditTrail.INDEX_TEMPLATE_NAME); - final GetIndexTemplatesResponse response = client().admin().indices().getTemplates(request).get(); - assertThat(response.getIndexTemplates(), hasSize(1)); - assertThat(indexAuditTrailTemplate, is(response.getIndexTemplates().get(0))); - } - - public void testProcessorsSetting() { - final boolean explicitProcessors = randomBoolean(); - final int processors; - if (explicitProcessors) { - processors = randomIntBetween(1, 16); - } else { - processors = EsExecutors.PROCESSORS_SETTING.get(Settings.EMPTY); - } - final boolean explicitClientProcessors = randomBoolean(); - final int clientProcessors; - if (explicitClientProcessors) { - clientProcessors = 
randomIntBetween(1, 16); - } else { - clientProcessors = EsExecutors.PROCESSORS_SETTING.get(Settings.EMPTY); - } - - final Settings.Builder additionalSettingsBuilder = - Settings.builder() - .put("xpack.security.audit.index.client.cluster.name", "remote") - .put("xpack.security.audit.index.client.hosts", "localhost:9300"); - - if (explicitProcessors) { - additionalSettingsBuilder.put(EsExecutors.PROCESSORS_SETTING.getKey(), processors); - } - if (explicitClientProcessors) { - additionalSettingsBuilder.put("xpack.security.audit.index.client.processors", clientProcessors); - } - - final ThrowingRunnable runnable = () -> initialize(null, null, additionalSettingsBuilder.build()); - if (processors == clientProcessors || explicitClientProcessors == false) { - // okay, the client initialized which is all we care about but no nodes are available because we never set up the remote cluster - expectThrows(NoNodeAvailableException.class, runnable); - } else { - final IllegalStateException e = expectThrows(IllegalStateException.class, runnable); - assertThat( - e, - hasToString(containsString( - "explicit processor setting [" + clientProcessors + "]" + - " for audit trail remote client does not match inherited processor setting [" + processors + "]"))); - } - } - - public void testAnonymousAccessDeniedTransport() throws Exception { - initialize(); - TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); - auditor.anonymousAccessDenied(randomAlphaOfLengthBetween(6, 12), "_action", message); - - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - assertAuditMessage(hit, "transport", "anonymous_access_denied"); - Map sourceMap = hit.getSourceAsMap(); - if (message instanceof RemoteHostMockMessage) { - assertEquals(remoteAddress.getAddress(), sourceMap.get("origin_address")); - } else { - assertEquals(localAddress.getAddress(), sourceMap.get("origin_address")); - } - - assertEquals("_action", 
sourceMap.get("action")); - assertEquals("transport", sourceMap.get("origin_type")); - if (message instanceof IndicesRequest) { - List indices = (List) sourceMap.get("indices"); - assertThat(indices, containsInAnyOrder((Object[]) ((IndicesRequest) message).indices())); - } - assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); - } - - public void testAnonymousAccessDeniedRest() throws Exception { - initialize(); - RestRequest request = mockRestRequest(); - auditor.anonymousAccessDenied(randomAlphaOfLengthBetween(6, 12), request); - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - - assertAuditMessage(hit, "rest", "anonymous_access_denied"); - Map sourceMap = hit.getSourceAsMap(); - assertThat(NetworkAddress.format(InetAddress.getLoopbackAddress()), equalTo(sourceMap.get("origin_address"))); - assertThat("_uri", equalTo(sourceMap.get("uri"))); - assertThat(sourceMap.get("origin_type"), is("rest")); - assertRequestBody(sourceMap); - } - - public void testAuthenticationFailedTransport() throws Exception { - initialize(); - TransportMessage message = randomBoolean() ? 
new RemoteHostMockMessage() : new LocalHostMockMessage(); - auditor.authenticationFailed(randomAlphaOfLengthBetween(6, 12), new MockToken(), "_action", message); - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - Map sourceMap = hit.getSourceAsMap(); - assertAuditMessage(hit, "transport", "authentication_failed"); - - if (message instanceof RemoteHostMockMessage) { - assertEquals(remoteAddress.getAddress(), sourceMap.get("origin_address")); - } else { - assertEquals(localAddress.getAddress(), sourceMap.get("origin_address")); - } - - assertEquals("_principal", sourceMap.get("principal")); - assertEquals("_action", sourceMap.get("action")); - assertEquals("transport", sourceMap.get("origin_type")); - assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); - } - - public void testAuthenticationFailedTransportNoToken() throws Exception { - initialize(); - TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); - auditor.authenticationFailed(randomAlphaOfLengthBetween(6, 12), "_action", message); - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - - assertAuditMessage(hit, "transport", "authentication_failed"); - Map sourceMap = hit.getSourceAsMap(); - if (message instanceof RemoteHostMockMessage) { - assertEquals(remoteAddress.getAddress(), sourceMap.get("origin_address")); - } else { - assertEquals(localAddress.getAddress(), sourceMap.get("origin_address")); - } - - assertThat(sourceMap.get("principal"), nullValue()); - assertEquals("_action", sourceMap.get("action")); - assertEquals("transport", sourceMap.get("origin_type")); - if (message instanceof IndicesRequest) { - List indices = (List) sourceMap.get("indices"); - assertThat(indices, containsInAnyOrder((Object[]) ((IndicesRequest) message).indices())); - } - assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); - } - - public void testAuthenticationFailedRest() 
throws Exception { - initialize(); - RestRequest request = mockRestRequest(); - auditor.authenticationFailed(randomAlphaOfLengthBetween(6, 12), new MockToken(), request); - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - - assertAuditMessage(hit, "rest", "authentication_failed"); - Map sourceMap = hit.getSourceAsMap(); - assertThat(sourceMap.get("principal"), is((Object) "_principal")); - assertThat("127.0.0.1", equalTo(sourceMap.get("origin_address"))); - assertThat("_uri", equalTo(sourceMap.get("uri"))); - assertThat(sourceMap.get("origin_type"), is("rest")); - assertRequestBody(sourceMap); - } - - public void testAuthenticationFailedRestNoToken() throws Exception { - initialize(); - RestRequest request = mockRestRequest(); - auditor.authenticationFailed(randomAlphaOfLengthBetween(6, 12), request); - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - - assertAuditMessage(hit, "rest", "authentication_failed"); - Map sourceMap = hit.getSourceAsMap(); - assertThat(sourceMap.get("principal"), nullValue()); - assertThat("127.0.0.1", equalTo(sourceMap.get("origin_address"))); - assertThat("_uri", equalTo(sourceMap.get("uri"))); - assertThat(sourceMap.get("origin_type"), is("rest")); - assertRequestBody(sourceMap); - } - - public void testAuthenticationFailedTransportRealm() throws Exception { - initialize(); - TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); - auditor.authenticationFailed(randomAlphaOfLengthBetween(6, 12), "_realm", new MockToken(), "_action", message); - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - - assertAuditMessage(hit, "transport", "realm_authentication_failed"); - Map sourceMap = hit.getSourceAsMap(); - - if (message instanceof RemoteHostMockMessage) { - assertEquals(remoteAddress.getAddress(), sourceMap.get("origin_address")); - } else { - assertEquals(localAddress.getAddress(), sourceMap.get("origin_address")); - 
} - - assertEquals("transport", sourceMap.get("origin_type")); - assertEquals("_principal", sourceMap.get("principal")); - assertEquals("_action", sourceMap.get("action")); - assertEquals("_realm", sourceMap.get("realm")); - if (message instanceof IndicesRequest) { - List indices = (List) sourceMap.get("indices"); - assertThat(indices, containsInAnyOrder((Object[]) ((IndicesRequest) message).indices())); - } - assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); - } - - public void testAuthenticationFailedRestRealm() throws Exception { - initialize(); - RestRequest request = mockRestRequest(); - auditor.authenticationFailed(randomAlphaOfLengthBetween(6, 12), "_realm", new MockToken(), request); - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - - assertAuditMessage(hit, "rest", "realm_authentication_failed"); - Map sourceMap = hit.getSourceAsMap(); - assertThat("127.0.0.1", equalTo(sourceMap.get("origin_address"))); - assertThat("_uri", equalTo(sourceMap.get("uri"))); - assertEquals("_realm", sourceMap.get("realm")); - assertThat(sourceMap.get("origin_type"), is("rest")); - assertRequestBody(sourceMap); - } - - public void testAccessGranted() throws Exception { - initialize(); - TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); - final boolean runAs = randomBoolean(); - User user; - if (runAs) { - user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); - } else { - user = new User("_username", new String[]{"r1"}); - } - String role = randomAlphaOfLengthBetween(1, 6); - auditor.accessGranted(randomAlphaOfLengthBetween(6, 12), createAuthentication(user), "_action", message, new String[] { role }); - - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - assertAuditMessage(hit, "transport", "access_granted"); - Map sourceMap = hit.getSourceAsMap(); - assertEquals("transport", 
sourceMap.get("origin_type")); - if (runAs) { - assertThat(sourceMap.get("principal"), is("running as")); - assertThat(sourceMap.get("realm"), is("lookRealm")); - assertThat(sourceMap.get("run_by_principal"), is("_username")); - assertThat(sourceMap.get("run_by_realm"), is("authRealm")); - } else { - assertThat(sourceMap.get("principal"), is("_username")); - assertThat(sourceMap.get("realm"), is("authRealm")); - } - assertEquals("_action", sourceMap.get("action")); - assertThat((Iterable) sourceMap.get(IndexAuditTrail.Field.ROLE_NAMES), containsInAnyOrder(role)); - if (message instanceof IndicesRequest) { - List indices = (List) sourceMap.get("indices"); - assertThat(indices, containsInAnyOrder((Object[]) ((IndicesRequest) message).indices())); - } - assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); - } - - public void testSystemAccessGranted() throws Exception { - initialize(new String[] { "system_access_granted" }, null); - TransportMessage message = randomBoolean() ? 
new RemoteHostMockMessage() : new LocalHostMockMessage(); - String role = randomAlphaOfLengthBetween(1, 6); - auditor.accessGranted(randomAlphaOfLength(8), createAuthentication(SystemUser.INSTANCE), "internal:_action", message, - new String[] { role }); - - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - assertAuditMessage(hit, "transport", "access_granted"); - Map sourceMap = hit.getSourceAsMap(); - assertEquals("transport", sourceMap.get("origin_type")); - assertEquals(SystemUser.INSTANCE.principal(), sourceMap.get("principal")); - assertThat(sourceMap.get("realm"), is("authRealm")); - assertEquals("internal:_action", sourceMap.get("action")); - assertThat((Iterable) sourceMap.get(IndexAuditTrail.Field.ROLE_NAMES), containsInAnyOrder(role)); - assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); - } - - public void testAccessDenied() throws Exception { - initialize(); - TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); - final boolean runAs = randomBoolean(); - User user; - if (runAs) { - user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); - } else { - user = new User("_username", new String[]{"r1"}); - } - String role = randomAlphaOfLengthBetween(1, 6); - auditor.accessDenied(randomAlphaOfLengthBetween(6, 12), createAuthentication(user), "_action", message, new String[] { role }); - - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - Map sourceMap = hit.getSourceAsMap(); - assertAuditMessage(hit, "transport", "access_denied"); - assertEquals("transport", sourceMap.get("origin_type")); - if (runAs) { - assertThat(sourceMap.get("principal"), is("running as")); - assertThat(sourceMap.get("realm"), is("lookRealm")); - assertThat(sourceMap.get("run_by_principal"), is("_username")); - assertThat(sourceMap.get("run_by_realm"), is("authRealm")); - } else { - 
assertThat(sourceMap.get("principal"), is("_username")); - assertThat(sourceMap.get("realm"), is("authRealm")); - } - assertEquals("_action", sourceMap.get("action")); - if (message instanceof IndicesRequest) { - List indices = (List) sourceMap.get("indices"); - assertThat(indices, containsInAnyOrder((Object[]) ((IndicesRequest) message).indices())); - } - assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); - assertThat((Iterable) sourceMap.get(IndexAuditTrail.Field.ROLE_NAMES), containsInAnyOrder(role)); - } - - public void testTamperedRequestRest() throws Exception { - initialize(); - RestRequest request = mockRestRequest(); - auditor.tamperedRequest(randomAlphaOfLengthBetween(6, 12), request); - - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - assertAuditMessage(hit, "rest", "tampered_request"); - Map sourceMap = hit.getSourceAsMap(); - assertThat(sourceMap.get("principal"), nullValue()); - assertThat("127.0.0.1", equalTo(sourceMap.get("origin_address"))); - assertThat("_uri", equalTo(sourceMap.get("uri"))); - assertThat(sourceMap.get("origin_type"), is("rest")); - assertRequestBody(sourceMap); - } - - public void testTamperedRequest() throws Exception { - initialize(); - TransportRequest message = new RemoteHostMockTransportRequest(); - auditor.tamperedRequest(randomAlphaOfLengthBetween(6, 12), "_action", message); - - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - Map sourceMap = hit.getSourceAsMap(); - assertAuditMessage(hit, "transport", "tampered_request"); - assertEquals("transport", sourceMap.get("origin_type")); - assertThat(sourceMap.get("principal"), is(nullValue())); - assertEquals("_action", sourceMap.get("action")); - assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); - } - - public void testTamperedRequestWithUser() throws Exception { - initialize(); - TransportRequest message = new RemoteHostMockTransportRequest(); - final boolean runAs = randomBoolean(); - User 
user; - if (runAs) { - user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); - } else { - user = new User("_username", new String[]{"r1"}); - } - auditor.tamperedRequest(randomAlphaOfLengthBetween(6, 12), user, "_action", message); - - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - - assertAuditMessage(hit, "transport", "tampered_request"); - Map sourceMap = hit.getSourceAsMap(); - assertEquals("transport", sourceMap.get("origin_type")); - if (runAs) { - assertThat(sourceMap.get("principal"), is("running as")); - assertThat(sourceMap.get("run_by_principal"), is("_username")); - } else { - assertEquals("_username", sourceMap.get("principal")); - } - assertEquals("_action", sourceMap.get("action")); - assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); - } - - public void testConnectionGranted() throws Exception { - initialize(); - InetAddress inetAddress = InetAddress.getLoopbackAddress(); - SecurityIpFilterRule rule = IPFilter.DEFAULT_PROFILE_ACCEPT_ALL; - auditor.connectionGranted(inetAddress, "default", rule); - - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - - assertAuditMessage(hit, "ip_filter", "connection_granted"); - Map sourceMap = hit.getSourceAsMap(); - assertEquals("allow default:accept_all", sourceMap.get("rule")); - assertEquals("default", sourceMap.get("transport_profile")); - } - - public void testConnectionDenied() throws Exception { - initialize(); - InetAddress inetAddress = InetAddress.getLoopbackAddress(); - SecurityIpFilterRule rule = new SecurityIpFilterRule(false, "_all"); - auditor.connectionDenied(inetAddress, "default", rule); - - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - - assertAuditMessage(hit, "ip_filter", "connection_denied"); - Map sourceMap = hit.getSourceAsMap(); - assertEquals("deny _all", sourceMap.get("rule")); - assertEquals("default", sourceMap.get("transport_profile")); - } - - public void 
testRunAsGranted() throws Exception { - initialize(); - TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); - User user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); - String role = randomAlphaOfLengthBetween(1, 6); - auditor.runAsGranted(randomAlphaOfLengthBetween(6, 12), createAuthentication(user), "_action", message, new String[] { role }); - - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - assertAuditMessage(hit, "transport", "run_as_granted"); - Map sourceMap = hit.getSourceAsMap(); - assertEquals("transport", sourceMap.get("origin_type")); - assertThat(sourceMap.get("principal"), is("_username")); - assertThat(sourceMap.get("realm"), is("authRealm")); - assertThat(sourceMap.get("run_as_principal"), is("running as")); - assertThat(sourceMap.get("run_as_realm"), is("lookRealm")); - assertThat((Iterable) sourceMap.get(IndexAuditTrail.Field.ROLE_NAMES), containsInAnyOrder(role)); - assertEquals("_action", sourceMap.get("action")); - assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); - } - - public void testRunAsDenied() throws Exception { - initialize(); - TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); - User user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); - auditor.runAsDenied(randomAlphaOfLengthBetween(6, 12), createAuthentication(user), "_action", message, new String[] { "r1" }); - - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - assertAuditMessage(hit, "transport", "run_as_denied"); - Map sourceMap = hit.getSourceAsMap(); - assertEquals("transport", sourceMap.get("origin_type")); - assertThat(sourceMap.get("principal"), is("_username")); - assertThat(sourceMap.get("realm"), is("authRealm")); - assertThat(sourceMap.get("run_as_principal"), is("running 
as")); - assertThat(sourceMap.get("run_as_realm"), is("lookRealm")); - assertEquals("_action", sourceMap.get("action")); - assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); - } - - public void testAuthenticationSuccessRest() throws Exception { - initialize(); - RestRequest request = mockRestRequest(); - final boolean runAs = randomBoolean(); - User user; - if (runAs) { - user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); - } else { - user = new User("_username", new String[] { "r1" }); - } - String realm = "_realm"; - auditor.authenticationSuccess(randomAlphaOfLengthBetween(6, 12), realm, user, request); - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - - assertAuditMessage(hit, "rest", "authentication_success"); - Map sourceMap = hit.getSourceAsMap(); - assertThat("_uri", equalTo(sourceMap.get("uri"))); - assertRequestBody(sourceMap); - if (runAs) { - assertThat(sourceMap.get("principal"), is("running as")); - assertThat(sourceMap.get("run_by_principal"), is("_username")); - } else { - assertEquals("_username", sourceMap.get("principal")); - } - assertEquals("_realm", sourceMap.get("realm")); - } - - public void testAuthenticationSuccessTransport() throws Exception { - initialize(); - TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); - final boolean runAs = randomBoolean(); - User user; - if (runAs) { - user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); - } else { - user = new User("_username", new String[] { "r1" }); - } - String realm = "_realm"; - auditor.authenticationSuccess(randomAlphaOfLengthBetween(6, 12), realm, user, "_action", message); - - SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); - Map sourceMap = hit.getSourceAsMap(); - assertAuditMessage(hit, "transport", "authentication_success"); - assertEquals("transport", 
sourceMap.get("origin_type")); - if (runAs) { - assertThat(sourceMap.get("principal"), is("running as")); - assertThat(sourceMap.get("run_by_principal"), is("_username")); - } else { - assertEquals("_username", sourceMap.get("principal")); - } - assertEquals("_action", sourceMap.get("action")); - assertEquals("_realm", sourceMap.get("realm")); - assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); - } - - private void assertAuditMessage(SearchHit hit, String layer, String type) { - Map sourceMap = hit.getSourceAsMap(); - assertThat(sourceMap.get("@timestamp"), notNullValue()); - DateTime dateTime = ISODateTimeFormat.dateTimeParser().withZoneUTC().parseDateTime((String) sourceMap.get("@timestamp")); - final DateTime now = DateTime.now(DateTimeZone.UTC); - assertThat(dateTime + " should be on/before " + now, dateTime.isAfter(now), equalTo(false)); - - assertThat(remoteAddress.getAddress(), equalTo(sourceMap.get("node_host_name"))); - assertThat(remoteAddress.getAddress(), equalTo(sourceMap.get("node_host_address"))); - - assertEquals(layer, sourceMap.get("layer")); - assertEquals(type, sourceMap.get("event_type")); - } - - private void assertRequestBody(Map sourceMap) { - if (includeRequestBody) { - assertThat(sourceMap.get("request_body"), notNullValue()); - } else { - assertThat(sourceMap.get("request_body"), nullValue()); - } - } - private class LocalHostMockMessage extends TransportMessage { - LocalHostMockMessage() { - remoteAddress(localAddress); - } - } - - private class RemoteHostMockMessage extends TransportMessage { - RemoteHostMockMessage() throws Exception { - remoteAddress(remoteAddress); - } - } - - private class RemoteHostMockTransportRequest extends TransportRequest { - RemoteHostMockTransportRequest() throws Exception { - remoteAddress(remoteAddress); - } - } - - private class MockIndicesTransportMessage extends RemoteHostMockMessage implements IndicesRequest { - MockIndicesTransportMessage() throws Exception { - super(); - } - 
- @Override - public String[] indices() { - return new String[] { "foo", "bar", "baz" }; - } - - @Override - public IndicesOptions indicesOptions() { - return null; - } - } - - private static class MockToken implements AuthenticationToken { - @Override - public String principal() { - return "_principal"; - } - - @Override - public Object credentials() { - fail("it's not allowed to print the credentials of the auth token"); - return null; - } - - @Override - public void clearCredentials() { - } - } - - private RestRequest mockRestRequest() { - RestRequest request = mock(RestRequest.class); - HttpChannel httpChannel = mock(HttpChannel.class); - when(request.getHttpChannel()).thenReturn(httpChannel); - when(httpChannel.getRemoteAddress()).thenReturn(new InetSocketAddress(InetAddress.getLoopbackAddress(), 9200)); - when(request.uri()).thenReturn("_uri"); - return request; - } - - private SearchHit getIndexedAuditMessage(Message message) throws InterruptedException { - assertNotNull("no audit message was enqueued", message); - final String indexName = IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, message.timestamp, rollover); - ensureYellowAndNoInitializingShards(indexName); - GetSettingsResponse settingsResponse = getClient().admin().indices().prepareGetSettings(indexName).get(); - assertThat(settingsResponse.getSetting(indexName, "index.number_of_shards"), is(Integer.toString(numShards))); - assertThat(settingsResponse.getSetting(indexName, "index.number_of_replicas"), is(Integer.toString(numReplicas))); - - final SetOnce searchResponseSetOnce = new SetOnce<>(); - final boolean found = awaitBusy(() -> { - try { - SearchResponse searchResponse = getClient() - .prepareSearch(indexName) - .setTypes(IndexAuditTrail.DOC_TYPE) - .get(); - if (searchResponse.getHits().getTotalHits().value > 0L) { - searchResponseSetOnce.set(searchResponse); - return true; - } - } catch (Exception e) { - logger.debug("caught exception while executing search", e); - } - 
return false; - }); - assertThat("no audit document exists!", found, is(true)); - SearchResponse response = searchResponseSetOnce.get(); - assertNotNull(response); - - assertEquals(1, response.getHits().getTotalHits().value); - return response.getHits().getHits()[0]; - } - - @Override - public ClusterHealthStatus ensureYellowAndNoInitializingShards(String... indices) { - if (remoteIndexing == false) { - return super.ensureYellowAndNoInitializingShards(indices); - } - - // pretty ugly but just a rip of ensureYellowAndNoInitializingShards that uses a different client - ClusterHealthResponse actionGet = getClient().admin().cluster().health(Requests.clusterHealthRequest(indices) - .waitForNoRelocatingShards(true) - .waitForYellowStatus() - .waitForEvents(Priority.LANGUID) - .waitForNoInitializingShards(true)) - .actionGet(); - if (actionGet.isTimedOut()) { - logger.info("ensureYellow timed out, cluster state:\n{}\n{}", - getClient().admin().cluster().prepareState().get().getState(), - getClient().admin().cluster().preparePendingClusterTasks().get()); - assertThat("timed out waiting for yellow", actionGet.isTimedOut(), equalTo(false)); - } - - logger.debug("indices {} are yellow", indices.length == 0 ? "[_all]" : indices); - return actionGet.getStatus(); - } - - private static Authentication createAuthentication(User user) { - final RealmRef lookedUpBy = user.authenticatedUser() == user ? 
null : new RealmRef("lookRealm", "up", "by"); - return new Authentication(user, new RealmRef("authRealm", "test", "foo"), lookedUpBy); - } -} - diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java deleted file mode 100644 index bc893538642d0..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security.audit.index; - -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.test.SecuritySettingsSource; -import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.xpack.security.audit.AuditTrail; -import org.elasticsearch.xpack.security.audit.AuditTrailService; -import org.elasticsearch.xpack.security.support.SecurityIndexManager; -import org.junit.After; -import org.junit.Before; - -import java.io.Closeable; -import java.io.IOException; -import 
java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Optional; -import java.util.Set; -import java.util.stream.StreamSupport; - -import static org.elasticsearch.test.InternalTestCluster.clusterName; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; - -/** - * This test checks to ensure that the IndexAuditTrail starts properly when indexing to a remote cluster. The cluster - * started by the integration tests is indexed into by the remote cluster started before the test. - * - * The cluster started by the integrations tests may also index into itself... - */ -@ClusterScope(scope = Scope.TEST, numDataNodes = 1, numClientNodes = 0, transportClientRatio = 0.0, supportsDedicatedMasters = false) -@TestLogging("org.elasticsearch.xpack.security.audit.index:TRACE") -public class RemoteIndexAuditTrailStartingTests extends SecurityIntegTestCase { - - public static final String SECOND_CLUSTER_NODE_PREFIX = "remote_" + TEST_CLUSTER_NODE_PREFIX; - - private InternalTestCluster remoteCluster; - - private final boolean sslEnabled = randomBoolean(); - private final boolean localAudit = randomBoolean(); - private final String outputs = randomFrom("index", "logfile", "index,logfile"); - - @Override - public boolean transportSSLEnabled() { - return sslEnabled; - } - - @Override - public Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("xpack.security.audit.enabled", localAudit) - .put("xpack.security.audit.outputs", outputs) - .build(); - } - - @Override - protected Set excludeTemplates() { - return Sets.newHashSet(SecurityIndexManager.SECURITY_TEMPLATE_NAME, IndexAuditTrail.INDEX_TEMPLATE_NAME); - } - - @Override - protected int numberOfShards() { - return 1; // limit ourselves to a single shard in order to avoid timeout issues with large numbers of shards in tests - } - - @Before - public void startRemoteCluster() throws IOException, 
InterruptedException { - final List addresses = new ArrayList<>(); - // get addresses for current cluster - NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet(); - final String clusterName = response.getClusterName().value(); - for (NodeInfo nodeInfo : response.getNodes()) { - TransportAddress address = nodeInfo.getTransport().address().publishAddress(); - addresses.add(address.address().getHostString() + ":" + address.address().getPort()); - } - - // create another cluster - String cluster2Name = clusterName(Scope.TEST.name(), randomLong()); - - // Setup a second test cluster with a single node, security enabled, and SSL - final int numNodes = 1; - SecuritySettingsSource cluster2SettingsSource = - new SecuritySettingsSource(sslEnabled, createTempDir(), Scope.TEST) { - @Override - public Settings nodeSettings(int nodeOrdinal) { - Settings.Builder builder = Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - // Disable native ML autodetect_process as the c++ controller won't be available -// .put(MachineLearningField.AUTODETECT_PROCESS.getKey(), false) - .put("xpack.security.audit.enabled", true) - .put("xpack.security.audit.outputs", randomFrom("index", "index,logfile")) - .putList("xpack.security.audit.index.client.hosts", addresses.toArray(new String[addresses.size()])) - .put("xpack.security.audit.index.client.cluster.name", clusterName) - .put("xpack.security.audit.index.client.xpack.security.user", - TEST_USER_NAME + ":" + SecuritySettingsSourceField.TEST_PASSWORD) - .put("xpack.security.audit.index.settings.index.number_of_shards", 1) - .put("xpack.security.audit.index.settings.index.number_of_replicas", 0); - - addClientSSLSettings(builder, "xpack.security.audit.index.client.xpack.security.transport."); - builder.put("xpack.security.audit.index.client.xpack.security.transport.ssl.enabled", sslEnabled); - return builder.build(); - } - }; - remoteCluster = new InternalTestCluster(randomLong(), 
createTempDir(), false, true, numNodes, numNodes, - cluster2Name, cluster2SettingsSource, 0, SECOND_CLUSTER_NODE_PREFIX, getMockPlugins(), getClientWrapper()); - remoteCluster.beforeTest(random(), 0.0); - assertNoTimeout(remoteCluster.client().admin().cluster().prepareHealth().setWaitForGreenStatus().get()); - } - - @After - public void stopRemoteCluster() throws Exception { - List toStop = new ArrayList<>(); - // stop the index audit trail so that the shards aren't locked causing the test to fail - toStop.add(() -> StreamSupport.stream(internalCluster().getInstances(AuditTrailService.class).spliterator(), false) - .map(s -> s.getAuditTrails()).flatMap(List::stream) - .filter(t -> t.name().equals(IndexAuditTrail.NAME)) - .forEach((auditTrail) -> ((IndexAuditTrail) auditTrail).stop())); - // first stop both audit trails otherwise we keep on indexing - if (remoteCluster != null) { - toStop.add(() -> StreamSupport.stream(remoteCluster.getInstances(AuditTrailService.class).spliterator(), false) - .map(s -> s.getAuditTrails()).flatMap(List::stream) - .filter(t -> t.name().equals(IndexAuditTrail.NAME)) - .forEach((auditTrail) -> ((IndexAuditTrail) auditTrail).stop())); - toStop.add(() -> remoteCluster.wipe(excludeTemplates())); - toStop.add(remoteCluster::afterTest); - toStop.add(remoteCluster); - } - - - IOUtils.close(toStop); - } - - public void testThatRemoteAuditInstancesAreStarted() throws Exception { - logger.info("Test configuration: ssl=[{}] localAudit=[{}][{}]", sslEnabled, localAudit, outputs); - // we ensure that all instances present are started otherwise we will have issues - // and race with the shutdown logic - for (InternalTestCluster cluster : Arrays.asList(remoteCluster, internalCluster())) { - for (AuditTrailService auditTrailService : cluster.getInstances(AuditTrailService.class)) { - Optional auditTrail = auditTrailService.getAuditTrails().stream() - .filter(t -> t.name().equals(IndexAuditTrail.NAME)).findAny(); - if (cluster == remoteCluster || 
(localAudit && outputs.contains("index"))) { - // remote cluster must be present and only if we do local audit and output to an index we are good on the local one - // as well. - assertTrue(auditTrail.isPresent()); - } - if (auditTrail.isPresent()) { - IndexAuditTrail indexAuditTrail = (IndexAuditTrail) auditTrail.get(); - assertBusy(() -> assertSame("trail not started remoteCluster: " + (remoteCluster == cluster), - indexAuditTrail.state(), IndexAuditTrail.State.STARTED)); - } - } - } - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java index e05f4620ccca2..23408f5668ec9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java @@ -62,7 +62,6 @@ protected Settings nodeSettings(int nodeOrdinal) { // enable auditing settingsBuilder.put("xpack.security.audit.enabled", "true"); - settingsBuilder.put("xpack.security.audit.outputs", "logfile"); // add only startup filter policies settingsBuilder.put(startupFilterSettings); return settingsBuilder.build(); diff --git a/x-pack/plugin/sql/qa/security/build.gradle b/x-pack/plugin/sql/qa/security/build.gradle index d239518df81fe..69389b47accec 100644 --- a/x-pack/plugin/sql/qa/security/build.gradle +++ b/x-pack/plugin/sql/qa/security/build.gradle @@ -29,7 +29,6 @@ subprojects { integTestCluster { // Setup auditing so we can use it in some tests setting 'xpack.security.audit.enabled', 'true' - setting 'xpack.security.audit.outputs', 'logfile' setting 'xpack.security.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' // Setup roles used by tests @@ -49,7 +48,6 @@ subprojects { runqa { // Setup auditing so we 
can use it in some tests setting 'xpack.security.audit.enabled', 'true' - setting 'xpack.security.audit.outputs', 'logfile' setting 'xpack.security.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' // Setup roles used by tests diff --git a/x-pack/qa/audit-tests/build.gradle b/x-pack/qa/audit-tests/build.gradle deleted file mode 100644 index 6afe5f01ae1e3..0000000000000 --- a/x-pack/qa/audit-tests/build.gradle +++ /dev/null @@ -1,40 +0,0 @@ -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' - -dependencies { - testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') -} - -String outputDir = "${buildDir}/generated-resources/${project.name}" -task copyXPackPluginProps(type: Copy) { // wth is this? - from project(xpackModule('core')).file('src/main/plugin-metadata') - from project(xpackModule('core')).tasks.pluginProperties - from project(xpackModule('security')).file('src/main/plugin-metadata') - from project(xpackModule('security')).tasks.pluginProperties - into outputDir -} -project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) - -integTestCluster { - distribution 'default' - setting 'xpack.ilm.enabled', 'false' - setting 'xpack.ml.enabled', 'false' - setting 'xpack.monitoring.enabled', 'false' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.audit.enabled', 'true' - setting 'xpack.security.audit.outputs', 'index' - setting 'xpack.license.self_generated.type', 'trial' - setting 'logger.level', 'DEBUG' - setupCommand 'setupDummyUser', - 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - waitCondition = { node, ant -> - File tmpFile = new File(node.cwd, 'wait.success') - ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", - dest: tmpFile.toString(), - username: 'test_user', - password: 'x-pack-test-password', - ignoreerrors: true, - 
retries: 10) - return tmpFile.exists() - } -} diff --git a/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java b/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java deleted file mode 100644 index f66e089b2d382..0000000000000 --- a/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security.audit; - -import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; -import org.elasticsearch.client.Response; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.TestCluster; -import org.elasticsearch.xpack.core.XPackClientPlugin; -import org.elasticsearch.xpack.core.security.SecurityField; -import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; 
-import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.Arrays; -import java.util.Collection; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; - -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.is; - -public class IndexAuditIT extends ESIntegTestCase { - private static final String USER = "test_user"; - private static final String PASS = "x-pack-test-password"; - - @Override - protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException { - TestCluster testCluster = super.buildTestCluster(scope, seed); - return new TestCluster(seed) { - - @Override - public void afterTest() throws IOException { - testCluster.afterTest(); - } - - @Override - public Client client() { - return testCluster.client(); - } - - @Override - public int size() { - return testCluster.size(); - } - - @Override - public int numDataNodes() { - return testCluster.numDataNodes(); - } - - @Override - public int numDataAndMasterNodes() { - return testCluster.numDataAndMasterNodes(); - } - - @Override - public InetSocketAddress[] httpAddresses() { - return testCluster.httpAddresses(); - } - - @Override - public void close() throws IOException { - testCluster.close(); - } - - @Override - public void ensureEstimatedStats() { - // stats are not going to be accurate for these tests since the index audit trail - // is running and changing the values so we wrap the test cluster to skip these - // checks - } - - @Override - public String getClusterName() { - return testCluster.getClusterName(); - } - - @Override - public Iterable getClients() { - return testCluster.getClients(); - } - - @Override - public NamedWriteableRegistry getNamedWriteableRegistry() { - return testCluster.getNamedWriteableRegistry(); - } - }; - } - - public void 
testIndexAuditTrailWorking() throws Exception { - Request request = new Request("GET", "/"); - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.addHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, - UsernamePasswordToken.basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray()))); - request.setOptions(options); - Response response = getRestClient().performRequest(request); - final AtomicReference lastClusterState = new AtomicReference<>(); - final boolean found = awaitSecurityAuditIndex(lastClusterState, QueryBuilders.matchQuery("principal", USER)); - - assertTrue("Did not find security audit index. Current cluster state:\n" + lastClusterState.get().toString(), found); - - SearchResponse searchResponse = client().prepareSearch(".security_audit_log*").setQuery( - QueryBuilders.matchQuery("principal", USER)).get(); - assertThat(searchResponse.getHits().getHits().length, greaterThan(0)); - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("principal"), is(USER)); - } - - public void testAuditTrailTemplateIsRecreatedAfterDelete() throws Exception { - // this is already "tested" by the test framework since we wipe the templates before and after, - // but lets be explicit about the behavior - awaitIndexTemplateCreation(); - - // delete the template - AcknowledgedResponse deleteResponse = client().admin().indices() - .prepareDeleteTemplate(IndexAuditTrail.INDEX_TEMPLATE_NAME).execute().actionGet(); - assertThat(deleteResponse.isAcknowledged(), is(true)); - awaitIndexTemplateCreation(); - } - - public void testOpaqueIdWorking() throws Exception { - Request request = new Request("GET", "/"); - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.addHeader(Task.X_OPAQUE_ID, "foo"); - options.addHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, - UsernamePasswordToken.basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray()))); - request.setOptions(options); - Response response = 
getRestClient().performRequest(request); - assertThat(response.getStatusLine().getStatusCode(), is(200)); - final AtomicReference lastClusterState = new AtomicReference<>(); - final boolean found = awaitSecurityAuditIndex(lastClusterState, QueryBuilders.matchQuery("opaque_id", "foo")); - - assertTrue("Did not find security audit index. Current cluster state:\n" + lastClusterState.get().toString(), found); - - SearchResponse searchResponse = client().prepareSearch(".security_audit_log*").setQuery( - QueryBuilders.matchQuery("opaque_id", "foo")).get(); - assertThat(searchResponse.getHits().getHits().length, greaterThan(0)); - - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("opaque_id"), is("foo")); - } - - private boolean awaitSecurityAuditIndex(AtomicReference lastClusterState, - QueryBuilder query) throws InterruptedException { - final AtomicBoolean indexExists = new AtomicBoolean(false); - return awaitBusy(() -> { - if (indexExists.get() == false) { - ClusterState state = client().admin().cluster().prepareState().get().getState(); - lastClusterState.set(state); - for (ObjectCursor cursor : state.getMetaData().getIndices().keys()) { - if (cursor.value.startsWith(".security_audit_log")) { - logger.info("found audit index [{}]", cursor.value); - indexExists.set(true); - break; - } - } - - if (indexExists.get() == false) { - return false; - } - } - - ensureYellowAndNoInitializingShards(".security_audit_log*"); - logger.info("security audit log index is yellow"); - ClusterState state = client().admin().cluster().prepareState().get().getState(); - lastClusterState.set(state); - - logger.info("refreshing audit indices"); - client().admin().indices().prepareRefresh(".security_audit_log*").get(); - logger.info("refreshed audit indices"); - return client().prepareSearch(".security_audit_log*").setQuery(query) - .get().getHits().getTotalHits().value > 0; - }, 60L, TimeUnit.SECONDS); - } - - private void awaitIndexTemplateCreation() throws 
InterruptedException { - boolean found = awaitBusy(() -> { - GetIndexTemplatesResponse response = client().admin().indices() - .prepareGetTemplates(IndexAuditTrail.INDEX_TEMPLATE_NAME).execute().actionGet(); - if (response.getIndexTemplates().size() > 0) { - for (IndexTemplateMetaData indexTemplateMetaData : response.getIndexTemplates()) { - if (IndexAuditTrail.INDEX_TEMPLATE_NAME.equals(indexTemplateMetaData.name())) { - return true; - } - } - } - return false; - }); - - assertThat("index template [" + IndexAuditTrail.INDEX_TEMPLATE_NAME + "] was not created", found, is(true)); - } - - @Override - protected Settings externalClusterClientSettings() { - return Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), USER + ":" + PASS) - .put(NetworkModule.TRANSPORT_TYPE_KEY, "security4") - .build(); - } - - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class); - } - -} diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index c6c4634e58b61..04c4367a11305 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -156,7 +156,6 @@ subprojects { setting 'xpack.security.transport.ssl.enabled', 'true' setting 'xpack.security.authc.token.enabled', 'true' setting 'xpack.security.audit.enabled', 'true' - setting 'xpack.security.audit.outputs', 'index' setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' setting 'xpack.security.transport.ssl.keystore.password', 'testnode' dependsOn copyTestNodeKeystore @@ -235,7 +234,6 @@ subprojects { setting 'node.attr.upgraded', 'true' setting 'xpack.security.authc.token.enabled', 'true' setting 'xpack.security.audit.enabled', 'true' - setting 'xpack.security.audit.outputs', 'index' setting 'node.name', "upgraded-node-${stopNode}" dependsOn copyTestNodeKeystore extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') diff --git 
a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java deleted file mode 100644 index cc704d71d0e54..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.upgrades; - -import org.elasticsearch.Version; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.common.Booleans; -import org.hamcrest.Matchers; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.Matchers.hasSize; - -public class IndexAuditUpgradeIT extends AbstractUpgradeTestCase { - - public void testAuditLogs() throws Exception { - assertBusy(() -> { - assertAuditDocsExist(); - assertNumUniqueNodeNameBuckets(expectedNumUniqueNodeNameBuckets()); - }); - } - - private int expectedNumUniqueNodeNameBuckets() throws IOException { - switch (CLUSTER_TYPE) { - case OLD: - // There are three nodes in the initial test cluster - return 3; - case MIXED: - if (false == masterIsNewVersion()) { - return 3; - } - if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) { - // One of the old nodes has been removed and we've added a new node - return 4; - } - // Two of the old nodes have been removed and we've added two new nodes - return 5; - case UPGRADED: - return 6; - default: - throw new IllegalArgumentException("Unsupported cluster type [" + CLUSTER_TYPE + "]"); - } - } - - private void assertAuditDocsExist() throws Exception { - Response response = client().performRequest(new Request("GET", 
"/.security_audit_log*/_count")); - assertEquals(200, response.getStatusLine().getStatusCode()); - Map responseMap = entityAsMap(response); - assertNotNull(responseMap.get("count")); - assertThat((Integer) responseMap.get("count"), Matchers.greaterThanOrEqualTo(1)); - } - - private void assertNumUniqueNodeNameBuckets(int numBuckets) throws Exception { - // call API that will hit all nodes - Map nodesResponse = entityAsMap(client().performRequest(new Request("GET", "/_nodes/_all/info/version"))); - logger.info("all nodes {}", nodesResponse); - - Request aggRequest = new Request("GET", "/.security_audit_log*/_search"); - aggRequest.setJsonEntity( - "{\n" + - " \"aggs\" : {\n" + - " \"nodes\" : {\n" + - " \"terms\" : { \"field\" : \"node_name\" }\n" + - " }\n" + - " }\n" + - "}"); - aggRequest.addParameter("pretty", "true"); - Response aggResponse = client().performRequest(aggRequest); - Map aggResponseMap = entityAsMap(aggResponse); - logger.debug("aggResponse {}", aggResponseMap); - Map aggregations = (Map) aggResponseMap.get("aggregations"); - assertNotNull(aggregations); - Map nodesAgg = (Map) aggregations.get("nodes"); - assertNotNull(nodesAgg); - List buckets = (List) nodesAgg.get("buckets"); - assertNotNull(buckets); - assertThat("Found node buckets " + buckets, buckets, hasSize(numBuckets)); - } - - /** - * Has the master been upgraded to the new version? 
- */ - private boolean masterIsNewVersion() throws IOException { - Map map = entityAsMap(client().performRequest(new Request("GET", "/_nodes/_master"))); - map = (Map) map.get("nodes"); - assertThat(map.values(), hasSize(1)); - map = (Map) map.values().iterator().next(); - Version masterVersion = Version.fromString(map.get("version").toString()); - return Version.CURRENT.equals(masterVersion); - } -} From 64adb5ad5bdeb68490f6664c6907bfe30cf02d78 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 24 Jan 2019 11:39:46 +0100 Subject: [PATCH 10/20] Set acking timeout to 0 on dynamic mapping update (#31140) As acking can fail for any reason (unrelated node being too slow, node disconnecting), it should not be required for acking to succeed in order for index requests with dynamic mapping updates to successfully complete. Relates to #30672 and Closes #30844 --- .../action/index/MappingUpdatedAction.java | 7 ++--- .../master/IndexingMasterFailoverIT.java | 2 +- .../cluster/routing/PrimaryAllocationIT.java | 31 +++++++++++++++++++ .../indices/state/RareClusterStateIT.java | 22 +++++++++++++ 4 files changed, 56 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index 770c6bca26b2f..c34a4196bb524 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.action.index; -import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.client.IndicesAdminClient; @@ -67,7 +66,7 @@ private PutMappingRequestBuilder updateMappingRequest(Index index, String type, throw new 
IllegalArgumentException("_default_ mapping should not be updated"); } return client.preparePutMapping().setConcreteIndex(index).setType(type).setSource(mappingUpdate.toString(), XContentType.JSON) - .setMasterNodeTimeout(timeout).setTimeout(timeout); + .setMasterNodeTimeout(timeout).setTimeout(TimeValue.ZERO); } /** @@ -84,8 +83,6 @@ public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdat * been applied to the master node and propagated to data nodes. */ public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate, TimeValue timeout) { - if (updateMappingRequest(index, type, mappingUpdate, timeout).get().isAcknowledged() == false) { - throw new ElasticsearchTimeoutException("Failed to acknowledge mapping update within [" + timeout + "]"); - } + updateMappingRequest(index, type, mappingUpdate, timeout).get(); } } diff --git a/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java index 2865201f0f9d0..461c92d69f444 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java @@ -66,13 +66,13 @@ protected Settings nodeSettings(int nodeOrdinal) { * This retry logic is implemented in TransportMasterNodeAction and tested by the following master failover scenario. 
*/ @TestLogging("_root:DEBUG") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30844") public void testMasterFailoverDuringIndexingWithMappingChanges() throws Throwable { logger.info("--> start 4 nodes, 3 master, 1 data"); final Settings sharedSettings = Settings.builder() .put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s") // for hitting simulated network failures quickly .put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // for hitting simulated network failures quickly + .put(TestZenDiscovery.USE_ZEN2.getKey(), false) .put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index b106944e97065..a64f509363854 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -20,8 +20,10 @@ */ import com.carrotsearch.hppc.cursors.IntObjectCursor; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.ClusterState; @@ -30,6 +32,7 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand; import 
org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; @@ -91,6 +94,34 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false).build(); } + public void testBulkWeirdScenario() throws Exception { + String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); + internalCluster().startDataOnlyNodes(2); + + assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() + .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get()); + ensureGreen(); + + BulkResponse bulkResponse = client().prepareBulk() + .add(client().prepareIndex().setIndex("test").setType("_doc").setId("1").setSource("field1", "value1")) + .add(client().prepareUpdate().setIndex("test").setType("_doc").setId("1").setDoc("field2", "value2")) + .execute().actionGet(); + + assertThat(bulkResponse.hasFailures(), equalTo(false)); + assertThat(bulkResponse.getItems().length, equalTo(2)); + + logger.info(Strings.toString(bulkResponse, true, true)); + + internalCluster().assertSeqNos(); + + assertThat(bulkResponse.getItems()[0].getResponse().getId(), equalTo("1")); + assertThat(bulkResponse.getItems()[0].getResponse().getVersion(), equalTo(1L)); + assertThat(bulkResponse.getItems()[0].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertThat(bulkResponse.getItems()[1].getResponse().getId(), equalTo("1")); + assertThat(bulkResponse.getItems()[1].getResponse().getVersion(), equalTo(2L)); + assertThat(bulkResponse.getItems()[1].getResponse().getResult(), equalTo(DocWriteResponse.Result.UPDATED)); + } + private void createStaleReplicaScenario(String master) throws Exception { client().prepareIndex("test", "type1").setSource(jsonBuilder() .startObject().field("field", 
"value1").endObject()).get(); diff --git a/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 4302549e2f1fd..a34312b847e3b 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -57,6 +58,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.elasticsearch.action.DocWriteResponse.Result.CREATED; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; @@ -397,6 +399,24 @@ public void onFailure(Exception e) { assertBusy(() -> assertTrue(client().prepareGet("index", "type", "1").get().isExists())); + // index another document, this time using dynamic mappings. + // The ack timeout of 0 on dynamic mapping updates makes it possible for the document to be indexed on the primary, even + // if the dynamic mapping update is not applied on the replica yet. 
+ ActionFuture dynamicMappingsFut = client().prepareIndex("index", "type", "2").setSource("field2", 42).execute(); + + // ...and wait for second mapping to be available on master + assertBusy(() -> { + final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master); + final IndexService indexService = indicesService.indexServiceSafe(index); + assertNotNull(indexService); + final MapperService mapperService = indexService.mapperService(); + DocumentMapper mapper = mapperService.documentMapper("type"); + assertNotNull(mapper); + assertNotNull(mapper.mappers().getMapper("field2")); + }); + + assertBusy(() -> assertTrue(client().prepareGet("index", "type", "2").get().isExists())); + // The mappings have not been propagated to the replica yet as a consequence the document count not be indexed // We wait on purpose to make sure that the document is not indexed because the shard operation is stalled // and not just because it takes time to replicate the indexing request to the replica @@ -415,6 +435,8 @@ public void onFailure(Exception e) { assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()), 2, docResp.getShardInfo().getTotal()); // both shards should have succeeded }); + + assertThat(dynamicMappingsFut.get().getResult(), equalTo(CREATED)); } } From bcf5a4ca479cedd7bf146dd4c8eedaddbba4de7d Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 24 Jan 2019 10:58:02 +0000 Subject: [PATCH 11/20] Mute ClusterDisruptionIT testAckedIndexing Due to https://github.com/elastic/elasticsearch/issues/37810 --- .../java/org/elasticsearch/discovery/ClusterDisruptionIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index a8ce5830106d8..f1e78fd3c6ae6 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ 
b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -84,6 +84,7 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase { "org.elasticsearch.discovery:TRACE,org.elasticsearch.action.support.replication:TRACE," + "org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE," + "org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37810") public void testAckedIndexing() throws Exception { final int seconds = !(TEST_NIGHTLY && rarely()) ? 1 : 5; From b6317ed70bd7772b17e6c8b038b06d8575ba9a2a Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 24 Jan 2019 11:53:49 +0100 Subject: [PATCH 12/20] disabling bwc test while backporting https://github.com/elastic/elasticsearch/pull/37639 --- build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.gradle b/build.gradle index c5611e8b453fb..d4e2616c20263 100644 --- a/build.gradle +++ b/build.gradle @@ -159,8 +159,8 @@ task verifyVersions { * the enabled state of every bwc task. It should be set back to true * after the backport of the backcompat code is complete. 
*/ -final boolean bwc_tests_enabled = true -final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */ +final boolean bwc_tests_enabled = false +final String bwc_tests_disabled_issue = "backporting https://github.com/elastic/elasticsearch/pull/37639" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") From f707fa9e0af4264f2aebb24b43543d15c7a8875f Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Thu, 24 Jan 2019 13:41:58 +0200 Subject: [PATCH 13/20] SQL: Introduce SQL DATE data type (#37693) * SQL: Introduce SQL DATE data type Support ANSI SQL's DATE type by introducing a runtime-only ES SQL date type. Closes: #37340 --- .../reference/sql/functions/grouping.asciidoc | 6 + .../sql/language/data-types.asciidoc | 13 +- .../elasticsearch/xpack/sql/jdbc/EsType.java | 1 + .../xpack/sql/jdbc/JdbcDateUtils.java | 7 +- .../xpack/sql/jdbc/JdbcResultSet.java | 10 +- .../xpack/sql/jdbc/TypeConverter.java | 2 + .../xpack/sql/qa/jdbc/CsvSpecTestCase.java | 1 + .../xpack/sql/qa/jdbc/JdbcAssert.java | 4 + .../sql/qa/src/main/resources/date.csv-spec | 77 ++++++ .../qa/src/main/resources/datetime.sql-spec | 9 +- .../extractor/CompositeKeyExtractor.java | 4 +- .../search/extractor/FieldHitExtractor.java | 4 +- .../xpack/sql/expression/Expressions.java | 23 +- .../expression/function/aggregate/Max.java | 2 +- .../function/grouping/Histogram.java | 2 +- .../scalar/datetime/BaseDateTimeFunction.java | 2 +- .../whitelist/InternalSqlScriptUtils.java | 4 +- .../expression/gen/script/ScriptWeaver.java | 4 +- .../DateTimeArithmeticOperation.java | 7 +- .../predicate/operator/arithmetic/Sub.java | 12 + .../xpack/sql/parser/ExpressionBuilder.java | 6 +- .../xpack/sql/planner/QueryFolder.java | 7 +- .../xpack/sql/planner/QueryTranslator.java | 12 +- 
.../querydsl/agg/GroupByDateHistogram.java | 5 + .../xpack/sql/querydsl/agg/GroupByKey.java | 2 + .../xpack/sql/type/DataType.java | 9 +- .../xpack/sql/type/DataTypeConversion.java | 173 +++++++++--- .../xpack/sql/util/DateUtils.java | 68 ++++- .../analyzer/VerifierErrorMessagesTests.java | 8 +- .../extractor/CompositeKeyExtractorTests.java | 4 +- .../extractor/FieldHitExtractorTests.java | 2 +- .../function/scalar/CastProcessorTests.java | 2 +- .../scalar/datetime/DateTimeTestUtils.java | 6 +- .../sql/parser/EscapedFunctionsTests.java | 2 +- .../logical/command/sys/SysParserTests.java | 2 +- .../logical/command/sys/SysTypesTests.java | 2 +- .../sql/planner/QueryTranslatorTests.java | 50 +++- .../sql/type/DataTypeConversionTests.java | 254 ++++++++++++++---- 38 files changed, 657 insertions(+), 151 deletions(-) create mode 100644 x-pack/plugin/sql/qa/src/main/resources/date.csv-spec diff --git a/docs/reference/sql/functions/grouping.asciidoc b/docs/reference/sql/functions/grouping.asciidoc index 0eee0426ce65a..261066799f893 100644 --- a/docs/reference/sql/functions/grouping.asciidoc +++ b/docs/reference/sql/functions/grouping.asciidoc @@ -76,3 +76,9 @@ Instead one can rewrite the query to move the expression on the histogram _insid ---- include-tagged::{sql-specs}/docs.csv-spec[histogramDateTimeExpression] ---- + +[IMPORTANT] +When the histogram in SQL is applied on **DATE** type instead of **DATETIME**, the interval specified is truncated to +the multiple of a day. E.g.: for `HISTOGRAM(CAST(birth_date AS DATE), INTERVAL '2 3:04' DAY TO MINUTE)` the interval +actually used will be `INTERVAL '2' DAY`. If the interval specified is less than 1 day, e.g.: +`HISTOGRAM(CAST(birth_date AS DATE), INTERVAL '20' HOUR)` then the interval used will be `INTERVAL '1' DAY`. 
diff --git a/docs/reference/sql/language/data-types.asciidoc b/docs/reference/sql/language/data-types.asciidoc index 60bdf0c5f66d3..b42620e0c5415 100644 --- a/docs/reference/sql/language/data-types.asciidoc +++ b/docs/reference/sql/language/data-types.asciidoc @@ -5,9 +5,6 @@ beta[] -Most of {es} <> are available in {es-sql}, as indicated below. -As one can see, all of {es} <> are mapped to the data type with the same -name in {es-sql}, with the exception of **date** data type which is mapped to **datetime** in {es-sql}: [cols="^,^m,^,^"] @@ -46,6 +43,13 @@ s|SQL precision |=== +[NOTE] +Most of {es} <> are available in {es-sql}, as indicated above. +As one can see, all of {es} <> are mapped to the data type with the same +name in {es-sql}, with the exception of **date** data type which is mapped to **datetime** in {es-sql}. +This is to avoid confusion with the ANSI SQL **DATE** (date only) type, which is also supported by {es-sql} +in queries (with the use of <>/<>), +but doesn't correspond to an actual mapping in {es} (see the <> below). Obviously, not all types in {es} have an equivalent in SQL and vice-versa hence why, {es-sql} uses the data type _particularities_ of the former over the latter as ultimately {es} is the backing store. @@ -53,6 +57,8 @@ uses the data type _particularities_ of the former over the latter as ultimately In addition to the types above, {es-sql} also supports at _runtime_ SQL-specific types that do not have an equivalent in {es}. Such types cannot be loaded from {es} (as it does not know about them) however can be used inside {es-sql} in queries or their results. 
+[[es-sql-only-types]] + The table below indicates these types: [cols="^m,^"] @@ -62,6 +68,7 @@ s|SQL type s|SQL precision +| date | 24 | interval_year | 7 | interval_month | 7 | interval_day | 23 diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java index 097bc476bcb09..6d6231bf4303f 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java @@ -28,6 +28,7 @@ public enum EsType implements SQLType { OBJECT( Types.STRUCT), NESTED( Types.STRUCT), BINARY( Types.VARBINARY), + DATE( Types.DATE), DATETIME( Types.TIMESTAMP), IP( Types.VARCHAR), INTERVAL_YEAR( ExtraTypes.INTERVAL_YEAR), diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java index 8fbef88dca5ab..f034f67f186e5 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java @@ -41,10 +41,9 @@ final class JdbcDateUtils { .appendFraction(MILLI_OF_SECOND, 3, 3, true) .appendOffsetId() .toFormatter(Locale.ROOT); - + static long asMillisSinceEpoch(String date) { - ZonedDateTime zdt = ISO_WITH_MILLIS.parse(date, ZonedDateTime::from); - return zdt.toInstant().toEpochMilli(); + return ISO_WITH_MILLIS.parse(date, ZonedDateTime::from).toInstant().toEpochMilli(); } static Date asDate(String date) { @@ -71,7 +70,7 @@ static R asDateTimeField(Object value, Function asDateTimeMethod, } } - private static long utcMillisRemoveTime(long l) { + static long utcMillisRemoveTime(long l) { return l - (l % DAY_IN_MILLIS); } diff --git 
a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java index 8c01b3112effa..f1bce51dd3464 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java @@ -33,6 +33,9 @@ import java.util.function.Function; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.jdbc.JdbcDateUtils.asDateTimeField; +import static org.elasticsearch.xpack.sql.jdbc.JdbcDateUtils.asMillisSinceEpoch; +import static org.elasticsearch.xpack.sql.jdbc.JdbcDateUtils.utcMillisRemoveTime; class JdbcResultSet implements ResultSet, JdbcWrapper { @@ -252,8 +255,11 @@ private Long dateTime(int columnIndex) throws SQLException { if (val == null) { return null; } - return JdbcDateUtils.asDateTimeField(val, JdbcDateUtils::asMillisSinceEpoch, Function.identity()); - }; + return asDateTimeField(val, JdbcDateUtils::asMillisSinceEpoch, Function.identity()); + } + if (EsType.DATE == type) { + return utcMillisRemoveTime(asMillisSinceEpoch(val.toString())); + } return val == null ? 
null : (Long) val; } catch (ClassCastException cce) { throw new SQLException( diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java index 9274e9061d453..469a2d37e5ef4 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java @@ -213,6 +213,8 @@ static Object convert(Object v, EsType columnType, String typeString) throws SQL return doubleValue(v); // Double might be represented as string for infinity and NaN values case FLOAT: return floatValue(v); // Float might be represented as string for infinity and NaN values + case DATE: + return JdbcDateUtils.asDateTimeField(v, JdbcDateUtils::asDate, Date::new); case DATETIME: return JdbcDateUtils.asDateTimeField(v, JdbcDateUtils::asTimestamp, Timestamp::new); case INTERVAL_YEAR: diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java index d8b6375e7ca96..47e0e9c8f90df 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java @@ -36,6 +36,7 @@ public static List readScriptSpec() throws Exception { tests.addAll(readScriptSpec("/fulltext.csv-spec", parser)); tests.addAll(readScriptSpec("/agg.csv-spec", parser)); tests.addAll(readScriptSpec("/columns.csv-spec", parser)); + tests.addAll(readScriptSpec("/date.csv-spec", parser)); tests.addAll(readScriptSpec("/datetime.csv-spec", parser)); tests.addAll(readScriptSpec("/alias.csv-spec", parser)); tests.addAll(readScriptSpec("/null.csv-spec", parser)); diff --git 
a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java index 2817ab6df729e..bcd3d4073eabe 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java @@ -139,6 +139,7 @@ public static void assertResultSetMetadata(ResultSet expected, ResultSet actual, if (expectedType == Types.TIMESTAMP_WITH_TIMEZONE) { expectedType = Types.TIMESTAMP; } + // since csv doesn't support real, we use float instead..... if (expectedType == Types.FLOAT && expected instanceof CsvResultSet) { expectedType = Types.REAL; @@ -204,6 +205,9 @@ private static void doAssertResultSetData(ResultSet expected, ResultSet actual, // fix for CSV which returns the shortName not fully-qualified name if (!columnClassName.contains(".")) { switch (columnClassName) { + case "Date": + columnClassName = "java.sql.Date"; + break; case "Timestamp": columnClassName = "java.sql.Timestamp"; break; diff --git a/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec new file mode 100644 index 0000000000000..f744ea9ca6c70 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec @@ -0,0 +1,77 @@ +// +// Date +// + +dateExtractDateParts +SELECT +DAY(CAST(birth_date AS DATE)) d, +DAY_OF_MONTH(CAST(birth_date AS DATE)) dm, +DAY_OF_WEEK(CAST(birth_date AS DATE)) dw, +DAY_OF_YEAR(CAST(birth_date AS DATE)) dy, +ISO_DAY_OF_WEEK(CAST(birth_date AS DATE)) iso_dw, +WEEK(CAST(birth_date AS DATE)) w, +IW(CAST(birth_date AS DATE)) iso_w, +QUARTER(CAST(birth_date AS DATE)) q, +YEAR(CAST(birth_date AS DATE)) y, +birth_date, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + + d:i | dm:i | dw:i | dy:i | iso_dw:i | w:i |iso_w:i | q:i | y:i | birth_date:ts | l:s +2 |2 |4 |245 |3 |36 |35 |3 |1953 
|1953-09-02T00:00:00Z |Facello +2 |2 |3 |154 |2 |23 |22 |2 |1964 |1964-06-02T00:00:00Z |Simmel +3 |3 |5 |337 |4 |49 |49 |4 |1959 |1959-12-03T00:00:00Z |Bamford +1 |1 |7 |121 |6 |18 |18 |2 |1954 |1954-05-01T00:00:00Z |Koblick +21 |21 |6 |21 |5 |4 |3 |1 |1955 |1955-01-21T00:00:00Z |Maliniak +20 |20 |2 |110 |1 |17 |16 |2 |1953 |1953-04-20T00:00:00Z |Preusig +23 |23 |5 |143 |4 |21 |21 |2 |1957 |1957-05-23T00:00:00Z |Zielinski +19 |19 |4 |50 |3 |8 |8 |1 |1958 |1958-02-19T00:00:00Z |Kalloufi +19 |19 |7 |110 |6 |16 |16 |2 |1952 |1952-04-19T00:00:00Z |Peac +; + + +dateExtractTimePartsTimeSecond +SELECT +SECOND(CAST(birth_date AS DATE)) d, +MINUTE(CAST(birth_date AS DATE)) m, +HOUR(CAST(birth_date AS DATE)) h +FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + + d:i | m:i | h:i +0 |0 |0 +0 |0 |0 +0 |0 |0 +0 |0 |0 +0 |0 |0 +0 |0 |0 +0 |0 |0 +0 |0 |0 +0 |0 |0 +; + +dateAsFilter +SELECT birth_date, last_name FROM "test_emp" WHERE birth_date <= CAST('1955-01-21' AS DATE) ORDER BY emp_no LIMIT 5; + + birth_date:ts | last_name:s +1953-09-02T00:00:00Z |Facello +1954-05-01T00:00:00Z |Koblick +1955-01-21T00:00:00Z |Maliniak +1953-04-20T00:00:00Z |Preusig +1952-04-19T00:00:00Z |Peac +; + +dateAndFunctionAsGroupingKey +SELECT MONTH(CAST(birth_date AS DATE)) AS m, CAST(SUM(emp_no) AS INT) s FROM test_emp GROUP BY m ORDER BY m LIMIT 5; + + m:i | s:i +null |100445 +1 |60288 +2 |80388 +3 |20164 +4 |80401 +; + +dateAndInterval +SELECT YEAR(CAST('2019-01-21' AS DATE) + INTERVAL '1-2' YEAR TO MONTH) AS y, MONTH(INTERVAL '1-2' YEAR TO MONTH + CAST('2019-01-21' AS DATE)) AS m; + +y:i | m:i +2020 | 3 +; diff --git a/x-pack/plugin/sql/qa/src/main/resources/datetime.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/datetime.sql-spec index 3748a116b7450..1bdc090ea232f 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/datetime.sql-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/datetime.sql-spec @@ -6,8 +6,9 @@ // Time NOT IMPLEMENTED in H2 on TIMESTAMP WITH TIME ZONE - hence why these 
are moved to CSV // -// WEEK_OF_YEAR moved to CSV tests, because H2 builds its Calendar with the local Locale, we consider ROOT as the default Locale -// This has implications on the results, which could change given specific locales where the rules for determining the start of a year are different. +// WEEK_OF_YEAR moved to CSV tests, because H2 builds its Calendar with the local Locale, +// we consider ROOT as the default Locale. This has implications on the results, which could +// change given specific locales where the rules for determining the start of a year are different. // // DateTime @@ -31,10 +32,10 @@ SELECT MONTHNAME(CAST('2018-09-03' AS TIMESTAMP)) month FROM "test_emp" limit 1; dayNameFromStringDateTime SELECT DAYNAME(CAST('2018-09-03' AS TIMESTAMP)) day FROM "test_emp" limit 1; -quarterSelect +dateTimeQuarter SELECT QUARTER(hire_date) q, hire_date FROM test_emp ORDER BY hire_date LIMIT 15; -dayOfWeek +dateTimeDayOfWeek SELECT DAY_OF_WEEK(birth_date) day, birth_date FROM test_emp ORDER BY DAY_OF_WEEK(birth_date); // diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java index 0c374038953ab..61e1e6bc67ef0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java @@ -95,7 +95,7 @@ public Object extract(Bucket bucket) { if (object == null) { return object; } else if (object instanceof Long) { - object = DateUtils.of(((Long) object).longValue(), zoneId); + object = DateUtils.asDateTime(((Long) object).longValue(), zoneId); } else { throw new SqlIllegalArgumentException("Invalid date key returned: {}", object); } @@ -129,4 +129,4 @@ public boolean equals(Object obj) { public String 
toString() { return "|" + key + "|"; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java index ecb61e686a109..503da62dc30ee 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java @@ -130,11 +130,11 @@ private Object unwrapMultiValue(Object values) { } if (dataType == DataType.DATETIME) { if (values instanceof String) { - return DateUtils.of(Long.parseLong(values.toString())); + return DateUtils.asDateTime(Long.parseLong(values.toString())); } // returned by nested types... if (values instanceof DateTime) { - return DateUtils.of((DateTime) values); + return DateUtils.asDateTime((DateTime) values); } } if (values instanceof Long || values instanceof Double || values instanceof String || values instanceof Boolean) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java index 3198604a94c9a..ee9f98e104877 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.sql.expression; -import org.elasticsearch.common.Strings; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.Expression.TypeResolution; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; @@ -16,11 +15,13 @@ import java.util.Collection; import java.util.List; import java.util.Locale; +import java.util.StringJoiner; import 
java.util.function.Predicate; import static java.lang.String.format; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; +import static org.elasticsearch.xpack.sql.type.DataType.BOOLEAN; public final class Expressions { @@ -155,7 +156,7 @@ public static List pipe(List expressions) { } public static TypeResolution typeMustBeBoolean(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, dt -> dt == DataType.BOOLEAN, operationName, paramOrd, "boolean"); + return typeMustBe(e, dt -> dt == BOOLEAN, operationName, paramOrd, "boolean"); } public static TypeResolution typeMustBeInteger(Expression e, String operationName, ParamOrdinal paramOrd) { @@ -171,11 +172,11 @@ public static TypeResolution typeMustBeString(Expression e, String operationName } public static TypeResolution typeMustBeDate(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, dt -> dt == DataType.DATETIME, operationName, paramOrd, "date"); + return typeMustBe(e, DataType::isDateBased, operationName, paramOrd, "date", "datetime"); } public static TypeResolution typeMustBeNumericOrDate(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, dt -> dt.isNumeric() || dt == DataType.DATETIME, operationName, paramOrd, "numeric", "date"); + return typeMustBe(e, dt -> dt.isNumeric() || dt.isDateBased(), operationName, paramOrd, "date", "datetime", "numeric"); } public static TypeResolution typeMustBe(Expression e, @@ -188,8 +189,20 @@ public static TypeResolution typeMustBe(Expression e, new TypeResolution(format(Locale.ROOT, "[%s]%s argument must be [%s], found value [%s] type [%s]", operationName, paramOrd == null || paramOrd == ParamOrdinal.DEFAULT ? 
"" : " " + paramOrd.name().toLowerCase(Locale.ROOT), - Strings.arrayToDelimitedString(acceptedTypes, " or "), + acceptedTypesForErrorMsg(acceptedTypes), Expressions.name(e), e.dataType().esType)); } + + private static String acceptedTypesForErrorMsg(String... acceptedTypes) { + StringJoiner sj = new StringJoiner(", "); + for (int i = 0; i < acceptedTypes.length - 1; i++) { + sj.add(acceptedTypes[i]); + } + if (acceptedTypes.length > 1) { + return sj.toString() + " or " + acceptedTypes[acceptedTypes.length - 1]; + } else { + return acceptedTypes[0]; + } + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java index e66dfdebc6b36..8aa72dea7d1da 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java @@ -47,4 +47,4 @@ public String innerName() { protected TypeResolution resolveType() { return Expressions.typeMustBeNumericOrDate(field(), sourceText(), ParamOrdinal.DEFAULT); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/Histogram.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/Histogram.java index 46614755b7e8f..3dd4bdc992cb0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/Histogram.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/Histogram.java @@ -42,7 +42,7 @@ protected TypeResolution resolveType() { TypeResolution resolution = Expressions.typeMustBeNumericOrDate(field(), "HISTOGRAM", ParamOrdinal.FIRST); if (resolution == TypeResolution.TYPE_RESOLVED) { // interval must be Literal interval - if 
(field().dataType() == DataType.DATETIME) { + if (field().dataType().isDateBased()) { resolution = Expressions.typeMustBe(interval, DataTypes::isInterval, "(Date) HISTOGRAM", ParamOrdinal.SECOND, "interval"); } else { resolution = Expressions.typeMustBeNumeric(interval, "(Numeric) HISTOGRAM", ParamOrdinal.SECOND); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java index 345498afd00f1..fa949007ef58a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java @@ -74,4 +74,4 @@ public boolean equals(Object obj) { public int hashCode() { return Objects.hash(field(), zoneId()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java index 01d56188ed2ed..f56181bae13de 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java @@ -357,11 +357,11 @@ private static Object asDateTime(Object dateTime, boolean lenient) { return ((JodaCompatibleZonedDateTime) dateTime).getZonedDateTime(); } if (dateTime instanceof ZonedDateTime) { - return (ZonedDateTime) dateTime; + return dateTime; } if (false == lenient) { if (dateTime instanceof Number) { - return DateUtils.of(((Number) dateTime).longValue()); + return 
DateUtils.asDateTime(((Number) dateTime).longValue()); } throw new SqlIllegalArgumentException("Invalid date encountered [{}]", dateTime); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java index cd13570a1ad10..5b75878920243 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java @@ -79,7 +79,7 @@ default ScriptTemplate scriptWithScalar(ScalarFunctionAttribute scalar) { default ScriptTemplate scriptWithAggregate(AggregateFunctionAttribute aggregate) { String template = "{}"; - if (aggregate.dataType() == DataType.DATETIME) { + if (aggregate.dataType().isDateBased()) { template = "{sql}.asDateTime({})"; } return new ScriptTemplate(processScript(template), @@ -89,7 +89,7 @@ default ScriptTemplate scriptWithAggregate(AggregateFunctionAttribute aggregate) default ScriptTemplate scriptWithGrouping(GroupingFunctionAttribute grouping) { String template = "{}"; - if (grouping.dataType() == DataType.DATETIME) { + if (grouping.dataType().isDateBased()) { template = "{sql}.asDateTime({})"; } return new ScriptTemplate(processScript(template), diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java index da42ffe523b64..5be5e28718459 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java @@ -45,12 +45,15 @@ 
protected TypeResolution resolveType() { if (DataTypeConversion.commonType(l, r) == null) { return new TypeResolution(format("[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); } else { - return TypeResolution.TYPE_RESOLVED; + return resolveWithIntervals(); } } // fall-back to default checks return super.resolveType(); } - + + protected TypeResolution resolveWithIntervals() { + return TypeResolution.TYPE_RESOLVED; + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java index 32acfa8ed685d..e2454ffd26742 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java @@ -9,6 +9,9 @@ import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; /** * Subtraction function ({@code a - b}). 
@@ -28,4 +31,13 @@ protected NodeInfo info() { protected Sub replaceChildren(Expression newLeft, Expression newRight) { return new Sub(source(), newLeft, newRight); } + + @Override + protected TypeResolution resolveWithIntervals() { + if (right().dataType().isDateBased() && DataTypes.isInterval(left().dataType())) { + return new TypeResolution(format(null, "Cannot subtract a {}[{}] from an interval[{}]; do you mean the reverse?", + right().dataType().esType, right().source().text(), left().source().text())); + } + return TypeResolution.TYPE_RESOLVED; + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java index 68baa84a802f6..432872891e5c2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java @@ -411,6 +411,8 @@ public DataType visitPrimitiveDataType(PrimitiveDataTypeContext ctx) { case "float": case "double": return DataType.DOUBLE; + case "date": + return DataType.DATE; case "datetime": case "timestamp": return DataType.DATETIME; @@ -793,7 +795,7 @@ public Literal visitDateEscapedLiteral(DateEscapedLiteralContext ctx) { } catch(IllegalArgumentException ex) { throw new ParsingException(source, "Invalid date received; {}", ex.getMessage()); } - return new Literal(source, DateUtils.of(dt), DataType.DATETIME); + return new Literal(source, DateUtils.asDateOnly(dt), DataType.DATE); } @Override @@ -829,7 +831,7 @@ public Literal visitTimestampEscapedLiteral(TimestampEscapedLiteralContext ctx) } catch (IllegalArgumentException ex) { throw new ParsingException(source, "Invalid timestamp received; {}", ex.getMessage()); } - return new Literal(source, DateUtils.of(dt), DataType.DATETIME); + return new Literal(source, DateUtils.asDateTime(dt), DataType.DATETIME); } @Override diff --git 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java index 5189a0ca4981e..da409439558c7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java @@ -61,7 +61,6 @@ import org.elasticsearch.xpack.sql.rule.Rule; import org.elasticsearch.xpack.sql.rule.RuleExecutor; import org.elasticsearch.xpack.sql.session.EmptyExecutable; -import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.util.Check; import org.elasticsearch.xpack.sql.util.DateUtils; @@ -284,7 +283,7 @@ protected PhysicalPlan rule(AggregateExec a) { if (matchingGroup != null) { if (exp instanceof Attribute || exp instanceof ScalarFunction || exp instanceof GroupingFunction) { Processor action = null; - ZoneId zi = DataType.DATETIME == exp.dataType() ? DateUtils.UTC : null; + ZoneId zi = exp.dataType().isDateBased() ? DateUtils.UTC : null; /* * special handling of dates since aggs return the typed Date object which needs * extraction instead of handling this in the scroller, the folder handles this @@ -335,7 +334,7 @@ protected PhysicalPlan rule(AggregateExec a) { // check if the field is a date - if so mark it as such to interpret the long as a date // UTC is used since that's what the server uses and there's no conversion applied // (like for date histograms) - ZoneId zi = DataType.DATETIME == child.dataType() ? DateUtils.UTC : null; + ZoneId zi = child.dataType().isDateBased() ? DateUtils.UTC : null; queryC = queryC.addColumn(new GroupByRef(matchingGroup.id(), null, zi)); } // handle histogram @@ -359,7 +358,7 @@ else if (child instanceof GroupingFunction) { matchingGroup = groupingContext.groupFor(ne); Check.notNull(matchingGroup, "Cannot find group [{}]", Expressions.name(ne)); - ZoneId zi = DataType.DATETIME == ne.dataType() ? 
DateUtils.UTC : null; + ZoneId zi = ne.dataType().isDateBased() ? DateUtils.UTC : null; queryC = queryC.addColumn(new GroupByRef(matchingGroup.id(), null, zi)); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java index 489e1506edf1a..1a5ceb686e609 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -91,8 +91,8 @@ import org.elasticsearch.xpack.sql.querydsl.query.TermsQuery; import org.elasticsearch.xpack.sql.querydsl.query.WildcardQuery; import org.elasticsearch.xpack.sql.tree.Source; -import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.util.Check; +import org.elasticsearch.xpack.sql.util.DateUtils; import org.elasticsearch.xpack.sql.util.ReflectionUtils; import java.util.Arrays; @@ -106,6 +106,7 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.sql.expression.Foldables.doubleValuesOf; import static org.elasticsearch.xpack.sql.expression.Foldables.valueOf; +import static org.elasticsearch.xpack.sql.type.DataType.DATE; final class QueryTranslator { @@ -275,8 +276,15 @@ else if (exp instanceof GroupingFunction) { Expression field = h.field(); // date histogram - if (h.dataType() == DataType.DATETIME) { + if (h.dataType().isDateBased()) { long intervalAsMillis = Intervals.inMillis(h.interval()); + + // When the histogram in SQL is applied on DATE type instead of DATETIME, the interval + // specified is truncated to the multiple of a day. If the interval specified is less + // than 1 day, then the interval used will be `INTERVAL '1' DAY`. 
+ if (h.dataType() == DATE) { + intervalAsMillis = DateUtils.minDayInterval(intervalAsMillis); + } // TODO: set timezone if (field instanceof FieldAttribute) { key = new GroupByDateHistogram(aggId, nameOf(field), intervalAsMillis, h.zoneId()); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateHistogram.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateHistogram.java index 936f565827980..24367fc5e1f2f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateHistogram.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateHistogram.java @@ -37,6 +37,11 @@ private GroupByDateHistogram(String id, String fieldName, ScriptTemplate script, } + // For testing + public long interval() { + return interval; + } + @Override protected CompositeValuesSourceBuilder createSourceBuilder() { return new DateHistogramValuesSourceBuilder(id()) diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByKey.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByKey.java index 8626ea18e30c5..6f26ee1dd960c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByKey.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByKey.java @@ -39,6 +39,8 @@ public final CompositeValuesSourceBuilder asValueSource() { builder.valueType(ValueType.DOUBLE); } else if (script.outputType().isString()) { builder.valueType(ValueType.STRING); + } else if (script.outputType() == DataType.DATE) { + builder.valueType(ValueType.LONG); } else if (script.outputType() == DataType.DATETIME) { builder.valueType(ValueType.DATE); } else if (script.outputType() == DataType.BOOLEAN) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java index f233632d0f656..3210c9ceb8a8a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java @@ -41,7 +41,8 @@ public enum DataType { OBJECT( JDBCType.STRUCT, -1, 0, 0, false, false, false), NESTED( JDBCType.STRUCT, -1, 0, 0, false, false, false), BINARY( JDBCType.VARBINARY, -1, Integer.MAX_VALUE, 0, false, false, false), - // since ODBC and JDBC interpret precision for Date as display size, + DATE( JDBCType.DATE, Long.BYTES, 10, 10, false, false, true), + // since ODBC and JDBC interpret precision for Date as display size // the precision is 23 (number of chars in ISO8601 with millis) + Z (the UTC timezone) // see https://github.com/elastic/elasticsearch/issues/30386#issuecomment-386807288 DATETIME( JDBCType.TIMESTAMP, Long.BYTES, 24, 24, false, false, true), @@ -102,7 +103,7 @@ public enum DataType { odbcToEs.put("SQL_LONGVARBINARY", BINARY); // Date - odbcToEs.put("SQL_DATE", DATETIME); + odbcToEs.put("SQL_DATE", DATE); odbcToEs.put("SQL_TIME", DATETIME); odbcToEs.put("SQL_TIMESTAMP", DATETIME); @@ -214,6 +215,10 @@ public boolean isString() { public boolean isPrimitive() { return this != OBJECT && this != NESTED; } + + public boolean isDateBased() { + return this == DATE || this == DATETIME; + } public static DataType fromOdbcType(String odbcType) { return odbcToEs.get(odbcType); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java index f3cf3d2bac1ac..a578c6a7e0644 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java @@ -17,6 +17,7 @@ import java.util.function.LongFunction; import 
static org.elasticsearch.xpack.sql.type.DataType.BOOLEAN; +import static org.elasticsearch.xpack.sql.type.DataType.DATE; import static org.elasticsearch.xpack.sql.type.DataType.DATETIME; import static org.elasticsearch.xpack.sql.type.DataType.LONG; import static org.elasticsearch.xpack.sql.type.DataType.NULL; @@ -73,7 +74,34 @@ public static DataType commonType(DataType left, DataType right) { return left; } } + // interval and dates + if (left == DATE) { + if (DataTypes.isInterval(right)) { + return left; + } + } + if (right == DATE) { + if (DataTypes.isInterval(left)) { + return right; + } + } + if (left == DATETIME) { + if (right == DATE) { + return left; + } + if (DataTypes.isInterval(right)) { + return left; + } + } + if (right == DATETIME) { + if (left == DATE) { + return right; + } + if (DataTypes.isInterval(left)) { + return right; + } + } if (DataTypes.isInterval(left)) { // intervals widening if (DataTypes.isInterval(right)) { @@ -82,12 +110,6 @@ public static DataType commonType(DataType left, DataType right) { } } - if (DataTypes.isInterval(right)) { - if (left == DATETIME) { - return left; - } - } - // none found return null; } @@ -145,6 +167,8 @@ private static Conversion conversion(DataType from, DataType to) { return conversionToFloat(from); case DOUBLE: return conversionToDouble(from); + case DATE: + return conversionToDate(from); case DATETIME: return conversionToDateTime(from); case BOOLEAN: @@ -156,9 +180,12 @@ private static Conversion conversion(DataType from, DataType to) { } private static Conversion conversionToString(DataType from) { - if (from == DATETIME) { + if (from == DATE) { return Conversion.DATE_TO_STRING; } + if (from == DATETIME) { + return Conversion.DATETIME_TO_STRING; + } return Conversion.OTHER_TO_STRING; } @@ -182,9 +209,12 @@ private static Conversion conversionToLong(DataType from) { if (from.isString()) { return Conversion.STRING_TO_LONG; } - if (from == DATETIME) { + if (from == DATE) { return Conversion.DATE_TO_LONG; } 
+ if (from == DATETIME) { + return Conversion.DATETIME_TO_LONG; + } return null; } @@ -201,9 +231,12 @@ private static Conversion conversionToInt(DataType from) { if (from.isString()) { return Conversion.STRING_TO_INT; } - if (from == DATETIME) { + if (from == DATE) { return Conversion.DATE_TO_INT; } + if (from == DATETIME) { + return Conversion.DATETIME_TO_INT; + } return null; } @@ -220,9 +253,12 @@ private static Conversion conversionToShort(DataType from) { if (from.isString()) { return Conversion.STRING_TO_SHORT; } - if (from == DATETIME) { + if (from == DATE) { return Conversion.DATE_TO_SHORT; } + if (from == DATETIME) { + return Conversion.DATETIME_TO_SHORT; + } return null; } @@ -239,9 +275,12 @@ private static Conversion conversionToByte(DataType from) { if (from.isString()) { return Conversion.STRING_TO_BYTE; } - if (from == DATETIME) { + if (from == DATE) { return Conversion.DATE_TO_BYTE; } + if (from == DATETIME) { + return Conversion.DATETIME_TO_BYTE; + } return null; } @@ -258,9 +297,12 @@ private static Conversion conversionToFloat(DataType from) { if (from.isString()) { return Conversion.STRING_TO_FLOAT; } - if (from == DATETIME) { + if (from == DATE) { return Conversion.DATE_TO_FLOAT; } + if (from == DATETIME) { + return Conversion.DATETIME_TO_FLOAT; + } return null; } @@ -277,13 +319,16 @@ private static Conversion conversionToDouble(DataType from) { if (from.isString()) { return Conversion.STRING_TO_DOUBLE; } - if (from == DATETIME) { + if (from == DATE) { return Conversion.DATE_TO_DOUBLE; } + if (from == DATETIME) { + return Conversion.DATETIME_TO_DOUBLE; + } return null; } - private static Conversion conversionToDateTime(DataType from) { + private static Conversion conversionToDate(DataType from) { if (from.isRational()) { return Conversion.RATIONAL_TO_DATE; } @@ -296,6 +341,28 @@ private static Conversion conversionToDateTime(DataType from) { if (from.isString()) { return Conversion.STRING_TO_DATE; } + if (from == DATETIME) { + return 
Conversion.DATETIME_TO_DATE; + } + return null; + } + + private static Conversion conversionToDateTime(DataType from) { + if (from.isRational()) { + return Conversion.RATIONAL_TO_DATETIME; + } + if (from.isInteger()) { + return Conversion.INTEGER_TO_DATETIME; + } + if (from == BOOLEAN) { + return Conversion.BOOL_TO_DATETIME; // We emit an int here which is ok because of Java's casting rules + } + if (from.isString()) { + return Conversion.STRING_TO_DATETIME; + } + if (from == DATE) { + return Conversion.DATE_TO_DATETIME; + } return null; } @@ -306,36 +373,39 @@ private static Conversion conversionToBoolean(DataType from) { if (from.isString()) { return Conversion.STRING_TO_BOOLEAN; } - if (from == DATETIME) { + if (from == DATE) { return Conversion.DATE_TO_BOOLEAN; } + if (from == DATETIME) { + return Conversion.DATETIME_TO_BOOLEAN; + } return null; } public static byte safeToByte(long x) { if (x > Byte.MAX_VALUE || x < Byte.MIN_VALUE) { - throw new SqlIllegalArgumentException("[" + x + "] out of [Byte] range"); + throw new SqlIllegalArgumentException("[" + x + "] out of [byte] range"); } return (byte) x; } public static short safeToShort(long x) { if (x > Short.MAX_VALUE || x < Short.MIN_VALUE) { - throw new SqlIllegalArgumentException("[" + x + "] out of [Short] range"); + throw new SqlIllegalArgumentException("[" + x + "] out of [short] range"); } return (short) x; } public static int safeToInt(long x) { if (x > Integer.MAX_VALUE || x < Integer.MIN_VALUE) { - throw new SqlIllegalArgumentException("[" + x + "] out of [Int] range"); + throw new SqlIllegalArgumentException("[" + x + "] out of [integer] range"); } return (int) x; } public static long safeToLong(double x) { if (x > Long.MAX_VALUE || x < Long.MIN_VALUE) { - throw new SqlIllegalArgumentException("[" + x + "] out of [Long] range"); + throw new SqlIllegalArgumentException("[" + x + "] out of [long] range"); } return Math.round(x); } @@ -358,7 +428,7 @@ public static Number toInteger(double x, DataType 
dataType) { public static boolean convertToBoolean(String val) { String lowVal = val.toLowerCase(Locale.ROOT); if (Booleans.isBoolean(lowVal) == false) { - throw new SqlIllegalArgumentException("cannot cast [" + val + "] to [Boolean]"); + throw new SqlIllegalArgumentException("cannot cast [" + val + "] to [boolean]"); } return Booleans.parseBoolean(lowVal); } @@ -384,53 +454,68 @@ public enum Conversion { IDENTITY(Function.identity()), NULL(value -> null), - DATE_TO_STRING(o -> DateUtils.toString((ZonedDateTime) o)), + DATE_TO_STRING(o -> DateUtils.toDateString((ZonedDateTime) o)), + DATETIME_TO_STRING(o -> DateUtils.toString((ZonedDateTime) o)), OTHER_TO_STRING(String::valueOf), RATIONAL_TO_LONG(fromDouble(DataTypeConversion::safeToLong)), INTEGER_TO_LONG(fromLong(value -> value)), - STRING_TO_LONG(fromString(Long::valueOf, "Long")), - DATE_TO_LONG(fromDate(value -> value)), + STRING_TO_LONG(fromString(Long::valueOf, "long")), + DATE_TO_LONG(fromDateTime(value -> value)), + DATETIME_TO_LONG(fromDateTime(value -> value)), RATIONAL_TO_INT(fromDouble(value -> safeToInt(safeToLong(value)))), INTEGER_TO_INT(fromLong(DataTypeConversion::safeToInt)), BOOL_TO_INT(fromBool(value -> value ? 1 : 0)), - STRING_TO_INT(fromString(Integer::valueOf, "Int")), - DATE_TO_INT(fromDate(DataTypeConversion::safeToInt)), + STRING_TO_INT(fromString(Integer::valueOf, "integer")), + DATE_TO_INT(fromDateTime(DataTypeConversion::safeToInt)), + DATETIME_TO_INT(fromDateTime(DataTypeConversion::safeToInt)), RATIONAL_TO_SHORT(fromDouble(value -> safeToShort(safeToLong(value)))), INTEGER_TO_SHORT(fromLong(DataTypeConversion::safeToShort)), BOOL_TO_SHORT(fromBool(value -> value ? 
(short) 1 : (short) 0)), - STRING_TO_SHORT(fromString(Short::valueOf, "Short")), - DATE_TO_SHORT(fromDate(DataTypeConversion::safeToShort)), + STRING_TO_SHORT(fromString(Short::valueOf, "short")), + DATE_TO_SHORT(fromDateTime(DataTypeConversion::safeToShort)), + DATETIME_TO_SHORT(fromDateTime(DataTypeConversion::safeToShort)), RATIONAL_TO_BYTE(fromDouble(value -> safeToByte(safeToLong(value)))), INTEGER_TO_BYTE(fromLong(DataTypeConversion::safeToByte)), BOOL_TO_BYTE(fromBool(value -> value ? (byte) 1 : (byte) 0)), - STRING_TO_BYTE(fromString(Byte::valueOf, "Byte")), - DATE_TO_BYTE(fromDate(DataTypeConversion::safeToByte)), + STRING_TO_BYTE(fromString(Byte::valueOf, "byte")), + DATE_TO_BYTE(fromDateTime(DataTypeConversion::safeToByte)), + DATETIME_TO_BYTE(fromDateTime(DataTypeConversion::safeToByte)), // TODO floating point conversions are lossy but conversions to integer conversions are not. Are we ok with that? RATIONAL_TO_FLOAT(fromDouble(value -> (float) value)), INTEGER_TO_FLOAT(fromLong(value -> (float) value)), BOOL_TO_FLOAT(fromBool(value -> value ? 1f : 0f)), - STRING_TO_FLOAT(fromString(Float::valueOf, "Float")), - DATE_TO_FLOAT(fromDate(value -> (float) value)), + STRING_TO_FLOAT(fromString(Float::valueOf, "float")), + DATE_TO_FLOAT(fromDateTime(value -> (float) value)), + DATETIME_TO_FLOAT(fromDateTime(value -> (float) value)), RATIONAL_TO_DOUBLE(fromDouble(Double::valueOf)), INTEGER_TO_DOUBLE(fromLong(Double::valueOf)), BOOL_TO_DOUBLE(fromBool(value -> value ? 
1d : 0d)), - STRING_TO_DOUBLE(fromString(Double::valueOf, "Double")), - DATE_TO_DOUBLE(fromDate(Double::valueOf)), + STRING_TO_DOUBLE(fromString(Double::valueOf, "double")), + DATE_TO_DOUBLE(fromDateTime(Double::valueOf)), + DATETIME_TO_DOUBLE(fromDateTime(Double::valueOf)), RATIONAL_TO_DATE(toDate(RATIONAL_TO_LONG)), INTEGER_TO_DATE(toDate(INTEGER_TO_LONG)), BOOL_TO_DATE(toDate(BOOL_TO_INT)), - STRING_TO_DATE(fromString(DateUtils::of, "Date")), + STRING_TO_DATE(fromString(DateUtils::asDateOnly, "date")), + DATETIME_TO_DATE(fromDatetimeToDate()), + + RATIONAL_TO_DATETIME(toDateTime(RATIONAL_TO_LONG)), + INTEGER_TO_DATETIME(toDateTime(INTEGER_TO_LONG)), + BOOL_TO_DATETIME(toDateTime(BOOL_TO_INT)), + STRING_TO_DATETIME(fromString(DateUtils::asDateTime, "datetime")), + DATE_TO_DATETIME(value -> value), NUMERIC_TO_BOOLEAN(fromLong(value -> value != 0)), - STRING_TO_BOOLEAN(fromString(DataTypeConversion::convertToBoolean, "Boolean")), - DATE_TO_BOOLEAN(fromDate(value -> value != 0)), + STRING_TO_BOOLEAN(fromString(DataTypeConversion::convertToBoolean, "boolean")), + DATE_TO_BOOLEAN(fromDateTime(value -> value != 0)), + DATETIME_TO_BOOLEAN(fromDateTime(value -> value != 0)), BOOL_TO_LONG(fromBool(value -> value ? 
1L : 0L)), @@ -470,13 +555,21 @@ private static Function fromString(Function conv private static Function fromBool(Function converter) { return (Object l) -> converter.apply(((Boolean) l)); } - - private static Function fromDate(Function converter) { - return l -> ((ZonedDateTime) l).toEpochSecond(); + + private static Function fromDateTime(Function converter) { + return l -> converter.apply(((ZonedDateTime) l).toEpochSecond()); + } + + private static Function toDateTime(Conversion conversion) { + return l -> DateUtils.asDateTime(((Number) conversion.convert(l)).longValue()); } private static Function toDate(Conversion conversion) { - return l -> DateUtils.of(((Number) conversion.convert(l)).longValue()); + return l -> DateUtils.asDateOnly(((Number) conversion.convert(l)).longValue()); + } + + private static Function fromDatetimeToDate() { + return l -> DateUtils.asDateOnly((ZonedDateTime) l); } public Object convert(Object l) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java index 6aa56914a63d7..bdd455fe10f63 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java @@ -17,38 +17,74 @@ import java.time.ZoneOffset; import java.time.ZonedDateTime; -public class DateUtils { +import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE; + +public final class DateUtils { + + private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000; // TODO: do we have a java.time based parser we can use instead? private static final DateTimeFormatter UTC_DATE_FORMATTER = ISODateTimeFormat.dateOptionalTimeParser().withZoneUTC(); - public static ZoneId UTC = ZoneId.of("Z"); + public static final ZoneId UTC = ZoneId.of("Z"); private DateUtils() {} + /** + * Creates an date for SQL DATE type from the millis since epoch. 
+ */ + public static ZonedDateTime asDateOnly(long millis) { + return ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), UTC).toLocalDate().atStartOfDay(UTC); + } /** - * Creates a date from the millis since epoch (thus the time-zone is UTC). + * Creates a datetime from the millis since epoch (thus the time-zone is UTC). */ - public static ZonedDateTime of(long millis) { + public static ZonedDateTime asDateTime(long millis) { return ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), UTC); } /** - * Creates a date from the millis since epoch then translates the date into the given timezone. + * Creates a datetime from the millis since epoch then translates the date into the given timezone. */ - public static ZonedDateTime of(long millis, ZoneId id) { + public static ZonedDateTime asDateTime(long millis, ZoneId id) { return ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), id); } + /** + * Parses the given string into a Date (SQL DATE type) using UTC as a default timezone. + */ + public static ZonedDateTime asDateOnly(String dateFormat) { + return asDateOnly(UTC_DATE_FORMATTER.parseDateTime(dateFormat)); + } + + public static ZonedDateTime asDateOnly(DateTime dateTime) { + LocalDateTime ldt = LocalDateTime.of( + dateTime.getYear(), + dateTime.getMonthOfYear(), + dateTime.getDayOfMonth(), + 0, + 0, + 0, + 0); + + return ZonedDateTime.ofStrict(ldt, + ZoneOffset.ofTotalSeconds(dateTime.getZone().getOffset(dateTime) / 1000), + org.elasticsearch.common.time.DateUtils.dateTimeZoneToZoneId(dateTime.getZone())); + } + + public static ZonedDateTime asDateOnly(ZonedDateTime zdt) { + return zdt.toLocalDate().atStartOfDay(zdt.getZone()); + } + /** * Parses the given string into a DateTime using UTC as a default timezone. 
*/ - public static ZonedDateTime of(String dateFormat) { - return of(UTC_DATE_FORMATTER.parseDateTime(dateFormat)); + public static ZonedDateTime asDateTime(String dateFormat) { + return asDateTime(UTC_DATE_FORMATTER.parseDateTime(dateFormat)); } - public static ZonedDateTime of(DateTime dateTime) { + public static ZonedDateTime asDateTime(DateTime dateTime) { LocalDateTime ldt = LocalDateTime.of( dateTime.getYear(), dateTime.getMonthOfYear(), @@ -62,8 +98,20 @@ public static ZonedDateTime of(DateTime dateTime) { ZoneOffset.ofTotalSeconds(dateTime.getZone().getOffset(dateTime) / 1000), org.elasticsearch.common.time.DateUtils.dateTimeZoneToZoneId(dateTime.getZone())); } + public static String toString(ZonedDateTime dateTime) { return StringUtils.toString(dateTime); } -} \ No newline at end of file + + public static String toDateString(ZonedDateTime date) { + return date.format(ISO_LOCAL_DATE); + } + + public static long minDayInterval(long l) { + if (l < DAY_IN_MILLIS ) { + return DAY_IN_MILLIS; + } + return l - (l % DAY_IN_MILLIS); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index e45da9d08fee9..946e8f93a7091 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -198,6 +198,12 @@ public void testExtractNonDateTime() { assertEquals("1:8: Invalid datetime field [ABS]. 
Use any datetime function.", error("SELECT EXTRACT(ABS FROM date) FROM test")); } + public void testSubtractFromInterval() { + assertEquals("1:8: Cannot subtract a datetime[CAST('2000-01-01' AS DATETIME)] " + + "from an interval[INTERVAL 1 MONTH]; do you mean the reverse?", + error("SELECT INTERVAL 1 MONTH - CAST('2000-01-01' AS DATETIME)")); + } + public void testMultipleColumns() { // xxx offset is that of the order by field assertEquals("1:43: Unknown column [xxx]\nline 1:8: Unknown column [xxx]", @@ -378,7 +384,7 @@ public void testNotSupportedAggregateOnDate() { } public void testNotSupportedAggregateOnString() { - assertEquals("1:8: [MAX(keyword)] argument must be [numeric or date], found value [keyword] type [keyword]", + assertEquals("1:8: [MAX(keyword)] argument must be [date, datetime or numeric], found value [keyword] type [keyword]", error("SELECT MAX(keyword) FROM test")); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java index 135ae74dd2075..0561b6820641d 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java @@ -62,7 +62,7 @@ public void testExtractDate() { long millis = System.currentTimeMillis(); Bucket bucket = new TestBucket(singletonMap(extractor.key(), millis), randomLong(), new Aggregations(emptyList())); - assertEquals(DateUtils.of(millis, extractor.zoneId()), extractor.extract(bucket)); + assertEquals(DateUtils.asDateTime(millis, extractor.zoneId()), extractor.extract(bucket)); } public void testExtractIncorrectDateKey() { @@ -82,4 +82,4 @@ public void testExtractIncorrectDateKey() { private static ZoneId randomSafeZone() { return 
randomValueOtherThanMany(zi -> zi.getId().startsWith("SystemV"), () -> randomZone()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java index 395f3bf270aa6..2e66192fbcbfc 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java @@ -145,7 +145,7 @@ public void testGetDate() { DocumentField field = new DocumentField("my_date_field", documentFieldValues); hit.fields(singletonMap("my_date_field", field)); FieldHitExtractor extractor = new FieldHitExtractor("my_date_field", DataType.DATETIME, true); - assertEquals(DateUtils.of(millis), extractor.extract(hit)); + assertEquals(DateUtils.asDateTime(millis), extractor.extract(hit)); } public void testGetSource() throws IOException { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessorTests.java index 831978705d061..7e9cb64b01950 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessorTests.java @@ -39,7 +39,7 @@ public void testApply() { assertEquals(null, proc.process(null)); assertEquals(1, proc.process("1")); Exception e = expectThrows(SqlIllegalArgumentException.class, () -> proc.process("1.2")); - assertEquals("cannot cast [1.2] to [Int]", e.getMessage()); + assertEquals("cannot cast [1.2] to [integer]", e.getMessage()); } { CastProcessor proc = new 
CastProcessor(Conversion.BOOL_TO_INT); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java index 164fe1fe931a4..2ae6e571ac9d2 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java @@ -26,6 +26,10 @@ public static ZonedDateTime dateTime(int year, int month, int day, int hour, int } public static ZonedDateTime dateTime(long millisSinceEpoch) { - return DateUtils.of(millisSinceEpoch); + return DateUtils.asDateTime(millisSinceEpoch); + } + + public static ZonedDateTime date(long millisSinceEpoch) { + return DateUtils.asDateOnly(millisSinceEpoch); } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java index f3bf9fc03e777..01b1d0d077930 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java @@ -170,7 +170,7 @@ public void testFunctionWithFunctionWithArgAndParams() { public void testDateLiteral() { Literal l = dateLiteral("2012-01-01"); - assertThat(l.dataType(), is(DataType.DATETIME)); + assertThat(l.dataType(), is(DataType.DATE)); } public void testDateLiteralValidation() { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java index 6ed46b74d4512..e737258ef1982 100644 --- 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java @@ -61,7 +61,7 @@ public void testSysTypes() throws Exception { Command cmd = sql("SYS TYPES").v1(); List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "SCALED_FLOAT", "FLOAT", "DOUBLE", - "KEYWORD", "TEXT", "IP", "BOOLEAN", "DATETIME", + "KEYWORD", "TEXT", "IP", "BOOLEAN", "DATE", "DATETIME", "INTERVAL_YEAR", "INTERVAL_MONTH", "INTERVAL_DAY", "INTERVAL_HOUR", "INTERVAL_MINUTE", "INTERVAL_SECOND", "INTERVAL_YEAR_TO_MONTH", "INTERVAL_DAY_TO_HOUR", "INTERVAL_DAY_TO_MINUTE", "INTERVAL_DAY_TO_SECOND", "INTERVAL_HOUR_TO_MINUTE", "INTERVAL_HOUR_TO_SECOND", "INTERVAL_MINUTE_TO_SECOND", diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java index 92f734e539780..41ddb518ce6d9 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java @@ -44,7 +44,7 @@ public void testSysTypes() throws Exception { Command cmd = sql("SYS TYPES").v1(); List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "SCALED_FLOAT", "FLOAT", "DOUBLE", - "KEYWORD", "TEXT", "IP", "BOOLEAN", "DATETIME", + "KEYWORD", "TEXT", "IP", "BOOLEAN", "DATE", "DATETIME", "INTERVAL_YEAR", "INTERVAL_MONTH", "INTERVAL_DAY", "INTERVAL_HOUR", "INTERVAL_MINUTE", "INTERVAL_SECOND", "INTERVAL_YEAR_TO_MONTH", "INTERVAL_DAY_TO_HOUR", "INTERVAL_DAY_TO_MINUTE", "INTERVAL_DAY_TO_SECOND", "INTERVAL_HOUR_TO_MINUTE", "INTERVAL_HOUR_TO_SECOND", "INTERVAL_MINUTE_TO_SECOND", diff --git 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index 8ee94194845a5..704e4d7147e78 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; import org.elasticsearch.xpack.sql.expression.function.grouping.Histogram; +import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; import org.elasticsearch.xpack.sql.optimizer.Optimizer; @@ -34,6 +35,7 @@ import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.sql.planner.QueryTranslator.QueryTranslation; import org.elasticsearch.xpack.sql.querydsl.agg.AggFilter; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupByDateHistogram; import org.elasticsearch.xpack.sql.querydsl.query.ExistsQuery; import org.elasticsearch.xpack.sql.querydsl.query.NotQuery; import org.elasticsearch.xpack.sql.querydsl.query.Query; @@ -180,7 +182,7 @@ public void testDateRangeCast() { assertTrue(query instanceof RangeQuery); RangeQuery rq = (RangeQuery) query; assertEquals("date", rq.field()); - assertEquals(DateUtils.of("1969-05-13T12:34:56Z"), rq.lower()); + assertEquals(DateUtils.asDateTime("1969-05-13T12:34:56Z"), rq.lower()); } public void testLikeConstructsNotSupported() { @@ -482,6 +484,52 @@ public void testGroupByHistogram() { assertEquals(FieldAttribute.class, field.getClass()); assertEquals(DataType.DATETIME, field.dataType()); } + + public void testGroupByHistogramWithDate() { + 
LogicalPlan p = plan("SELECT MAX(int) FROM test GROUP BY HISTOGRAM(CAST(date AS DATE), INTERVAL 2 MONTHS)"); + assertTrue(p instanceof Aggregate); + Aggregate a = (Aggregate) p; + List groupings = a.groupings(); + assertEquals(1, groupings.size()); + Expression exp = groupings.get(0); + assertEquals(Histogram.class, exp.getClass()); + Histogram h = (Histogram) exp; + assertEquals("+0-2", h.interval().fold().toString()); + Expression field = h.field(); + assertEquals(Cast.class, field.getClass()); + assertEquals(DataType.DATE, field.dataType()); + } + + public void testGroupByHistogramWithDateAndSmallInterval() { + PhysicalPlan p = optimizeAndPlan("SELECT MAX(int) FROM test GROUP BY " + + "HISTOGRAM(CAST(date AS DATE), INTERVAL 5 MINUTES)"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertEquals(1, eqe.queryContainer().aggs().groups().size()); + assertEquals(GroupByDateHistogram.class, eqe.queryContainer().aggs().groups().get(0).getClass()); + assertEquals(86400000L, ((GroupByDateHistogram) eqe.queryContainer().aggs().groups().get(0)).interval()); + } + + public void testGroupByHistogramWithDateTruncateIntervalToDayMultiples() { + { + PhysicalPlan p = optimizeAndPlan("SELECT MAX(int) FROM test GROUP BY " + + "HISTOGRAM(CAST(date AS DATE), INTERVAL '2 3:04' DAY TO MINUTE)"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertEquals(1, eqe.queryContainer().aggs().groups().size()); + assertEquals(GroupByDateHistogram.class, eqe.queryContainer().aggs().groups().get(0).getClass()); + assertEquals(172800000L, ((GroupByDateHistogram) eqe.queryContainer().aggs().groups().get(0)).interval()); + } + { + PhysicalPlan p = optimizeAndPlan("SELECT MAX(int) FROM test GROUP BY " + + "HISTOGRAM(CAST(date AS DATE), INTERVAL 4409 MINUTES)"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertEquals(1, eqe.queryContainer().aggs().groups().size()); + 
assertEquals(GroupByDateHistogram.class, eqe.queryContainer().aggs().groups().get(0).getClass()); + assertEquals(259200000L, ((GroupByDateHistogram) eqe.queryContainer().aggs().groups().get(0)).interval()); + } + } public void testCountAndCountDistinctFolding() { PhysicalPlan p = optimizeAndPlan("SELECT COUNT(DISTINCT keyword) dkey, COUNT(keyword) key FROM test"); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java index ac744c3365a54..c42159bfaa35d 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java @@ -12,16 +12,27 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataTypeConversion.Conversion; +import org.elasticsearch.xpack.sql.util.DateUtils; import java.time.ZonedDateTime; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeTestUtils.date; import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeTestUtils.dateTime; import static org.elasticsearch.xpack.sql.type.DataType.BOOLEAN; import static org.elasticsearch.xpack.sql.type.DataType.BYTE; +import static org.elasticsearch.xpack.sql.type.DataType.DATE; import static org.elasticsearch.xpack.sql.type.DataType.DATETIME; import static org.elasticsearch.xpack.sql.type.DataType.DOUBLE; import static org.elasticsearch.xpack.sql.type.DataType.FLOAT; import static org.elasticsearch.xpack.sql.type.DataType.INTEGER; +import static org.elasticsearch.xpack.sql.type.DataType.INTERVAL_HOUR_TO_MINUTE; +import static org.elasticsearch.xpack.sql.type.DataType.INTERVAL_HOUR_TO_SECOND; +import static 
org.elasticsearch.xpack.sql.type.DataType.INTERVAL_MONTH; +import static org.elasticsearch.xpack.sql.type.DataType.INTERVAL_SECOND; +import static org.elasticsearch.xpack.sql.type.DataType.INTERVAL_YEAR; +import static org.elasticsearch.xpack.sql.type.DataType.INTERVAL_YEAR_TO_MONTH; import static org.elasticsearch.xpack.sql.type.DataType.IP; import static org.elasticsearch.xpack.sql.type.DataType.KEYWORD; import static org.elasticsearch.xpack.sql.type.DataType.LONG; @@ -33,17 +44,30 @@ import static org.elasticsearch.xpack.sql.type.DataType.values; import static org.elasticsearch.xpack.sql.type.DataTypeConversion.commonType; import static org.elasticsearch.xpack.sql.type.DataTypeConversion.conversionFor; +import static org.elasticsearch.xpack.sql.util.DateUtils.asDateTime; public class DataTypeConversionTests extends ESTestCase { - public void testConversionToString() { - Conversion conversion = conversionFor(DOUBLE, KEYWORD); - assertNull(conversion.convert(null)); - assertEquals("10.0", conversion.convert(10.0)); - conversion = conversionFor(DATETIME, KEYWORD); - assertNull(conversion.convert(null)); - assertEquals("1970-01-01T00:00:00.000Z", conversion.convert(dateTime(0))); + public void testConversionToString() { + DataType to = KEYWORD; + { + Conversion conversion = conversionFor(DOUBLE, to); + assertNull(conversion.convert(null)); + assertEquals("10.0", conversion.convert(10.0)); + } + { + Conversion conversion = conversionFor(DATE, to); + assertNull(conversion.convert(null)); + assertEquals("1973-11-29", conversion.convert(DateUtils.asDateOnly(123456789101L))); + assertEquals("1966-02-02", conversion.convert(DateUtils.asDateOnly(-123456789101L))); + } + { + Conversion conversion = conversionFor(DATETIME, to); + assertNull(conversion.convert(null)); + assertEquals("1973-11-29T21:33:09.101Z", conversion.convert(asDateTime(123456789101L))); + assertEquals("1966-02-02T02:26:50.899Z", conversion.convert(asDateTime(-123456789101L))); + } } /** @@ -58,7 +82,7 @@ 
public void testConversionToLong() { assertEquals(10L, conversion.convert(10.1)); assertEquals(11L, conversion.convert(10.6)); Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Double.MAX_VALUE)); - assertEquals("[" + Double.MAX_VALUE + "] out of [Long] range", e.getMessage()); + assertEquals("[" + Double.MAX_VALUE + "] out of [long] range", e.getMessage()); } { Conversion conversion = conversionFor(INTEGER, to); @@ -72,12 +96,74 @@ public void testConversionToLong() { assertEquals(1L, conversion.convert(true)); assertEquals(0L, conversion.convert(false)); } - Conversion conversion = conversionFor(KEYWORD, to); - assertNull(conversion.convert(null)); - assertEquals(1L, conversion.convert("1")); - assertEquals(0L, conversion.convert("-0")); - Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); - assertEquals("cannot cast [0xff] to [Long]", e.getMessage()); + { + Conversion conversion = conversionFor(DATE, to); + assertNull(conversion.convert(null)); + assertEquals(123379200L, conversion.convert(DateUtils.asDateOnly(123456789101L))); + assertEquals(-123465600L, conversion.convert(DateUtils.asDateOnly(-123456789101L))); + } + { + Conversion conversion = conversionFor(DATETIME, to); + assertNull(conversion.convert(null)); + assertEquals(123456789L, conversion.convert(asDateTime(123456789101L))); + assertEquals(-123456790L, conversion.convert(asDateTime(-123456789101L))); + } + { + Conversion conversion = conversionFor(KEYWORD, to); + assertNull(conversion.convert(null)); + assertEquals(1L, conversion.convert("1")); + assertEquals(0L, conversion.convert("-0")); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); + assertEquals("cannot cast [0xff] to [long]", e.getMessage()); + } + } + + public void testConversionToDate() { + DataType to = DATE; + { + Conversion conversion = conversionFor(DOUBLE, to); + assertNull(conversion.convert(null)); + 
assertEquals(date(10L), conversion.convert(10.0)); + assertEquals(date(10L), conversion.convert(10.1)); + assertEquals(date(11L), conversion.convert(10.6)); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Double.MAX_VALUE)); + assertEquals("[" + Double.MAX_VALUE + "] out of [long] range", e.getMessage()); + } + { + Conversion conversion = conversionFor(INTEGER, to); + assertNull(conversion.convert(null)); + assertEquals(date(10L), conversion.convert(10)); + assertEquals(date(-134L), conversion.convert(-134)); + } + { + Conversion conversion = conversionFor(BOOLEAN, to); + assertNull(conversion.convert(null)); + assertEquals(date(1), conversion.convert(true)); + assertEquals(date(0), conversion.convert(false)); + } + { + Conversion conversion = conversionFor(DATETIME, to); + assertNull(conversion.convert(null)); + assertEquals(date(123456780000L), conversion.convert(asDateTime(123456789101L))); + assertEquals(date(-123456789101L), conversion.convert(asDateTime(-123456789101L))); + } + { + Conversion conversion = conversionFor(KEYWORD, to); + assertNull(conversion.convert(null)); + + assertEquals(date(0L), conversion.convert("1970-01-01T00:10:01Z")); + assertEquals(date(1483228800000L), conversion.convert("2017-01-01T00:11:00Z")); + assertEquals(date(-1672531200000L), conversion.convert("1917-01-01T00:11:00Z")); + assertEquals(date(18000000L), conversion.convert("1970-01-01T03:10:20-05:00")); + + // double check back and forth conversion + ZonedDateTime zdt = TestUtils.now(); + Conversion forward = conversionFor(DATE, KEYWORD); + Conversion back = conversionFor(KEYWORD, DATE); + assertEquals(DateUtils.asDateOnly(zdt), back.convert(forward.convert(zdt))); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); + assertEquals("cannot cast [0xff] to [date]:Invalid format: \"0xff\" is malformed at \"xff\"", e.getMessage()); + } } public void testConversionToDateTime() { @@ -89,7 +175,7 @@ 
public void testConversionToDateTime() { assertEquals(dateTime(10L), conversion.convert(10.1)); assertEquals(dateTime(11L), conversion.convert(10.6)); Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Double.MAX_VALUE)); - assertEquals("[" + Double.MAX_VALUE + "] out of [Long] range", e.getMessage()); + assertEquals("[" + Double.MAX_VALUE + "] out of [long] range", e.getMessage()); } { Conversion conversion = conversionFor(INTEGER, to); @@ -103,84 +189,121 @@ public void testConversionToDateTime() { assertEquals(dateTime(1), conversion.convert(true)); assertEquals(dateTime(0), conversion.convert(false)); } - Conversion conversion = conversionFor(KEYWORD, to); - assertNull(conversion.convert(null)); + { + Conversion conversion = conversionFor(DATE, to); + assertNull(conversion.convert(null)); + assertEquals(dateTime(123379200000L), conversion.convert(DateUtils.asDateOnly(123456789101L))); + assertEquals(dateTime(-123465600000L), conversion.convert(DateUtils.asDateOnly(-123456789101L))); + } + { + Conversion conversion = conversionFor(KEYWORD, to); + assertNull(conversion.convert(null)); + + assertEquals(dateTime(1000L), conversion.convert("1970-01-01T00:00:01Z")); + assertEquals(dateTime(1483228800000L), conversion.convert("2017-01-01T00:00:00Z")); + assertEquals(dateTime(1483228800000L), conversion.convert("2017-01-01T00:00:00Z")); + assertEquals(dateTime(18000000L), conversion.convert("1970-01-01T00:00:00-05:00")); - assertEquals(dateTime(1000L), conversion.convert("1970-01-01T00:00:01Z")); - assertEquals(dateTime(1483228800000L), conversion.convert("2017-01-01T00:00:00Z")); - assertEquals(dateTime(18000000L), conversion.convert("1970-01-01T00:00:00-05:00")); - - // double check back and forth conversion - ZonedDateTime dt = TestUtils.now(); - Conversion forward = conversionFor(DATETIME, KEYWORD); - Conversion back = conversionFor(KEYWORD, DATETIME); - assertEquals(dt, back.convert(forward.convert(dt))); - Exception e = 
expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); - assertEquals("cannot cast [0xff] to [Date]:Invalid format: \"0xff\" is malformed at \"xff\"", e.getMessage()); + // double check back and forth conversion + ZonedDateTime dt = TestUtils.now(); + Conversion forward = conversionFor(DATETIME, KEYWORD); + Conversion back = conversionFor(KEYWORD, DATETIME); + assertEquals(dt, back.convert(forward.convert(dt))); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); + assertEquals("cannot cast [0xff] to [datetime]:Invalid format: \"0xff\" is malformed at \"xff\"", e.getMessage()); + } } public void testConversionToDouble() { + DataType to = DOUBLE; { - Conversion conversion = conversionFor(FLOAT, DOUBLE); + Conversion conversion = conversionFor(FLOAT, to); assertNull(conversion.convert(null)); assertEquals(10.0, (double) conversion.convert(10.0f), 0.00001); assertEquals(10.1, (double) conversion.convert(10.1f), 0.00001); assertEquals(10.6, (double) conversion.convert(10.6f), 0.00001); } { - Conversion conversion = conversionFor(INTEGER, DOUBLE); + Conversion conversion = conversionFor(INTEGER, to); assertNull(conversion.convert(null)); assertEquals(10.0, (double) conversion.convert(10), 0.00001); assertEquals(-134.0, (double) conversion.convert(-134), 0.00001); } { - Conversion conversion = conversionFor(BOOLEAN, DOUBLE); + Conversion conversion = conversionFor(BOOLEAN, to); assertNull(conversion.convert(null)); assertEquals(1.0, (double) conversion.convert(true), 0); assertEquals(0.0, (double) conversion.convert(false), 0); } { - Conversion conversion = conversionFor(KEYWORD, DOUBLE); + Conversion conversion = conversionFor(DATE, to); + assertNull(conversion.convert(null)); + assertEquals(1.233792E8, (double) conversion.convert(DateUtils.asDateOnly(123456789101L)), 0); + assertEquals(-1.234656E8, (double) conversion.convert(DateUtils.asDateOnly(-123456789101L)), 0); + } + { + Conversion 
conversion = conversionFor(DATETIME, to); + assertNull(conversion.convert(null)); + assertEquals(1.23456789E8, (double) conversion.convert(asDateTime(123456789101L)), 0); + assertEquals(-1.2345679E8, (double) conversion.convert(asDateTime(-123456789101L)), 0); + } + { + Conversion conversion = conversionFor(KEYWORD, to); assertNull(conversion.convert(null)); assertEquals(1.0, (double) conversion.convert("1"), 0); assertEquals(0.0, (double) conversion.convert("-0"), 0); assertEquals(12.776, (double) conversion.convert("12.776"), 0.00001); Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); - assertEquals("cannot cast [0xff] to [Double]", e.getMessage()); + assertEquals("cannot cast [0xff] to [double]", e.getMessage()); } } public void testConversionToBoolean() { + DataType to = BOOLEAN; { - Conversion conversion = conversionFor(FLOAT, BOOLEAN); + Conversion conversion = conversionFor(FLOAT, to); assertNull(conversion.convert(null)); assertEquals(true, conversion.convert(10.0f)); assertEquals(true, conversion.convert(-10.0f)); assertEquals(false, conversion.convert(0.0f)); } { - Conversion conversion = conversionFor(INTEGER, BOOLEAN); + Conversion conversion = conversionFor(INTEGER, to); assertNull(conversion.convert(null)); assertEquals(true, conversion.convert(10)); assertEquals(true, conversion.convert(-10)); assertEquals(false, conversion.convert(0)); } { - Conversion conversion = conversionFor(LONG, BOOLEAN); + Conversion conversion = conversionFor(LONG, to); assertNull(conversion.convert(null)); assertEquals(true, conversion.convert(10L)); assertEquals(true, conversion.convert(-10L)); assertEquals(false, conversion.convert(0L)); } { - Conversion conversion = conversionFor(DOUBLE, BOOLEAN); + Conversion conversion = conversionFor(DOUBLE, to); assertNull(conversion.convert(null)); assertEquals(true, conversion.convert(10.0d)); assertEquals(true, conversion.convert(-10.0d)); assertEquals(false, 
conversion.convert(0.0d)); } { - Conversion conversion = conversionFor(KEYWORD, BOOLEAN); + Conversion conversion = conversionFor(DATE, to); + assertNull(conversion.convert(null)); + assertEquals(true, conversion.convert(DateUtils.asDateOnly(123456789101L))); + assertEquals(true, conversion.convert(DateUtils.asDateOnly(-123456789101L))); + assertEquals(false, conversion.convert(DateUtils.asDateOnly(0L))); + } + { + Conversion conversion = conversionFor(DATETIME, to); + assertNull(conversion.convert(null)); + assertEquals(true, conversion.convert(asDateTime(123456789101L))); + assertEquals(true, conversion.convert(asDateTime(-123456789101L))); + assertEquals(false, conversion.convert(asDateTime(0L))); + } + { + Conversion conversion = conversionFor(KEYWORD, to); assertNull(conversion.convert(null)); // We only handled upper and lower case true and false assertEquals(true, conversion.convert("true")); @@ -189,29 +312,42 @@ public void testConversionToBoolean() { assertEquals(false, conversion.convert("fAlSe")); // Everything else should fail Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("10")); - assertEquals("cannot cast [10] to [Boolean]", e.getMessage()); + assertEquals("cannot cast [10] to [boolean]", e.getMessage()); e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("-1")); - assertEquals("cannot cast [-1] to [Boolean]", e.getMessage()); + assertEquals("cannot cast [-1] to [boolean]", e.getMessage()); e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0")); - assertEquals("cannot cast [0] to [Boolean]", e.getMessage()); + assertEquals("cannot cast [0] to [boolean]", e.getMessage()); e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("blah")); - assertEquals("cannot cast [blah] to [Boolean]", e.getMessage()); + assertEquals("cannot cast [blah] to [boolean]", e.getMessage()); e = expectThrows(SqlIllegalArgumentException.class, () -> 
conversion.convert("Yes")); - assertEquals("cannot cast [Yes] to [Boolean]", e.getMessage()); + assertEquals("cannot cast [Yes] to [boolean]", e.getMessage()); e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("nO")); - assertEquals("cannot cast [nO] to [Boolean]", e.getMessage()); + assertEquals("cannot cast [nO] to [boolean]", e.getMessage()); } } public void testConversionToInt() { + DataType to = INTEGER; { - Conversion conversion = conversionFor(DOUBLE, INTEGER); + Conversion conversion = conversionFor(DOUBLE, to); assertNull(conversion.convert(null)); assertEquals(10, conversion.convert(10.0)); assertEquals(10, conversion.convert(10.1)); assertEquals(11, conversion.convert(10.6)); Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Long.MAX_VALUE)); - assertEquals("[" + Long.MAX_VALUE + "] out of [Int] range", e.getMessage()); + assertEquals("[" + Long.MAX_VALUE + "] out of [integer] range", e.getMessage()); + } + { + Conversion conversion = conversionFor(DATE, to); + assertNull(conversion.convert(null)); + assertEquals(123379200, conversion.convert(DateUtils.asDateOnly(123456789101L))); + assertEquals(-123465600, conversion.convert(DateUtils.asDateOnly(-123456789101L))); + } + { + Conversion conversion = conversionFor(DATETIME, to); + assertNull(conversion.convert(null)); + assertEquals(123456789, conversion.convert(asDateTime(123456789101L))); + assertEquals(-123456790, conversion.convert(asDateTime(-123456789101L))); } } @@ -223,7 +359,7 @@ public void testConversionToShort() { assertEquals((short) 10, conversion.convert(10.1)); assertEquals((short) 11, conversion.convert(10.6)); Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Integer.MAX_VALUE)); - assertEquals("[" + Integer.MAX_VALUE + "] out of [Short] range", e.getMessage()); + assertEquals("[" + Integer.MAX_VALUE + "] out of [short] range", e.getMessage()); } } @@ -235,7 +371,7 @@ public void 
testConversionToByte() { assertEquals((byte) 10, conversion.convert(10.1)); assertEquals((byte) 11, conversion.convert(10.6)); Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Short.MAX_VALUE)); - assertEquals("[" + Short.MAX_VALUE + "] out of [Byte] range", e.getMessage()); + assertEquals("[" + Short.MAX_VALUE + "] out of [byte] range", e.getMessage()); } } @@ -264,16 +400,30 @@ public void testCommonType() { assertEquals(NULL, commonType(NULL, NULL)); assertEquals(INTEGER, commonType(INTEGER, KEYWORD)); assertEquals(LONG, commonType(TEXT, LONG)); - assertEquals(null, commonType(TEXT, KEYWORD)); + assertNull(commonType(TEXT, KEYWORD)); assertEquals(SHORT, commonType(SHORT, BYTE)); assertEquals(FLOAT, commonType(BYTE, FLOAT)); assertEquals(FLOAT, commonType(FLOAT, INTEGER)); assertEquals(DOUBLE, commonType(DOUBLE, FLOAT)); + + // dates/datetimes and intervals + assertEquals(DATETIME, commonType(DATE, DATETIME)); + assertEquals(DATETIME, commonType(DATETIME, DATE)); + assertEquals(DATETIME, commonType(DATETIME, randomInterval())); + assertEquals(DATETIME, commonType(randomInterval(), DATETIME)); + assertEquals(DATE, commonType(DATE, randomInterval())); + assertEquals(DATE, commonType(randomInterval(), DATE)); + + assertEquals(INTERVAL_YEAR_TO_MONTH, commonType(INTERVAL_YEAR_TO_MONTH, INTERVAL_MONTH)); + assertEquals(INTERVAL_HOUR_TO_SECOND, commonType(INTERVAL_HOUR_TO_MINUTE, INTERVAL_HOUR_TO_SECOND)); + assertNull(commonType(INTERVAL_SECOND, INTERVAL_YEAR)); } public void testEsDataTypes() { for (DataType type : values()) { - assertEquals(type, fromTypeName(type.esType)); + if (type != DATE) { // Doesn't have a corresponding type in ES + assertEquals(type, fromTypeName(type.esType)); + } } } @@ -298,4 +448,8 @@ public void testIpToString() { Conversion stringToIp = conversionFor(KEYWORD, IP); assertEquals("10.0.0.1", ipToString.convert(stringToIp.convert(Literal.of(s, "10.0.0.1")))); } + + private DataType randomInterval() { + 
return randomFrom(Stream.of(DataType.values()).filter(DataTypes::isInterval).collect(Collectors.toList())); + } } From 2bf269e628f5c008b851e7fed408b2814fb8ca4c Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 24 Jan 2019 12:43:48 +0100 Subject: [PATCH 14/20] Fix docs for MappingUpdatedAction Follow-up to #31140 --- .../cluster/action/index/MappingUpdatedAction.java | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index c34a4196bb524..14c360168f904 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.action.index; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.client.Client; import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.common.inject.Inject; @@ -78,11 +79,12 @@ public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdat } /** - * Update mappings synchronously on the master node, waiting for at most - * {@code timeout}. When this method returns successfully mappings have - * been applied to the master node and propagated to data nodes. + * Update mappings on the master node, waiting for the change to be committed, + * but not for the mapping update to be applied on all nodes. The timeout specified by + * {@code timeout} is the master node timeout ({@link MasterNodeRequest#masterNodeTimeout()}), + * potentially waiting for a master node to be available. 
*/ - public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate, TimeValue timeout) { - updateMappingRequest(index, type, mappingUpdate, timeout).get(); + public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate, TimeValue masterNodeTimeout) { + updateMappingRequest(index, type, mappingUpdate, masterNodeTimeout).get(); } } From bc201427912a4d33b3aab91abb418aa0004dea2a Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Thu, 24 Jan 2019 13:47:21 +0200 Subject: [PATCH 15/20] Consolidate testclusters tests into a single project (#37362) --- .../testclusters/TestClustersPluginIT.java | 57 ++++++++++++++----- .../src/testKit/testclusters/build.gradle | 54 +++++++++++------- .../src/testKit/testclusters/settings.gradle | 4 ++ .../alpha/build.gradle | 21 ------- .../bravo/build.gradle | 24 -------- .../testclusters_multiproject/build.gradle | 32 ----------- .../charlie/build.gradle | 5 -- .../testclusters_multiproject/settings.gradle | 3 - 8 files changed, 80 insertions(+), 120 deletions(-) delete mode 100644 buildSrc/src/testKit/testclusters_multiproject/alpha/build.gradle delete mode 100644 buildSrc/src/testKit/testclusters_multiproject/bravo/build.gradle delete mode 100644 buildSrc/src/testKit/testclusters_multiproject/build.gradle delete mode 100644 buildSrc/src/testKit/testclusters_multiproject/charlie/build.gradle delete mode 100644 buildSrc/src/testKit/testclusters_multiproject/settings.gradle diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index ee366ac7b7c65..9b7c5686e8102 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -37,19 +37,19 @@ public void testListClusters() { } public void testUseClusterByOne() { - BuildResult result = 
getTestClustersRunner("user1").build(); + BuildResult result = getTestClustersRunner(":user1").build(); assertTaskSuccessful(result, ":user1"); assertStartedAndStoppedOnce(result); } public void testUseClusterByOneWithDryRun() { - BuildResult result = getTestClustersRunner("--dry-run", "user1").build(); + BuildResult result = getTestClustersRunner("--dry-run", ":user1").build(); assertNull(result.task(":user1")); assertNotStarted(result); } public void testUseClusterByTwo() { - BuildResult result = getTestClustersRunner("user1", "user2").build(); + BuildResult result = getTestClustersRunner(":user1", ":user2").build(); assertTaskSuccessful(result, ":user1", ":user2"); assertStartedAndStoppedOnce(result); } @@ -57,14 +57,14 @@ public void testUseClusterByTwo() { public void testUseClusterByUpToDateTask() { // Run it once, ignoring the result and again to make sure it's considered up to date. // Gradle randomly considers tasks without inputs and outputs as as up-to-date or success on the first run - getTestClustersRunner("upToDate1", "upToDate2").build(); - BuildResult result = getTestClustersRunner("upToDate1", "upToDate2").build(); + getTestClustersRunner(":upToDate1", ":upToDate2").build(); + BuildResult result = getTestClustersRunner(":upToDate1", ":upToDate2").build(); assertTaskUpToDate(result, ":upToDate1", ":upToDate2"); assertNotStarted(result); } public void testUseClusterBySkippedTask() { - BuildResult result = getTestClustersRunner("skipped1", "skipped2").build(); + BuildResult result = getTestClustersRunner(":skipped1", ":skipped2").build(); assertTaskSkipped(result, ":skipped1", ":skipped2"); assertNotStarted(result); } @@ -82,17 +82,44 @@ public void testUseClusterBySkippedAndWorkingTask() { } public void testMultiProject() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("testclusters_multiproject")) - .withArguments("user1", "user2", "-s", "-i", "--parallel", "-Dlocal.repo.path=" + getLocalTestRepoPath()) - 
.withPluginClasspath() - .build(); - assertTaskSuccessful(result, ":user1", ":user2"); + BuildResult result = getTestClustersRunner( + "user1", "user2", "-s", "-i", "--parallel", "-Dlocal.repo.path=" + getLocalTestRepoPath() + ).build(); + + assertTaskSuccessful( + result, + ":user1", ":user2", ":alpha:user1", ":alpha:user2", ":bravo:user1", ":bravo:user2" + ); + assertStartedAndStoppedOnce(result); + assertOutputOnlyOnce( + result.getOutput(), + "Starting `node{:alpha:myTestCluster}`", + "Stopping `node{::myTestCluster}`" + ); + assertOutputOnlyOnce( + result.getOutput(), + "Starting `node{::myTestCluster}`", + "Stopping `node{:bravo:myTestCluster}`" + ); + } + + public void testIncremental() { + BuildResult result = getTestClustersRunner("clean", ":user1").build(); + assertTaskSuccessful(result, ":user1"); + assertStartedAndStoppedOnce(result); + + result = getTestClustersRunner(":user1").build(); + assertTaskSuccessful(result, ":user1"); + assertStartedAndStoppedOnce(result); + + result = getTestClustersRunner("clean", ":user1").build(); + assertTaskSuccessful(result, ":user1"); + assertStartedAndStoppedOnce(result); assertStartedAndStoppedOnce(result); } public void testUseClusterByFailingOne() { - BuildResult result = getTestClustersRunner("itAlwaysFails").buildAndFail(); + BuildResult result = getTestClustersRunner(":itAlwaysFails").buildAndFail(); assertTaskFailed(result, ":itAlwaysFails"); assertStartedAndStoppedOnce(result); assertOutputContains( @@ -103,7 +130,7 @@ public void testUseClusterByFailingOne() { } public void testUseClusterByFailingDependency() { - BuildResult result = getTestClustersRunner("dependsOnFailed").buildAndFail(); + BuildResult result = getTestClustersRunner(":dependsOnFailed").buildAndFail(); assertTaskFailed(result, ":itAlwaysFails"); assertNull(result.task(":dependsOnFailed")); assertStartedAndStoppedOnce(result); @@ -115,7 +142,7 @@ public void testUseClusterByFailingDependency() { } public void testConfigurationLocked() { - 
BuildResult result = getTestClustersRunner("illegalConfigAlter").buildAndFail(); + BuildResult result = getTestClustersRunner(":illegalConfigAlter").buildAndFail(); assertTaskFailed(result, ":illegalConfigAlter"); assertOutputContains( result.getOutput(), diff --git a/buildSrc/src/testKit/testclusters/build.gradle b/buildSrc/src/testKit/testclusters/build.gradle index 67c9afdbc82c3..d02240c0ad2cd 100644 --- a/buildSrc/src/testKit/testclusters/build.gradle +++ b/buildSrc/src/testKit/testclusters/build.gradle @@ -1,31 +1,45 @@ plugins { id 'elasticsearch.testclusters' + id 'base' } -testClusters { - myTestCluster { - distribution = 'ZIP' - version = System.getProperty("test.version_under_test") +allprojects { all -> + repositories { + maven { + url System.getProperty("local.repo.path") + } + String luceneSnapshotRevision = System.getProperty("test.lucene-snapshot-revision") + if (luceneSnapshotRevision != null) { + maven { + url "http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/" + luceneSnapshotRevision + } + } + jcenter() } -} -repositories { - maven { - url System.getProperty("local.repo.path") - } -} + if (project == rootProject || project.name == "alpha" || project.name == "bravo") { + apply plugin: 'elasticsearch.testclusters' -task user1 { - useCluster testClusters.myTestCluster - doLast { - println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}" - } -} + all.testClusters { + myTestCluster { + distribution = 'ZIP' + version = System.getProperty("test.version_under_test") + javaHome = file(System.getProperty('java.home')) + } + } -task user2 { - useCluster testClusters.myTestCluster - doLast { - println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}" + task user1 { + useCluster testClusters.myTestCluster + doFirst { + println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}" + } + } + task user2 { + useCluster testClusters.myTestCluster + doFirst { + println "$path: Cluster 
running @ ${testClusters.myTestCluster.httpSocketURI}" + } + } } } diff --git a/buildSrc/src/testKit/testclusters/settings.gradle b/buildSrc/src/testKit/testclusters/settings.gradle index e69de29bb2d1d..6549a93801b63 100644 --- a/buildSrc/src/testKit/testclusters/settings.gradle +++ b/buildSrc/src/testKit/testclusters/settings.gradle @@ -0,0 +1,4 @@ +include 'dummyPlugin' +include ':alpha' +include ':bravo' +include ':charlie' \ No newline at end of file diff --git a/buildSrc/src/testKit/testclusters_multiproject/alpha/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/alpha/build.gradle deleted file mode 100644 index 783e6d9a80efb..0000000000000 --- a/buildSrc/src/testKit/testclusters_multiproject/alpha/build.gradle +++ /dev/null @@ -1,21 +0,0 @@ -plugins { - id 'elasticsearch.testclusters' -} -testClusters { - myTestCluster { - distribution = 'ZIP' - version = System.getProperty("test.version_under_test") - } -} -task user1 { - useCluster testClusters.myTestCluster - doFirst { - println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}" - } -} -task user2 { - useCluster testClusters.myTestCluster - doFirst { - println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}" - } -} diff --git a/buildSrc/src/testKit/testclusters_multiproject/bravo/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/bravo/build.gradle deleted file mode 100644 index d13cab6eaa934..0000000000000 --- a/buildSrc/src/testKit/testclusters_multiproject/bravo/build.gradle +++ /dev/null @@ -1,24 +0,0 @@ -plugins { - id 'elasticsearch.testclusters' -} - -testClusters { - myTestCluster { - distribution = 'ZIP' - version = System.getProperty("test.version_under_test") - } -} - -task user1 { - useCluster testClusters.myTestCluster - doFirst { - println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}" - } -} - -task user2 { - useCluster testClusters.myTestCluster - doFirst { - println "$path: Cluster running @ 
${testClusters.myTestCluster.httpSocketURI}" - } -} diff --git a/buildSrc/src/testKit/testclusters_multiproject/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/build.gradle deleted file mode 100644 index 18f7b277d01e3..0000000000000 --- a/buildSrc/src/testKit/testclusters_multiproject/build.gradle +++ /dev/null @@ -1,32 +0,0 @@ -plugins { - id 'elasticsearch.testclusters' -} - -allprojects { - repositories { - maven { - url System.getProperty("local.repo.path") - } - } -} - -testClusters { - myTestCluster { - distribution = 'ZIP' - version = System.getProperty("test.version_under_test") - } -} - -task user1 { - useCluster testClusters.myTestCluster - doFirst { - println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}" - } -} - -task user2 { - useCluster testClusters.myTestCluster - doFirst { - println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}" - } -} \ No newline at end of file diff --git a/buildSrc/src/testKit/testclusters_multiproject/charlie/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/charlie/build.gradle deleted file mode 100644 index f63a77aaea42d..0000000000000 --- a/buildSrc/src/testKit/testclusters_multiproject/charlie/build.gradle +++ /dev/null @@ -1,5 +0,0 @@ -task hello() { - doLast { - println "This task does not use the testclusters plugin. So it will have no extension." 
- } -} \ No newline at end of file diff --git a/buildSrc/src/testKit/testclusters_multiproject/settings.gradle b/buildSrc/src/testKit/testclusters_multiproject/settings.gradle deleted file mode 100644 index aa91948920148..0000000000000 --- a/buildSrc/src/testKit/testclusters_multiproject/settings.gradle +++ /dev/null @@ -1,3 +0,0 @@ -include ':alpha' -include ':bravo' -include ':charlie' \ No newline at end of file From 20533c59909fd6008c8d3e1d3789ec884546a853 Mon Sep 17 00:00:00 2001 From: Yulong Date: Thu, 24 Jan 2019 20:12:32 +0800 Subject: [PATCH 16/20] Add built-in user and role for code plugin (#37030) * Add built-in roles for code plugin * Fix rest-client get-roles test count * Fix broken test --- .../SecurityDocumentationIT.java | 4 +- .../authz/store/ReservedRolesStore.java | 17 +++++- .../authz/store/ReservedRolesStoreTests.java | 55 +++++++++++++++++++ 3 files changed, 72 insertions(+), 4 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index 183a8942a7b2b..b7261b2dd9581 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -638,8 +638,8 @@ public void testGetRoles() throws Exception { List roles = response.getRoles(); assertNotNull(response); - // 21 system roles plus the three we created - assertThat(roles.size(), equalTo(24)); + // 23 system roles plus the three we created + assertThat(roles.size(), equalTo(26)); } { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index dfd276f4ee9f0..8cb151da4e2b1 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -107,11 +107,14 @@ private static Map initializeReservedRoles() { "monitor", "manage_index_templates", MonitoringBulkAction.NAME, "manage_saml", "manage_token" }, new RoleDescriptor.IndicesPrivileges[] { - RoleDescriptor.IndicesPrivileges.builder().indices(".kibana*", ".reporting-*").privileges("all").build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices(".kibana*", ".reporting-*").privileges("all").build(), RoleDescriptor.IndicesPrivileges.builder() .indices(".monitoring-*").privileges("read", "read_cross_cluster").build(), RoleDescriptor.IndicesPrivileges.builder() - .indices(".management-beats").privileges("create_index", "read", "write").build() + .indices(".management-beats").privileges("create_index", "read", "write").build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices(".code-*").privileges("all").build(), }, null, new ConditionalClusterPrivilege[] { new ManageApplicationPrivileges(Collections.singleton("kibana-*")) }, @@ -166,6 +169,16 @@ private static Map initializeReservedRoles() { null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) .put("rollup_admin", new RoleDescriptor("rollup_admin", new String[] { "manage_rollup" }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("code_admin", new RoleDescriptor("code_admin", new String[] {}, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + .indices(".code-*").privileges("all").build() + }, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("code_user", new RoleDescriptor("code_user", new String[] {}, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + .indices(".code-*").privileges("read").build() + }, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) .immutableMap(); } diff 
--git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 5a567ad13ff80..7f4dbcee4ed5d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -158,6 +158,9 @@ public void testIsReserved() { assertThat(ReservedRolesStore.isReserved(APMSystemUser.ROLE_NAME), is(true)); assertThat(ReservedRolesStore.isReserved(RemoteMonitoringUser.COLLECTION_ROLE_NAME), is(true)); assertThat(ReservedRolesStore.isReserved(RemoteMonitoringUser.INDEXING_ROLE_NAME), is(true)); + assertThat(ReservedRolesStore.isReserved("code_admin"), is(true)); + assertThat(ReservedRolesStore.isReserved("code_user"), is(true)); + } public void testIngestAdminRole() { @@ -984,4 +987,56 @@ public void testLogstashAdminRole() { assertThat(logstashAdminRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(true)); assertThat(logstashAdminRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(true)); } + + public void testCodeAdminRole() { + RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("code_admin"); + assertNotNull(roleDescriptor); + assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); + + Role codeAdminRole = Role.builder(roleDescriptor, null).build(); + + + assertThat(codeAdminRole.indices().allowedIndicesMatcher(IndexAction.NAME).test("foo"), is(false)); + assertThat(codeAdminRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(".reporting"), is(false)); + assertThat(codeAdminRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(".code-"), is(true)); + 
assertThat(codeAdminRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), + is(false)); + + final String index = ".code-" + randomIntBetween(0, 5); + + assertThat(codeAdminRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(true)); + assertThat(codeAdminRole.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(index), is(true)); + assertThat(codeAdminRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(index), is(true)); + assertThat(codeAdminRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(index), is(true)); + assertThat(codeAdminRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true)); + assertThat(codeAdminRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(true)); + assertThat(codeAdminRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(true)); + assertThat(codeAdminRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(true)); + } + + public void testCodeUserRole() { + RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("code_user"); + assertNotNull(roleDescriptor); + assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); + + Role codeUserRole = Role.builder(roleDescriptor, null).build(); + + + assertThat(codeUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test("foo"), is(false)); + assertThat(codeUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(".reporting"), is(false)); + assertThat(codeUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(".code-"), is(true)); + assertThat(codeUserRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), + is(false)); + + final String index = ".code-" + randomIntBetween(0, 5); + + assertThat(codeUserRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(false)); + 
assertThat(codeUserRole.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(index), is(false)); + assertThat(codeUserRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(index), is(false)); + assertThat(codeUserRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(index), is(false)); + assertThat(codeUserRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true)); + assertThat(codeUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(true)); + assertThat(codeUserRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(true)); + assertThat(codeUserRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(false)); + } } From c7b16162ae456ca6d06cafc2435cfff1d00c1998 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 24 Jan 2019 13:52:22 +0100 Subject: [PATCH 17/20] Remove unused ThreadBarrier class (#37666) This class is pretty complex and only used in a test where we can simply fail the test with an assertion error. --- .../common/util/concurrent/ThreadBarrier.java | 304 ------------------ .../util/concurrent/EsExecutorsTests.java | 39 +-- 2 files changed, 17 insertions(+), 326 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java deleted file mode 100644 index 0b2b1a5a54c9e..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java +++ /dev/null @@ -1,304 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.util.concurrent; - -import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * A synchronization aid that allows a set of threads to all wait for each other - * to reach a common barrier point. Barriers are useful in programs involving a - * fixed sized party of threads that must occasionally wait for each other. - * ThreadBarrier adds a cause to - * {@link BrokenBarrierException} thrown by a {@link #reset()} operation defined - * by {@link CyclicBarrier}. - *

    - * Sample usage:
    - *

      - *
    • Barrier as a synchronization and Exception handling aid
    • - *
    • Barrier as a trigger for elapsed notification events
    • - *
    - *
    - *    class MyTestClass implements RemoteEventListener
    - *    {
    - *      final ThreadBarrier barrier;
    - *
    - *      class Worker implements Runnable
    - *        {
    - *          public void run()
    - *            {
    - *              barrier.await();    //wait for all threads to reach run
    - *              try
    - *                {
    - *                  prepare();
    - *                  barrier.await();    //wait for all threads to prepare
    - *                  process();
    - *                  barrier.await();    //wait for all threads to process
    - *                }
    - *              catch(Exception e){
    - *                  log("Worker thread caught exception", e);
    - *                  barrier.reset(e);
    - *                }
    - *            }
    - *        }
    - *
    - *      public void testThreads() {
    - *          barrier = new ThreadBarrier(N_THREADS + 1);
    - *          for (int i = 0; i < N; ++i)
    - *           new Thread(new Worker()).start();
    - *
    - *          try{
    - *              barrier.await();    //wait for all threads to reach run
    - *              barrier.await();    //wait for all threads to prepare
    - *              barrier.await();    //wait for all threads to process
    - *            }
    - *          catch(BrokenBarrierException bbe) {
    - *              Assert.fail(bbe);
    - *            }
    - *       }
    - *
    - *      int actualNotificationCount = 0;
    - *      public synchronized void notify (RemoteEvent event) {
    - *          try{
    - *              actualNotificationCount++;
    - *              if (actualNotificationCount == EXPECTED_COUNT)
    - *                  barrier.await();    //signal when all notifications arrive
    - *
    - *               // too many notifications?
    - *               Assert.assertFalse("Exceeded notification count",
    - *                                          actualNotificationCount > EXPECTED_COUNT);
    - *            }
    - *          catch(Exception e) {
    - *              log("Worker thread caught exception", e);
    - *              barrier.reset(e);
    - *            }
    - *        }
    - *
    - *      public void testNotify() {
    - *          barrier = new ThreadBarrier(N_LISTENERS + 1);
    - *          registerNotification();
    - *          triggerNotifications();
    - *
    - *          //wait until either all notifications arrive, or
    - *          //until a MAX_TIMEOUT is reached.
    - *          barrier.await(MAX_TIMEOUT);
    - *
    - *          //check if all notifications were accounted for or timed-out
    - *          Assert.assertEquals("Notification count",
    - *                                      EXPECTED_COUNT, actualNotificationCount);
    - *
    - *          //inspect that the barrier isn't broken
    - *          barrier.inspect(); //throws BrokenBarrierException if broken
    - *        }
    - *    }
    - * 
    - * - * - */ -public class ThreadBarrier extends CyclicBarrier { - /** - * The cause of a {@link BrokenBarrierException} and {@link TimeoutException} - * thrown from an await() when {@link #reset(Exception)} was invoked. - */ - private Exception cause; - - public ThreadBarrier(int parties) { - super(parties); - } - - public ThreadBarrier(int parties, Runnable barrierAction) { - super(parties, barrierAction); - } - - @Override - public int await() throws InterruptedException, BrokenBarrierException { - try { - breakIfBroken(); - return super.await(); - } catch (BrokenBarrierException bbe) { - initCause(bbe); - throw bbe; - } - } - - @Override - public int await(long timeout, TimeUnit unit) throws InterruptedException, BrokenBarrierException, TimeoutException { - try { - breakIfBroken(); - return super.await(timeout, unit); - } catch (BrokenBarrierException bbe) { - initCause(bbe); - throw bbe; - } catch (TimeoutException te) { - initCause(te); - throw te; - } - } - - /** - * Resets the barrier to its initial state. If any parties are - * currently waiting at the barrier, they will return with a - * {@link BrokenBarrierException}. Note that resets after - * a breakage has occurred for other reasons can be complicated to - * carry out; threads need to re-synchronize in some other way, - * and choose one to perform the reset. It may be preferable to - * instead create a new barrier for subsequent use. - * - * @param cause The cause of the BrokenBarrierException - */ - public synchronized void reset(Exception cause) { - if (!isBroken()) { - super.reset(); - } - - if (this.cause == null) { - this.cause = cause; - } - } - - /** - * Queries if this barrier is in a broken state. Note that if - * {@link #reset(Exception)} is invoked the barrier will remain broken, while - * {@link #reset()} will reset the barrier to its initial state and - * {@link #isBroken()} will return false. 
- * - * @return {@code true} if one or more parties broke out of this barrier due - * to interruption or timeout since construction or the last reset, - * or a barrier action failed due to an exception; {@code false} - * otherwise. - * @see #inspect() - */ - @Override - public synchronized boolean isBroken() { - return this.cause != null || super.isBroken(); - } - - /** - * Inspects if the barrier is broken. If for any reason, the barrier - * was broken, a {@link BrokenBarrierException} will be thrown. Otherwise, - * would return gracefully. - * - * @throws BrokenBarrierException With a nested broken cause. - */ - public synchronized void inspect() throws BrokenBarrierException { - try { - breakIfBroken(); - } catch (BrokenBarrierException bbe) { - initCause(bbe); - throw bbe; - } - } - - /** - * breaks this barrier if it has been reset or broken for any other reason. - *

    - * Note: This call is not atomic in respect to await/reset calls. A - * breakIfBroken() may be context switched to invoke a reset() prior to - * await(). This resets the barrier to its initial state - parties not - * currently waiting at the barrier will not be accounted for! An await that - * wasn't time limited, will block indefinitely. - * - * @throws BrokenBarrierException an empty BrokenBarrierException. - */ - private synchronized void breakIfBroken() - throws BrokenBarrierException { - if (isBroken()) { - throw new BrokenBarrierException(); - } - } - - /** - * Initializes the cause of this throwable to the specified value. The cause - * is the throwable that was initialized by {@link #reset(Exception)}. - * - * @param t throwable. - */ - private synchronized void initCause(Throwable t) { - t.initCause(this.cause); - } - - /** - * A Barrier action to be used in conjunction with {@link ThreadBarrier} to - * measure performance between barrier awaits. This runnable will execute - * when the barrier is tripped. Make sure to reset() the timer before next - * Measurement. - * - * @see ThreadBarrier#ThreadBarrier(int, Runnable) - *

    - * Usage example:
    - *

    
    -     *   BarrierTimer timer = new BarrierTimer();
    -     *   ThreadBarrier barrier = new ThreadBarrier( nTHREADS + 1, timer );
    -     *   ..
    -     *   barrier.await(); // starts timer when all threads trip on await
    -     *   barrier.await(); // stops  timer when all threads trip on await
    -     *   ..
    -     *   long time = timer.getTimeInNanos();
    -     *   long tpi = time / ((long)nREPEATS * nTHREADS); //throughput per thread iteration
    -     *   long secs = timer.getTimeInSeconds();    //total runtime in seconds
    -     *   ..
    -     *   timer.reset();  // reuse timer
    -     * 
    - */ - public static class BarrierTimer implements Runnable { - volatile boolean started; - volatile long startTime; - volatile long endTime; - - @Override - public void run() { - long t = System.nanoTime(); - if (!started) { - started = true; - startTime = t; - } else - endTime = t; - } - - /** - * resets (clears) this timer before next execution. - */ - public void reset() { - started = false; - } - - /** - * Returns the elapsed time between two successive barrier executions. - * - * @return elapsed time in nanoseconds. - */ - public long getTimeInNanos() { - return endTime - startTime; - } - - /** - * Returns the elapsed time between two successive barrier executions. - * - * @return elapsed time in seconds. - */ - public double getTimeInSeconds() { - long time = endTime - startTime; - return (time) / 1000000000.0; - } - } - -} diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java index ff916c91613dc..3109473d56616 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java @@ -25,6 +25,7 @@ import org.hamcrest.Matcher; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -169,7 +170,7 @@ public void run() { public void testScaleUp() throws Exception { final int min = between(1, 3); final int max = between(min + 1, 6); - final ThreadBarrier barrier = new ThreadBarrier(max + 1); + final CyclicBarrier barrier = new CyclicBarrier(max + 1); ThreadPoolExecutor pool = EsExecutors.newScaling(getClass().getName() + "/" + getTestName(), min, max, between(1, 100), randomTimeUnit(), @@ -179,16 +180,13 @@ public void testScaleUp() throws Exception 
{ for (int i = 0; i < max; ++i) { final CountDownLatch latch = new CountDownLatch(1); - pool.execute(new Runnable() { - @Override - public void run() { - latch.countDown(); - try { - barrier.await(); - barrier.await(); - } catch (Exception e) { - barrier.reset(e); - } + pool.execute(() -> { + latch.countDown(); + try { + barrier.await(); + barrier.await(); + } catch (Exception e) { + throw new AssertionError(e); } }); @@ -207,7 +205,7 @@ public void run() { public void testScaleDown() throws Exception { final int min = between(1, 3); final int max = between(min + 1, 6); - final ThreadBarrier barrier = new ThreadBarrier(max + 1); + final CyclicBarrier barrier = new CyclicBarrier(max + 1); final ThreadPoolExecutor pool = EsExecutors.newScaling(getClass().getName() + "/" + getTestName(), min, max, between(1, 100), TimeUnit.MILLISECONDS, @@ -217,16 +215,13 @@ public void testScaleDown() throws Exception { for (int i = 0; i < max; ++i) { final CountDownLatch latch = new CountDownLatch(1); - pool.execute(new Runnable() { - @Override - public void run() { - latch.countDown(); - try { - barrier.await(); - barrier.await(); - } catch (Exception e) { - barrier.reset(e); - } + pool.execute(() -> { + latch.countDown(); + try { + barrier.await(); + barrier.await(); + } catch (Exception e) { + throw new AssertionError(e); } }); From 93579293095374457c84494888b95b761cb7bd62 Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Thu, 24 Jan 2019 15:03:49 +0200 Subject: [PATCH 18/20] SQL: Improve handling of invalid args for PERCENTILE/PERCENTILE_RANK (#37803) Improve the Exception and the error message returned when 2nd argument of PERCENTILE and PERCENTILE_RANK is not a constant. 
--- .../expression/function/aggregate/Percentile.java | 6 +++--- .../function/aggregate/PercentileRank.java | 6 +++--- .../analyzer/VerifierErrorMessagesTests.java | 15 ++++----------- 3 files changed, 10 insertions(+), 17 deletions(-) diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java index 593466f4c4773..295932cd99c5e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.sql.expression.function.aggregate; -import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; @@ -17,6 +16,7 @@ import java.util.List; import static java.util.Collections.singletonList; +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; public class Percentile extends NumericAggregate implements EnclosedAgg { @@ -43,8 +43,8 @@ public Percentile replaceChildren(List newChildren) { @Override protected TypeResolution resolveType() { if (!percent.foldable()) { - throw new SqlIllegalArgumentException("2nd argument of PERCENTILE must be constant, received [{}]", - Expressions.name(percent)); + return new TypeResolution(format(null, "2nd argument of PERCENTILE must be a constant, received [{}]", + Expressions.name(percent))); } TypeResolution resolution = super.resolveType(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java index 72614f8265f48..92bc794b248da 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.sql.expression.function.aggregate; -import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; @@ -17,6 +16,7 @@ import java.util.List; import static java.util.Collections.singletonList; +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; public class PercentileRank extends AggregateFunction implements EnclosedAgg { @@ -43,8 +43,8 @@ public Expression replaceChildren(List newChildren) { @Override protected TypeResolution resolveType() { if (!value.foldable()) { - throw new SqlIllegalArgumentException("2nd argument of PERCENTILE_RANK must be constant, received [{}]", - Expressions.name(value)); + return new TypeResolution(format(null, "2nd argument of PERCENTILE_RANK must be a constant, received [{}]", + Expressions.name(value))); } TypeResolution resolution = super.resolveType(); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index 946e8f93a7091..3316b179f50cb 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -6,7 +6,6 @@ package 
org.elasticsearch.xpack.sql.analysis.analyzer; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.TestUtils; import org.elasticsearch.xpack.sql.analysis.AnalysisException; import org.elasticsearch.xpack.sql.analysis.index.EsIndex; @@ -539,19 +538,13 @@ public void testAggsInHistogram() { } public void testErrorMessageForPercentileWithSecondArgBasedOnAField() { - Analyzer analyzer = new Analyzer(TestUtils.TEST_CFG, new FunctionRegistry(), indexResolution, new Verifier(new Metrics())); - SqlIllegalArgumentException e = expectThrows(SqlIllegalArgumentException.class, () -> analyzer.analyze(parser.createStatement( - "SELECT PERCENTILE(int, ABS(int)) FROM test"), true)); - assertEquals("2nd argument of PERCENTILE must be constant, received [ABS(int)]", - e.getMessage()); + assertEquals("1:8: 2nd argument of PERCENTILE must be a constant, received [ABS(int)]", + error("SELECT PERCENTILE(int, ABS(int)) FROM test")); } public void testErrorMessageForPercentileRankWithSecondArgBasedOnAField() { - Analyzer analyzer = new Analyzer(TestUtils.TEST_CFG, new FunctionRegistry(), indexResolution, new Verifier(new Metrics())); - SqlIllegalArgumentException e = expectThrows(SqlIllegalArgumentException.class, () -> analyzer.analyze(parser.createStatement( - "SELECT PERCENTILE_RANK(int, ABS(int)) FROM test"), true)); - assertEquals("2nd argument of PERCENTILE_RANK must be constant, received [ABS(int)]", - e.getMessage()); + assertEquals("1:8: 2nd argument of PERCENTILE_RANK must be a constant, received [ABS(int)]", + error("SELECT PERCENTILE_RANK(int, ABS(int)) FROM test")); } } From feab59df039d99a177bee4e4583be0cd8346c2c1 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 24 Jan 2019 14:09:03 +0100 Subject: [PATCH 19/20] Bubble exceptions up in ClusterApplierService (#37729) Exceptions thrown by the cluster applier service's settings and cluster appliers are bubbled up, and block the 
state from being applied instead of silently being ignored. In combination with the cluster state publishing lag detector, this will throw a node out of the cluster that can't properly apply cluster state updates. --- .../service/ClusterApplierService.java | 69 +++---- .../elasticsearch/ingest/IngestService.java | 10 +- .../coordination/CoordinatorTests.java | 174 ++++++++---------- .../service/ClusterApplierServiceTests.java | 96 ++++++++++ .../ingest/IngestServiceTests.java | 24 ++- .../MockSinglePrioritizingExecutor.java | 6 + 6 files changed, 234 insertions(+), 145 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java index fa3d4997efb4b..496ee9040a899 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java @@ -390,31 +390,24 @@ protected void runTask(UpdateTask task) { newClusterState = task.apply(previousClusterState); } catch (Exception e) { TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); - if (logger.isTraceEnabled()) { - logger.trace(() -> new ParameterizedMessage( - "failed to execute cluster state applier in [{}], state:\nversion [{}], source [{}]\n{}{}{}", - executionTime, - previousClusterState.version(), - task.source, - previousClusterState.nodes(), - previousClusterState.routingTable(), - previousClusterState.getRoutingNodes()), - e); - } + logger.trace(() -> new ParameterizedMessage( + "failed to execute cluster state applier in [{}], state:\nversion [{}], source [{}]\n{}", + executionTime, previousClusterState.version(), task.source, previousClusterState), e); warnAboutSlowTaskIfNeeded(executionTime, task.source); task.listener.onFailure(task.source, e); return; } if (previousClusterState == newClusterState) { - 
task.listener.onSuccess(task.source); TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); logger.debug("processing [{}]: took [{}] no change in cluster state", task.source, executionTime); warnAboutSlowTaskIfNeeded(executionTime, task.source); + task.listener.onSuccess(task.source); } else { if (logger.isTraceEnabled()) { - logger.trace("cluster state updated, source [{}]\n{}", task.source, newClusterState); - } else if (logger.isDebugEnabled()) { + logger.debug("cluster state updated, version [{}], source [{}]\n{}", newClusterState.version(), task.source, + newClusterState); + } else { logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), task.source); } try { @@ -424,20 +417,19 @@ protected void runTask(UpdateTask task) { executionTime, newClusterState.version(), newClusterState.stateUUID()); warnAboutSlowTaskIfNeeded(executionTime, task.source); + task.listener.onSuccess(task.source); } catch (Exception e) { TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); - final long version = newClusterState.version(); - final String stateUUID = newClusterState.stateUUID(); - final String fullState = newClusterState.toString(); - logger.warn(() -> new ParameterizedMessage( - "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", - executionTime, - version, - stateUUID, - task.source, - fullState), - e); - // TODO: do we want to call updateTask.onFailure here? 
+ if (logger.isTraceEnabled()) { + logger.warn(new ParameterizedMessage( + "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", + executionTime, newClusterState.version(), newClusterState.stateUUID(), task.source, newClusterState), e); + } else { + logger.warn(new ParameterizedMessage( + "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]", + executionTime, newClusterState.version(), newClusterState.stateUUID(), task.source), e); + } + task.listener.onFailure(task.source, e); } } } @@ -454,17 +446,14 @@ private void applyChanges(UpdateTask task, ClusterState previousClusterState, Cl } } + logger.trace("connecting to nodes of cluster state with version {}", newClusterState.version()); nodeConnectionsService.connectToNodes(newClusterState.nodes()); - logger.debug("applying cluster state version {}", newClusterState.version()); - try { - // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency - if (clusterChangedEvent.state().blocks().disableStatePersistence() == false && clusterChangedEvent.metaDataChanged()) { - final Settings incomingSettings = clusterChangedEvent.state().metaData().settings(); - clusterSettings.applySettings(incomingSettings); - } - } catch (Exception ex) { - logger.warn("failed to apply cluster settings", ex); + // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency + if (clusterChangedEvent.state().blocks().disableStatePersistence() == false && clusterChangedEvent.metaDataChanged()) { + logger.debug("applying settings from cluster state with version {}", newClusterState.version()); + final Settings incomingSettings = clusterChangedEvent.state().metaData().settings(); + clusterSettings.applySettings(incomingSettings); } logger.debug("apply cluster state with version {}", newClusterState.version()); @@ -476,18 +465,12 @@ private void 
applyChanges(UpdateTask task, ClusterState previousClusterState, Cl state.set(newClusterState); callClusterStateListeners(clusterChangedEvent); - - task.listener.onSuccess(task.source); } private void callClusterStateAppliers(ClusterChangedEvent clusterChangedEvent) { clusterStateAppliers.forEach(applier -> { - try { - logger.trace("calling [{}] with change to version [{}]", applier, clusterChangedEvent.state().version()); - applier.applyClusterState(clusterChangedEvent); - } catch (Exception ex) { - logger.warn("failed to notify ClusterStateApplier", ex); - } + logger.trace("calling [{}] with change to version [{}]", applier, clusterChangedEvent.state().version()); + applier.applyClusterState(clusterChangedEvent); }); } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 6951e33d5e741..00b04bff2e5fd 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -19,6 +19,8 @@ package org.elasticsearch.ingest; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; @@ -69,6 +71,8 @@ public class IngestService implements ClusterStateApplier { public static final String NOOP_PIPELINE_NAME = "_none"; + private static final Logger logger = LogManager.getLogger(IngestService.class); + private final ClusterService clusterService; private final ScriptService scriptService; private final Map processorFactories; @@ -256,7 +260,11 @@ Map pipelines() { public void applyClusterState(final ClusterChangedEvent event) { ClusterState state = event.state(); Map originalPipelines = pipelines; - innerUpdatePipelines(event.previousState(), state); + try { + innerUpdatePipelines(event.previousState(), state); + } catch 
(ElasticsearchParseException e) { + logger.warn("failed to update ingest pipelines", e); + } //pipelines changed, so add the old metrics to the new metrics if (originalPipelines != pipelines) { pipelines.forEach((id, pipeline) -> { diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 7db63ab120e91..be40f0c888362 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -26,10 +26,10 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterModule; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.coordination.ClusterStatePublisher.AckListener; import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; @@ -39,7 +39,9 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode.Role; -import org.elasticsearch.cluster.service.ClusterApplier; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterApplierService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -52,6 +54,7 @@ import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.transport.TransportAddress; import 
org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.discovery.zen.PublishClusterStateStats; import org.elasticsearch.discovery.zen.UnicastHostsProvider.HostsResolver; @@ -931,7 +934,7 @@ private void testAppliesNoMasterBlock(String noMasterBlockSetting, ClusterBlock cluster.runFor(defaultMillis(FOLLOWER_CHECK_TIMEOUT_SETTING) + defaultMillis(FOLLOWER_CHECK_INTERVAL_SETTING) + DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "detecting disconnection"); - assertThat(leader.clusterApplier.lastAppliedClusterState.blocks().global(), hasItem(expectedBlock)); + assertThat(leader.getLastAppliedClusterState().blocks().global(), hasItem(expectedBlock)); // TODO reboot the leader and verify that the same block is applied when it restarts } @@ -1525,12 +1528,12 @@ class ClusterNode { private Coordinator coordinator; private final DiscoveryNode localNode; private final MockPersistedState persistedState; - private FakeClusterApplier clusterApplier; private AckedFakeThreadPoolMasterService masterService; + private DisruptableClusterApplierService clusterApplierService; + private ClusterService clusterService; private TransportService transportService; private DisruptableMockTransport mockTransport; private List> extraJoinValidators = new ArrayList<>(); - private ClusterStateApplyResponse clusterStateApplyResponse = ClusterStateApplyResponse.SUCCEED; ClusterNode(int nodeIndex, boolean masterEligible) { this(nodeIndex, createDiscoveryNode(nodeIndex, masterEligible), defaultPersistedStateSupplier); @@ -1565,37 +1568,46 @@ protected Optional getDisruptableMockTransport(Transpo final Settings settings = Settings.builder() .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY)).build(); // suppress auto-bootstrap - - final ClusterSettings clusterSettings = new 
ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - clusterApplier = new FakeClusterApplier(settings, clusterSettings); - masterService = new AckedFakeThreadPoolMasterService("test_node", "test", - runnable -> deterministicTaskQueue.scheduleNow(onNode(runnable))); transportService = mockTransport.createTransportService( settings, deterministicTaskQueue.getThreadPool(this::onNode), NOOP_TRANSPORT_INTERCEPTOR, a -> localNode, null, emptySet()); + masterService = new AckedFakeThreadPoolMasterService(localNode.getId(), "test", + runnable -> deterministicTaskQueue.scheduleNow(onNode(runnable))); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + clusterApplierService = new DisruptableClusterApplierService(localNode.getId(), settings, clusterSettings, + deterministicTaskQueue, this::onNode); + clusterService = new ClusterService(settings, clusterSettings, masterService, clusterApplierService); + clusterService.setNodeConnectionsService( + new NodeConnectionsService(clusterService.getSettings(), deterministicTaskQueue.getThreadPool(this::onNode), + transportService) { + @Override + public void connectToNodes(DiscoveryNodes discoveryNodes) { + // override this method as it does blocking calls + } + }); final Collection> onJoinValidators = Collections.singletonList((dn, cs) -> extraJoinValidators.forEach(validator -> validator.accept(dn, cs))); coordinator = new Coordinator("test_node", settings, clusterSettings, transportService, writableRegistry(), ESAllocationTestCase.createAllocationService(Settings.EMPTY), masterService, this::getPersistedState, - Cluster.this::provideUnicastHosts, clusterApplier, onJoinValidators, Randomness.get()); + Cluster.this::provideUnicastHosts, clusterApplierService, onJoinValidators, Randomness.get()); masterService.setClusterStatePublisher(coordinator); logger.trace("starting up [{}]", localNode); transportService.start(); 
transportService.acceptIncomingRequests(); - masterService.start(); coordinator.start(); + clusterService.start(); coordinator.startInitialJoin(); } void close() { logger.trace("taking down [{}]", localNode); - //transportService.stop(); // does blocking stuff :/ - masterService.stop(); coordinator.stop(); - //transportService.close(); // does blocking stuff :/ - masterService.close(); + clusterService.stop(); + //transportService.stop(); // does blocking stuff :/ + clusterService.close(); coordinator.close(); + //transportService.close(); // does blocking stuff :/ } ClusterNode restartedNode() { @@ -1634,11 +1646,11 @@ ClusterState improveConfiguration(ClusterState currentState) { } void setClusterStateApplyResponse(ClusterStateApplyResponse clusterStateApplyResponse) { - this.clusterStateApplyResponse = clusterStateApplyResponse; + clusterApplierService.clusterStateApplyResponse = clusterStateApplyResponse; } ClusterStateApplyResponse getClusterStateApplyResponse() { - return clusterStateApplyResponse; + return clusterApplierService.clusterStateApplyResponse; } Runnable onNode(Runnable runnable) { @@ -1739,7 +1751,7 @@ void onDisconnectEventFrom(ClusterNode clusterNode) { } ClusterState getLastAppliedClusterState() { - return clusterApplier.lastAppliedClusterState; + return clusterApplierService.state(); } void applyInitialConfiguration() { @@ -1769,84 +1781,6 @@ void applyInitialConfiguration() { private boolean isNotUsefullyBootstrapped() { return getLocalNode().isMasterNode() == false || coordinator.isInitialConfigurationSet() == false; } - - private class FakeClusterApplier implements ClusterApplier { - - final ClusterName clusterName; - private final ClusterSettings clusterSettings; - ClusterState lastAppliedClusterState; - - private FakeClusterApplier(Settings settings, ClusterSettings clusterSettings) { - clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); - this.clusterSettings = clusterSettings; - } - - @Override - public void 
setInitialState(ClusterState initialState) { - assert lastAppliedClusterState == null; - assert initialState != null; - lastAppliedClusterState = initialState; - } - - @Override - public void onNewClusterState(String source, Supplier clusterStateSupplier, ClusterApplyListener listener) { - switch (clusterStateApplyResponse) { - case SUCCEED: - deterministicTaskQueue.scheduleNow(onNode(new Runnable() { - @Override - public void run() { - final ClusterState oldClusterState = clusterApplier.lastAppliedClusterState; - final ClusterState newClusterState = clusterStateSupplier.get(); - assert oldClusterState.version() <= newClusterState.version() : "updating cluster state from version " - + oldClusterState.version() + " to stale version " + newClusterState.version(); - clusterApplier.lastAppliedClusterState = newClusterState; - final Settings incomingSettings = newClusterState.metaData().settings(); - clusterSettings.applySettings(incomingSettings); // TODO validation might throw exceptions here. 
- listener.onSuccess(source); - } - - @Override - public String toString() { - return "apply cluster state from [" + source + "]"; - } - })); - break; - case FAIL: - deterministicTaskQueue.scheduleNow(onNode(new Runnable() { - @Override - public void run() { - listener.onFailure(source, new ElasticsearchException("cluster state application failed")); - } - - @Override - public String toString() { - return "fail to apply cluster state from [" + source + "]"; - } - })); - break; - case HANG: - if (randomBoolean()) { - deterministicTaskQueue.scheduleNow(onNode(new Runnable() { - @Override - public void run() { - final ClusterState oldClusterState = clusterApplier.lastAppliedClusterState; - final ClusterState newClusterState = clusterStateSupplier.get(); - assert oldClusterState.version() <= newClusterState.version() : - "updating cluster state from version " - + oldClusterState.version() + " to stale version " + newClusterState.version(); - clusterApplier.lastAppliedClusterState = newClusterState; - } - - @Override - public String toString() { - return "apply cluster state from [" + source + "] without ack"; - } - })); - } - break; - } - } - } } private List provideUnicastHosts(HostsResolver ignored) { @@ -1938,6 +1872,52 @@ public void onNodeAck(DiscoveryNode node, Exception e) { } } + static class DisruptableClusterApplierService extends ClusterApplierService { + private final String nodeName; + private final DeterministicTaskQueue deterministicTaskQueue; + ClusterStateApplyResponse clusterStateApplyResponse = ClusterStateApplyResponse.SUCCEED; + + DisruptableClusterApplierService(String nodeName, Settings settings, ClusterSettings clusterSettings, + DeterministicTaskQueue deterministicTaskQueue, Function runnableWrapper) { + super(nodeName, settings, clusterSettings, deterministicTaskQueue.getThreadPool(runnableWrapper)); + this.nodeName = nodeName; + this.deterministicTaskQueue = deterministicTaskQueue; + addStateApplier(event -> { + switch 
(clusterStateApplyResponse) { + case SUCCEED: + case HANG: + final ClusterState oldClusterState = event.previousState(); + final ClusterState newClusterState = event.state(); + assert oldClusterState.version() <= newClusterState.version() : "updating cluster state from version " + + oldClusterState.version() + " to stale version " + newClusterState.version(); + break; + case FAIL: + throw new ElasticsearchException("simulated cluster state applier failure"); + } + }); + } + + @Override + protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { + return new MockSinglePrioritizingExecutor(nodeName, deterministicTaskQueue); + } + + @Override + public void onNewClusterState(String source, Supplier clusterStateSupplier, ClusterApplyListener listener) { + if (clusterStateApplyResponse == ClusterStateApplyResponse.HANG) { + if (randomBoolean()) { + // apply cluster state, but don't notify listener + super.onNewClusterState(source, clusterStateSupplier, (source1, e) -> { + // ignore result + }); + } + } else { + super.onNewClusterState(source, clusterStateSupplier, listener); + } + } + + } + private static DiscoveryNode createDiscoveryNode(int nodeIndex, boolean masterEligible) { final TransportAddress address = buildNewFakeTransportAddress(); return new DiscoveryNode("", "node" + nodeIndex, diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java index 2690909489c75..770ae68e1285f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java @@ -28,8 +28,10 @@ import org.elasticsearch.cluster.LocalNodeMasterListener; import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.MetaData; import 
org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; @@ -53,6 +55,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.elasticsearch.test.ClusterServiceUtils.setState; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; public class ClusterApplierServiceTests extends ESTestCase { @@ -357,6 +360,97 @@ public void onFailure(String source, Exception e) { assertTrue(applierCalled.get()); } + public void testClusterStateApplierBubblesUpExceptionsInApplier() throws InterruptedException { + AtomicReference error = new AtomicReference<>(); + clusterApplierService.addStateApplier(event -> { + throw new RuntimeException("dummy exception"); + }); + + CountDownLatch latch = new CountDownLatch(1); + clusterApplierService.onNewClusterState("test", () -> ClusterState.builder(clusterApplierService.state()).build(), + new ClusterApplyListener() { + + @Override + public void onSuccess(String source) { + latch.countDown(); + fail("should not be called"); + } + + @Override + public void onFailure(String source, Exception e) { + assertTrue(error.compareAndSet(null, e)); + latch.countDown(); + } + } + ); + + latch.await(); + assertNotNull(error.get()); + assertThat(error.get().getMessage(), containsString("dummy exception")); + } + + public void testClusterStateApplierBubblesUpExceptionsInSettingsApplier() throws InterruptedException { + AtomicReference error = new AtomicReference<>(); + clusterApplierService.clusterSettings.addSettingsUpdateConsumer(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, + v -> {}); + + CountDownLatch latch = new 
CountDownLatch(1); + clusterApplierService.onNewClusterState("test", () -> ClusterState.builder(clusterApplierService.state()) + .metaData(MetaData.builder(clusterApplierService.state().metaData()) + .persistentSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), false).build()) + .build()) + .build(), + new ClusterApplyListener() { + + @Override + public void onSuccess(String source) { + latch.countDown(); + fail("should not be called"); + } + + @Override + public void onFailure(String source, Exception e) { + assertTrue(error.compareAndSet(null, e)); + latch.countDown(); + } + } + ); + + latch.await(); + assertNotNull(error.get()); + assertThat(error.get().getMessage(), containsString("illegal value can't update")); + } + + public void testClusterStateApplierSwallowsExceptionInListener() throws InterruptedException { + AtomicReference error = new AtomicReference<>(); + AtomicBoolean applierCalled = new AtomicBoolean(); + clusterApplierService.addListener(event -> { + assertTrue(applierCalled.compareAndSet(false, true)); + throw new RuntimeException("dummy exception"); + }); + + CountDownLatch latch = new CountDownLatch(1); + clusterApplierService.onNewClusterState("test", () -> ClusterState.builder(clusterApplierService.state()).build(), + new ClusterApplyListener() { + + @Override + public void onSuccess(String source) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Exception e) { + error.compareAndSet(null, e); + } + } + ); + + latch.await(); + assertNull(error.get()); + assertTrue(applierCalled.get()); + } + public void testClusterStateApplierCanCreateAnObserver() throws InterruptedException { AtomicReference error = new AtomicReference<>(); AtomicBoolean applierCalled = new AtomicBoolean(); @@ -407,10 +501,12 @@ public void onFailure(String source, Exception e) { static class TimedClusterApplierService extends ClusterApplierService { + final ClusterSettings 
clusterSettings; public volatile Long currentTimeOverride = null; TimedClusterApplierService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { super("test_node", settings, clusterSettings, threadPool); + this.clusterSettings = clusterSettings; } @Override diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index 3dde7babb0a96..e5aea1f5d5ce1 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -19,6 +19,9 @@ package org.elasticsearch.ingest; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; @@ -39,11 +42,13 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.threadpool.ThreadPool; import org.hamcrest.CustomTypeSafeMatcher; import org.mockito.ArgumentMatcher; @@ -254,7 +259,7 @@ public void testPut() { assertThat(pipeline.getProcessors().size(), equalTo(0)); } - public void testPutWithErrorResponse() { + public void testPutWithErrorResponse() throws IllegalAccessException { IngestService ingestService = createWithProcessors(); String id = "_id"; Pipeline pipeline = ingestService.getPipeline(id); @@ -265,11 +270,22 @@ public void 
testPutWithErrorResponse() { new PutPipelineRequest(id, new BytesArray("{\"description\": \"empty processors\"}"), XContentType.JSON); ClusterState previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); + MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.start(); + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "test1", + IngestService.class.getCanonicalName(), + Level.WARN, + "failed to update ingest pipelines")); + Logger ingestLogger = LogManager.getLogger(IngestService.class); + Loggers.addAppender(ingestLogger, mockAppender); try { ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); - fail("should fail"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), equalTo("[processors] required property is missing")); + mockAppender.assertAllExpectationsMatched(); + } finally { + Loggers.removeAppender(ingestLogger, mockAppender); + mockAppender.stop(); } pipeline = ingestService.getPipeline(id); assertNotNull(pipeline); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/MockSinglePrioritizingExecutor.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/MockSinglePrioritizingExecutor.java index cc21fef5f5559..bcc10f1521b29 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/MockSinglePrioritizingExecutor.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/MockSinglePrioritizingExecutor.java @@ -53,6 +53,12 @@ protected void afterExecute(Runnable r, Throwable t) { throw new KillWorkerError(); } + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) { + // ensures we don't block + return false; + } + private static final class KillWorkerError extends Error { } } From 7517e3a7bddfc211e3c55fb4463c532d8c330a47 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 24 Jan 2019 08:39:24 -0500 
Subject: [PATCH 20/20] Optimize warning header de-duplication (#37725) Now that warning headers no longer contain a timestamp of when the warning was generated, we no longer need to extract the warning value from the warning to determine whether or not the warning value is duplicated. Instead, we can compare strings directly. Further, when de-duplicating warning headers, are constantly rebuilding sets. Instead of doing that, we can carry about the set with us and rebuild it if we find a new warning value. This commit applies both of these optimizations. --- .../common/logging/DeprecationLogger.java | 2 +- .../common/util/concurrent/ThreadContext.java | 107 +++++++++++++----- 2 files changed, 82 insertions(+), 27 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java b/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java index 0c77271c7ed0f..81d272923db22 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java +++ b/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java @@ -232,7 +232,7 @@ void deprecated(final Set threadContexts, final String message, f while (iterator.hasNext()) { try { final ThreadContext next = iterator.next(); - next.addResponseHeader("Warning", warningHeaderValue, DeprecationLogger::extractWarningValueFromWarningHeader); + next.addResponseHeader("Warning", warningHeaderValue); } catch (final IllegalStateException e) { // ignored; it should be removed shortly } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index bd3507ef7764a..0fa0e832a0a2b 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -36,14 +36,18 @@ import java.nio.charset.StandardCharsets; import 
java.util.ArrayList; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; +import java.util.function.BinaryOperator; import java.util.function.Function; import java.util.function.Supplier; -import java.util.stream.Collectors; +import java.util.stream.Collector; import java.util.stream.Stream; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT; @@ -258,11 +262,11 @@ public Map getHeaders() { * @return Never {@code null}. */ public Map> getResponseHeaders() { - Map> responseHeaders = threadLocal.get().responseHeaders; + Map> responseHeaders = threadLocal.get().responseHeaders; HashMap> map = new HashMap<>(responseHeaders.size()); - for (Map.Entry> entry : responseHeaders.entrySet()) { - map.put(entry.getKey(), Collections.unmodifiableList(entry.getValue())); + for (Map.Entry> entry : responseHeaders.entrySet()) { + map.put(entry.getKey(), Collections.unmodifiableList(new ArrayList<>(entry.getValue()))); } return Collections.unmodifiableMap(map); @@ -405,7 +409,7 @@ default void restore() { private static final class ThreadContextStruct { private final Map requestHeaders; private final Map transientHeaders; - private final Map> responseHeaders; + private final Map> responseHeaders; private final boolean isSystemContext; private long warningHeadersSize; //saving current warning headers' size not to recalculate the size with every new warning header private ThreadContextStruct(StreamInput in) throws IOException { @@ -416,7 +420,23 @@ private ThreadContextStruct(StreamInput in) throws IOException { } this.requestHeaders = requestHeaders; - this.responseHeaders = in.readMapOfLists(StreamInput::readString, StreamInput::readString); + this.responseHeaders = in.readMap(StreamInput::readString, input -> { + final 
int size = input.readVInt(); + if (size == 0) { + return Collections.emptySet(); + } else if (size == 1) { + return Collections.singleton(input.readString()); + } else { + // use a linked hash set to preserve order + final LinkedHashSet values = new LinkedHashSet<>(size); + for (int i = 0; i < size; i++) { + final String value = input.readString(); + final boolean added = values.add(value); + assert added : value; + } + return values; + } + }); this.transientHeaders = Collections.emptyMap(); isSystemContext = false; // we never serialize this it's a transient flag this.warningHeadersSize = 0L; @@ -430,7 +450,7 @@ private ThreadContextStruct setSystemContext() { } private ThreadContextStruct(Map requestHeaders, - Map> responseHeaders, + Map> responseHeaders, Map transientHeaders, boolean isSystemContext) { this.requestHeaders = requestHeaders; this.responseHeaders = responseHeaders; @@ -440,7 +460,7 @@ private ThreadContextStruct(Map requestHeaders, } private ThreadContextStruct(Map requestHeaders, - Map> responseHeaders, + Map> responseHeaders, Map transientHeaders, boolean isSystemContext, long warningHeadersSize) { this.requestHeaders = requestHeaders; @@ -481,19 +501,19 @@ private ThreadContextStruct putHeaders(Map headers) { } } - private ThreadContextStruct putResponseHeaders(Map> headers) { + private ThreadContextStruct putResponseHeaders(Map> headers) { assert headers != null; if (headers.isEmpty()) { return this; } - final Map> newResponseHeaders = new HashMap<>(this.responseHeaders); - for (Map.Entry> entry : headers.entrySet()) { + final Map> newResponseHeaders = new HashMap<>(this.responseHeaders); + for (Map.Entry> entry : headers.entrySet()) { String key = entry.getKey(); - final List existingValues = newResponseHeaders.get(key); + final Set existingValues = newResponseHeaders.get(key); if (existingValues != null) { - List newValues = Stream.concat(entry.getValue().stream(), - existingValues.stream()).distinct().collect(Collectors.toList()); - 
newResponseHeaders.put(key, Collections.unmodifiableList(newValues)); + final Set newValues = + Stream.concat(entry.getValue().stream(), existingValues.stream()).collect(LINKED_HASH_SET_COLLECTOR); + newResponseHeaders.put(key, Collections.unmodifiableSet(newValues)); } else { newResponseHeaders.put(key, entry.getValue()); } @@ -523,20 +543,19 @@ private ThreadContextStruct putResponse(final String key, final String value, fi } } - final Map> newResponseHeaders = new HashMap<>(this.responseHeaders); - final List existingValues = newResponseHeaders.get(key); + final Map> newResponseHeaders; + final Set existingValues = responseHeaders.get(key); if (existingValues != null) { - final Set existingUniqueValues = existingValues.stream().map(uniqueValue).collect(Collectors.toSet()); - assert existingValues.size() == existingUniqueValues.size() : - "existing values: [" + existingValues + "], existing unique values [" + existingUniqueValues + "]"; - if (existingUniqueValues.contains(uniqueValue.apply(value))) { + if (existingValues.contains(uniqueValue.apply(value))) { return this; } - final List newValues = new ArrayList<>(existingValues); - newValues.add(value); - newResponseHeaders.put(key, Collections.unmodifiableList(newValues)); + // preserve insertion order + final Set newValues = Stream.concat(existingValues.stream(), Stream.of(value)).collect(LINKED_HASH_SET_COLLECTOR); + newResponseHeaders = new HashMap<>(responseHeaders); + newResponseHeaders.put(key, Collections.unmodifiableSet(newValues)); } else { - newResponseHeaders.put(key, Collections.singletonList(value)); + newResponseHeaders = new HashMap<>(responseHeaders); + newResponseHeaders.put(key, Collections.singleton(value)); } //check if we can add another warning header - if max count within limits @@ -588,7 +607,7 @@ private void writeTo(StreamOutput out, Map defaultHeaders) throw out.writeString(entry.getValue()); } - out.writeMapOfLists(responseHeaders, StreamOutput::writeString, 
StreamOutput::writeString); + out.writeMap(responseHeaders, StreamOutput::writeString, StreamOutput::writeStringCollection); } } @@ -751,4 +770,40 @@ public AbstractRunnable unwrap() { return in; } } + + private static final Collector, Set> LINKED_HASH_SET_COLLECTOR = new LinkedHashSetCollector<>(); + + private static class LinkedHashSetCollector implements Collector, Set> { + @Override + public Supplier> supplier() { + return LinkedHashSet::new; + } + + @Override + public BiConsumer, T> accumulator() { + return Set::add; + } + + @Override + public BinaryOperator> combiner() { + return (left, right) -> { + left.addAll(right); + return left; + }; + } + + @Override + public Function, Set> finisher() { + return Function.identity(); + } + + private static final Set CHARACTERISTICS = + Collections.unmodifiableSet(EnumSet.of(Collector.Characteristics.IDENTITY_FINISH)); + + @Override + public Set characteristics() { + return CHARACTERISTICS; + } + } + }