diff --git a/.buildkite/hooks/pre-command.bat b/.buildkite/hooks/pre-command.bat index ddf938a0dd0b9..fe7c2371de0e5 100644 --- a/.buildkite/hooks/pre-command.bat +++ b/.buildkite/hooks/pre-command.bat @@ -18,4 +18,6 @@ set JOB_BRANCH=%BUILDKITE_BRANCH% set GRADLE_BUILD_CACHE_USERNAME=vault read -field=username secret/ci/elastic-elasticsearch/migrated/gradle-build-cache set GRADLE_BUILD_CACHE_PASSWORD=vault read -field=password secret/ci/elastic-elasticsearch/migrated/gradle-build-cache +bash.exe -c "nohup bash .buildkite/scripts/setup-monitoring.sh /dev/null 2>&1 &" + exit /b 0 diff --git a/.buildkite/scripts/setup-monitoring.sh b/.buildkite/scripts/setup-monitoring.sh index 95a5b90effea2..11f00be23d675 100755 --- a/.buildkite/scripts/setup-monitoring.sh +++ b/.buildkite/scripts/setup-monitoring.sh @@ -2,23 +2,50 @@ set -euo pipefail +AGENT_VERSION="8.10.1" + ELASTIC_AGENT_URL=$(vault read -field=url secret/ci/elastic-elasticsearch/elastic-agent-token) ELASTIC_AGENT_TOKEN=$(vault read -field=token secret/ci/elastic-elasticsearch/elastic-agent-token) -if [[ ! -d /opt/elastic-agent ]]; then - sudo mkdir /opt/elastic-agent - sudo chown -R buildkite-agent:buildkite-agent /opt/elastic-agent - cd /opt/elastic-agent +ELASTIC_AGENT_DIR=/opt/elastic-agent +IS_WINDOWS="" + +# Windows +if uname -a | grep -q MING; then + ELASTIC_AGENT_DIR=/c/elastic-agent + IS_WINDOWS="true" + + # Make sudo a no-op on Windows + sudo() { + "$@" + } +fi + +if [[ ! 
-d $ELASTIC_AGENT_DIR ]]; then + sudo mkdir $ELASTIC_AGENT_DIR - archive=elastic-agent-8.10.1-linux-x86_64.tar.gz - if [ "$(uname -m)" = "arm64" ] || [ "$(uname -m)" = "aarch64" ]; then - archive=elastic-agent-8.10.1-linux-arm64.tar.gz + if [[ "$IS_WINDOWS" != "true" ]]; then + sudo chown -R buildkite-agent:buildkite-agent $ELASTIC_AGENT_DIR + fi + + cd $ELASTIC_AGENT_DIR + + archive="elastic-agent-$AGENT_VERSION-linux-x86_64.tar.gz" + if [[ "$IS_WINDOWS" == "true" ]]; then + archive="elastic-agent-$AGENT_VERSION-windows-x86_64.zip" + elif [ "$(uname -m)" = "arm64" ] || [ "$(uname -m)" = "aarch64" ]; then + archive="elastic-agent-$AGENT_VERSION-linux-arm64.tar.gz" fi curl -L -O "https://artifacts.elastic.co/downloads/beats/elastic-agent/$archive" - tar xzf "$archive" --directory=. --strip-components=1 + if [[ "$IS_WINDOWS" == "true" ]]; then + unzip "$archive" + mv elastic-agent-*/* . + else + tar xzf "$archive" --directory=. --strip-components=1 + fi fi -cd /opt/elastic-agent +cd $ELASTIC_AGENT_DIR sudo ./elastic-agent install -f --url="$ELASTIC_AGENT_URL" --enrollment-token="$ELASTIC_AGENT_TOKEN" diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index d3d528cbff494..2b549f83f1935 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -14,7 +14,7 @@ log4j = 2.19.0 slf4j = 2.0.6 ecsLogging = 1.2.0 jna = 5.12.1 -netty = 4.1.107.Final +netty = 4.1.109.Final commons_lang3 = 3.9 google_oauth_client = 1.34.1 diff --git a/distribution/tools/geoip-cli/build.gradle b/distribution/tools/geoip-cli/build.gradle index cc7ac34a8acb0..1cd502fa91d51 100644 --- a/distribution/tools/geoip-cli/build.gradle +++ b/distribution/tools/geoip-cli/build.gradle @@ -17,5 +17,6 @@ dependencies { compileOnly project(":libs:elasticsearch-cli") compileOnly project(":libs:elasticsearch-x-content") testImplementation project(":test:framework") - testImplementation 
"org.apache.commons:commons-compress:1.24.0" + testImplementation "org.apache.commons:commons-compress:1.26.1" + testImplementation "commons-io:commons-io:2.15.1" } diff --git a/docs/changelog/107493.yaml b/docs/changelog/107493.yaml new file mode 100644 index 0000000000000..dfd45e1493c95 --- /dev/null +++ b/docs/changelog/107493.yaml @@ -0,0 +1,5 @@ +pr: 107493 +summary: Remote cluster - API key security model - cluster privileges +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/108089.yaml b/docs/changelog/108089.yaml new file mode 100644 index 0000000000000..02fb6349185a6 --- /dev/null +++ b/docs/changelog/108089.yaml @@ -0,0 +1,6 @@ +pr: 108089 +summary: "ES|QL: limit query depth to 500 levels" +area: ES|QL +type: bug +issues: + - 107752 diff --git a/docs/changelog/108101.yaml b/docs/changelog/108101.yaml new file mode 100644 index 0000000000000..e935ec1beecd6 --- /dev/null +++ b/docs/changelog/108101.yaml @@ -0,0 +1,6 @@ +pr: 108101 +summary: "ESQL: Fix error message when failing to resolve aggregate groupings" +area: ES|QL +type: bug +issues: + - 108053 diff --git a/docs/changelog/108106.yaml b/docs/changelog/108106.yaml new file mode 100644 index 0000000000000..e9dd438e620c4 --- /dev/null +++ b/docs/changelog/108106.yaml @@ -0,0 +1,6 @@ +pr: 108106 +summary: Simulate should succeed if `ignore_missing_pipeline` +area: Ingest Node +type: bug +issues: + - 107314 diff --git a/docs/changelog/108144.yaml b/docs/changelog/108144.yaml new file mode 100644 index 0000000000000..6ff5b1d600d0e --- /dev/null +++ b/docs/changelog/108144.yaml @@ -0,0 +1,5 @@ +pr: 108144 +summary: Bump Tika dependency to 2.9.2 +area: Ingest Node +type: upgrade +issues: [] diff --git a/docs/changelog/108155.yaml b/docs/changelog/108155.yaml new file mode 100644 index 0000000000000..57db86b4005b9 --- /dev/null +++ b/docs/changelog/108155.yaml @@ -0,0 +1,5 @@ +pr: 108155 +summary: Upgrade to Netty 4.1.109 +area: Network +type: upgrade +issues: [] diff --git 
a/docs/changelog/108165.yaml b/docs/changelog/108165.yaml new file mode 100644 index 0000000000000..b88b0f5e217dd --- /dev/null +++ b/docs/changelog/108165.yaml @@ -0,0 +1,5 @@ +pr: 108165 +summary: Add `BlockHash` for 3 `BytesRefs` +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/reference/ilm/ilm-tutorial.asciidoc b/docs/reference/ilm/ilm-tutorial.asciidoc index 0885f685ed091..36d89b7df6f21 100644 --- a/docs/reference/ilm/ilm-tutorial.asciidoc +++ b/docs/reference/ilm/ilm-tutorial.asciidoc @@ -7,31 +7,33 @@ ++++ When you continuously index timestamped documents into {es}, -you typically use a <> so you can periodically roll over to a +you typically use a <> so you can periodically <> to a new index. -This enables you to implement a hot-warm-cold architecture to meet your performance +This enables you to implement a <> to meet your performance requirements for your newest data, control costs over time, enforce retention policies, and still get the most out of your data. -TIP: Data streams are best suited for +TIP: <> are best suited for <> use cases. If you need to update or delete existing time series data, you can perform update or delete operations directly on the data stream backing index. If you frequently send multiple documents using the same `_id` expecting last-write-wins, you may -want to use an index alias with a write index instead. You can still use ILM to manage and rollover +want to use an index alias with a write index instead. You can still use <> to manage and <> the alias's indices. Skip to <>. +[discrete] +[[manage-time-series-data-with-data-streams]] +=== Manage time series data with data streams + To automate rollover and management of a data stream with {ilm-init}, you: . <> that defines the appropriate -phases and actions. -. <> to create the data stream and +<> and <>. +. <> to <> and apply the ILM policy and the indices settings and mappings configurations for the backing indices. . <> as expected. 
-For an introduction to rolling indices, see <>. - IMPORTANT: When you enable {ilm} for {beats} or the {ls} {es} output plugin, lifecycle policies are set up automatically. You do not need to take any other actions. @@ -41,7 +43,7 @@ or the {ilm-init} APIs. [discrete] [[ilm-gs-create-policy]] -=== Create a lifecycle policy +==== Create a lifecycle policy A lifecycle policy specifies the phases in the index lifecycle and the actions to perform in each phase. A lifecycle can have up to five phases: @@ -101,7 +103,7 @@ PUT _ilm/policy/timeseries_policy [discrete] [[ilm-gs-apply-policy]] -=== Create an index template to create the data stream and apply the lifecycle policy +==== Create an index template to create the data stream and apply the lifecycle policy To set up a data stream, first create an index template to specify the lifecycle policy. Because the template is for a data stream, it must also include a `data_stream` definition. @@ -148,7 +150,7 @@ PUT _index_template/timeseries_template [discrete] [[ilm-gs-create-the-data-stream]] -=== Create the data stream +==== Create the data stream To get things started, index a document into the name or wildcard pattern defined in the `index_patterns` of the <>. As long @@ -184,12 +186,12 @@ stream's write index. This process repeats each time a rollover condition is met. You can search across all of the data stream's backing indices, managed by the `timeseries_policy`, with the `timeseries` data stream name. -You will point ingest towards the alias which will route write operations to its current write index. Read operations will be handled by all -backing indices. +Write operations should be sent to the data stream name, which will route them to its current write index. +Read operations against the data stream will be handled by all its backing indices. 
[discrete] [[ilm-gs-check-progress]] -=== Check lifecycle progress +==== Check lifecycle progress To get status information for managed indices, you use the {ilm-init} explain API. This lets you find out things like: @@ -304,7 +306,7 @@ as expected. [discrete] [[ilm-gs-alias-apply-policy]] -=== Create an index template to apply the lifecycle policy +==== Create an index template to apply the lifecycle policy To automatically apply a lifecycle policy to the new write index on rollover, specify the policy in the index template used to create new indices. @@ -362,7 +364,7 @@ DELETE _index_template/timeseries_template [discrete] [[ilm-gs-alias-bootstrap]] -=== Bootstrap the initial time series index with a write index alias +==== Bootstrap the initial time series index with a write index alias To get things started, you need to bootstrap an initial index and designate it as the write index for the rollover alias specified in your index template. @@ -393,11 +395,11 @@ This matches the `timeseries-*` pattern, so the settings from `timeseries_templa This process repeats each time rollover conditions are met. You can search across all of the indices managed by the `timeseries_policy` with the `timeseries` alias. -Write operations are routed to the current write index. +Write operations should be sent towards the alias, which will route them to its current write index. [discrete] [[ilm-gs-alias-check-progress]] -=== Check lifecycle progress +==== Check lifecycle progress Retrieving the status information for managed indices is very similar to the data stream case. See the data stream <> for more information. 
diff --git a/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc b/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc index 506e834e0b1c1..5f462b14405ba 100644 --- a/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc @@ -1,8 +1,6 @@ [[remote-clusters-api-key]] === Add remote clusters using API key authentication -beta::[] - API key authentication enables a local cluster to authenticate itself with a remote cluster via a <>. The API key needs to be created by an administrator of the remote diff --git a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc index ec61c4c59fc74..848a29c64279c 100644 --- a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc @@ -79,7 +79,6 @@ you configure the remotes. `cluster.remote..credentials` (<>, <>):: [[remote-cluster-credentials-setting]] -beta:[] Per cluster setting for configuring <>. This setting takes the encoded value of a <> and must be set diff --git a/docs/reference/modules/remote-cluster-network.asciidoc b/docs/reference/modules/remote-cluster-network.asciidoc index ac2b4cbf65d0a..c57f0bb31e270 100644 --- a/docs/reference/modules/remote-cluster-network.asciidoc +++ b/docs/reference/modules/remote-cluster-network.asciidoc @@ -1,8 +1,6 @@ [[remote-cluster-network-settings]] ==== Advanced remote cluster (API key based model) settings -beta::[] - Use the following advanced settings to configure the remote cluster interface (API key based model) independently of the <>. You can also configure both interfaces together using the <>. 
diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 8dcdfb009dab5..25217302b7631 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -45,8 +45,7 @@ with either of the connection modes. ==== Security models API key based security model:: -beta:[] -For clusters on version 8.10 or later, you can use an API key to authenticate +For clusters on version 8.14 or later, you can use an API key to authenticate and authorize cross-cluster operations to a remote cluster. This model offers administrators of both the local and the remote cluster fine-grained access controls. <>. diff --git a/docs/reference/release-notes/8.13.0.asciidoc b/docs/reference/release-notes/8.13.0.asciidoc index 1902c22edcef5..dba4fdbe5f67e 100644 --- a/docs/reference/release-notes/8.13.0.asciidoc +++ b/docs/reference/release-notes/8.13.0.asciidoc @@ -7,9 +7,10 @@ Also see <>. [float] === Known issues -* Cross-cluster searches involving nodes upgraded to 8.13.0 and a coordinator node that is running on - version 8.12 or earlier can produce duplicate buckets. This occurs when using date_histogram or histogram - aggregations (issue: {es-issue}108181[#108181]). +* Searches involving nodes upgraded to 8.13.0 and a coordinator node that is running on version + 8.12 or earlier can produce duplicate buckets when running `date_histogram` or `histogram` + aggregations. This can happen during a rolling upgrade to 8.13 or while running cross-cluster + searches. (issue: {es-issue}108181[#108181]). * Due to a bug in the bundled JDK 22 nodes might crash abruptly under high memory pressure. We recommend <> asap to mitigate the issue. 
diff --git a/docs/reference/release-notes/8.13.1.asciidoc b/docs/reference/release-notes/8.13.1.asciidoc index 82355beea0cb9..7b3dbff74cc6e 100644 --- a/docs/reference/release-notes/8.13.1.asciidoc +++ b/docs/reference/release-notes/8.13.1.asciidoc @@ -6,9 +6,10 @@ Also see <>. [[bug-8.13.1]] [float] -* Cross-cluster searches involving nodes upgraded to 8.13.1 and a coordinator node that is running on - version 8.12 or earlier can produce duplicate buckets. This occurs when using date_histogram or histogram - aggregations (issue: {es-issue}108181[#108181]). +* Searches involving nodes upgraded to 8.13.0 and a coordinator node that is running on version + 8.12 or earlier can produce duplicate buckets when running `date_histogram` or `histogram` + aggregations. This can happen during a rolling upgrade to 8.13 or while running cross-cluster + searches. (issue: {es-issue}108181[#108181]). === Bug fixes diff --git a/docs/reference/release-notes/8.13.2.asciidoc b/docs/reference/release-notes/8.13.2.asciidoc index ccaaa542c4836..514118f5ea575 100644 --- a/docs/reference/release-notes/8.13.2.asciidoc +++ b/docs/reference/release-notes/8.13.2.asciidoc @@ -6,9 +6,10 @@ Also see <>. [[bug-8.13.2]] [float] -* Cross-cluster searches involving nodes upgraded to 8.13.2 and a coordinator node that is running on - version 8.12 or earlier can produce duplicate buckets. This occurs when using date_histogram or histogram - aggregations (issue: {es-issue}108181[#108181]). +* Searches involving nodes upgraded to 8.13.0 and a coordinator node that is running on version + 8.12 or earlier can produce duplicate buckets when running `date_histogram` or `histogram` + aggregations. This can happen during a rolling upgrade to 8.13 or while running cross-cluster + searches. (issue: {es-issue}108181[#108181]). 
=== Bug fixes diff --git a/docs/reference/rest-api/security/create-cross-cluster-api-key.asciidoc b/docs/reference/rest-api/security/create-cross-cluster-api-key.asciidoc index 6cb00815c0ce7..13f2a60e6a9e0 100644 --- a/docs/reference/rest-api/security/create-cross-cluster-api-key.asciidoc +++ b/docs/reference/rest-api/security/create-cross-cluster-api-key.asciidoc @@ -2,8 +2,6 @@ [[security-api-create-cross-cluster-api-key]] === Create Cross-Cluster API key API -beta::[] - ++++ Create Cross-Cluster API key ++++ diff --git a/docs/reference/rest-api/security/create-roles.asciidoc b/docs/reference/rest-api/security/create-roles.asciidoc index 532ea60d3e46a..4f41c0b54bb1d 100644 --- a/docs/reference/rest-api/security/create-roles.asciidoc +++ b/docs/reference/rest-api/security/create-roles.asciidoc @@ -74,7 +74,7 @@ that begin with `_` are reserved for system usage. For more information, see <>. -`remote_indices`:: beta:[] (list) A list of remote indices permissions entries. +`remote_indices`:: (list) A list of remote indices permissions entries. + -- NOTE: Remote indices are effective for <>. 
diff --git a/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc b/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc index c22a1347c8262..b90cb6368eefb 100644 --- a/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc +++ b/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc @@ -2,8 +2,6 @@ [[security-api-update-cross-cluster-api-key]] === Update Cross-Cluster API key API -beta::[] - ++++ Update Cross-Cluster API key ++++ diff --git a/docs/reference/security/authorization/managing-roles.asciidoc b/docs/reference/security/authorization/managing-roles.asciidoc index 25d1c20471e9c..a6197327db683 100644 --- a/docs/reference/security/authorization/managing-roles.asciidoc +++ b/docs/reference/security/authorization/managing-roles.asciidoc @@ -31,8 +31,7 @@ A role is defined by the following JSON structure: <4> A list of indices permissions entries. This field is optional (missing `indices` privileges effectively mean no index level permissions). <5> A list of application privilege entries. This field is optional. -<6> beta:[] - A list of indices permissions entries for +<6> A list of indices permissions entries for <>. This field is optional (missing `remote_indices` privileges effectively mean no index level permissions for any API key based remote clusters). @@ -165,8 +164,6 @@ no effect, and will not grant any actions in the [[roles-remote-indices-priv]] ==== Remote indices privileges -beta::[] - For <>, remote indices privileges can be used to specify desired indices privileges for matching remote clusters. 
The final effective index privileges will be an intersection of the remote indices privileges diff --git a/docs/reference/security/authorization/privileges.asciidoc b/docs/reference/security/authorization/privileges.asciidoc index f85ff6bc92845..9153b5fbdcab3 100644 --- a/docs/reference/security/authorization/privileges.asciidoc +++ b/docs/reference/security/authorization/privileges.asciidoc @@ -21,7 +21,7 @@ Privileges to create snapshots for existing repositories. Can also list and view details on existing repositories and snapshots. `cross_cluster_replication`:: -beta:[] Privileges to connect to <> +Privileges to connect to <> for cross-cluster replication. + -- @@ -32,7 +32,7 @@ to manage cross-cluster API keys. -- `cross_cluster_search`:: -beta:[] Privileges to connect to <> +Privileges to connect to <> for cross-cluster search. + -- @@ -310,13 +310,13 @@ requires the `manage` privilege as well, on both the index and the aliases names. `cross_cluster_replication`:: -beta:[] Privileges to perform cross-cluster replication for indices located on +Privileges to perform cross-cluster replication for indices located on <>. This privilege should only be used for the `privileges` field of <>. `cross_cluster_replication_internal`:: -beta:[] Privileges to perform supporting actions for cross-cluster replication from +Privileges to perform supporting actions for cross-cluster replication from <>. + -- diff --git a/docs/reference/security/using-ip-filtering.asciidoc b/docs/reference/security/using-ip-filtering.asciidoc index 1280ffd281dac..b59f90c92c776 100644 --- a/docs/reference/security/using-ip-filtering.asciidoc +++ b/docs/reference/security/using-ip-filtering.asciidoc @@ -114,8 +114,6 @@ xpack.security.http.filter.deny: _all [discrete] === Remote cluster (API key based model) filtering -beta::[] - If other clusters connect <> for {ccs} or {ccr}, you may want to have different IP filtering for the remote cluster server interface. 
diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index c620d97fda425..e0d01965479ce 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -2639,8 +2639,6 @@ include::ssl-settings.asciidoc[] [[remote-cluster-server-tls-ssl-settings]] -beta::[] - :ssl-prefix: xpack.security.remote_cluster_server :component: Remote cluster server (API key based model) :enabled-by-default: @@ -2655,8 +2653,6 @@ include::ssl-settings.asciidoc[] [[remote-cluster-client-tls-ssl-settings]] -beta::[] - :ssl-prefix: xpack.security.remote_cluster_client :component: Remote cluster client (API key based model) :enabled-by-default: @@ -2714,12 +2710,12 @@ List of IP addresses to deny for this profile. `xpack.security.remote_cluster.filter.allow`:: (<>) -beta:[] List of IP addresses to allow just for the +List of IP addresses to allow just for the <>. `xpack.security.remote_cluster.filter.deny`:: (<>) -beta:[] List of IP addresses to deny just for the remote cluster server configured with +List of IP addresses to deny just for the remote cluster server configured with the <>. 
include::security-hash-settings.asciidoc[] diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml index bbcad622cf5e5..6b5a541d15661 100644 --- a/gradle/build.versions.toml +++ b/gradle/build.versions.toml @@ -7,7 +7,7 @@ spock = "2.1-groovy-3.0" [libraries] ant = "org.apache.ant:ant:1.10.12" antlrst4 = "org.antlr:ST4:4.3.4" -apache-compress = "org.apache.commons:commons-compress:1.24.0" +apache-compress = "org.apache.commons:commons-compress:1.26.1" apache-rat = "org.apache.rat:apache-rat:0.11" asm = { group = "org.ow2.asm", name="asm", version.ref="asm" } asm-tree = { group = "org.ow2.asm", name="asm-tree", version.ref="asm" } diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 8978274e6df95..4c6d2c0287aeb 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -1076,9 +1076,9 @@ - - - + + + @@ -1116,6 +1116,11 @@ + + + + + @@ -1151,9 +1156,9 @@ - - - + + + @@ -1286,9 +1291,9 @@ - - - + + + @@ -1296,9 +1301,9 @@ - - - + + + @@ -1306,29 +1311,29 @@ - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + @@ -1336,9 +1341,9 @@ - - - + + + @@ -1346,14 +1351,14 @@ - - - + + + - - - + + + @@ -1361,14 +1366,14 @@ - - - + + + - - - + + + @@ -1381,9 +1386,9 @@ - - - + + + @@ -1820,6 +1825,11 @@ + + + + + @@ -1855,6 +1865,11 @@ + + + + + @@ -2443,24 +2458,14 @@ - - - + + + - - - - - - - - - - - - - + + + @@ -2878,14 +2883,9 @@ - - - - - - - - + + + @@ -2893,34 +2893,29 @@ - - - - - - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + @@ -2948,119 +2943,64 @@ - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + @@ -4181,14 +4121,9 @@ - - - - - - - - + + + @@ -4216,9 +4151,9 @@ - - - + + + @@ -4226,11 +4161,6 @@ - - - - - @@ -4241,16 +4171,16 @@ + + + + + - - - - - diff --git 
a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaZstdLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaZstdLibrary.java index f2c4a85c8f2bc..e52f36f7c8255 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaZstdLibrary.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaZstdLibrary.java @@ -8,7 +8,6 @@ package org.elasticsearch.nativeaccess.jna; -import com.sun.jna.Library; import com.sun.jna.Native; import com.sun.jna.Pointer; @@ -17,27 +16,25 @@ class JnaZstdLibrary implements ZstdLibrary { - private interface NativeFunctions extends Library { - long ZSTD_compressBound(int scrLen); + public static class NativeFunctions { + public static native long ZSTD_compressBound(int scrLen); - long ZSTD_compress(Pointer dst, int dstLen, Pointer src, int srcLen, int compressionLevel); + public static native long ZSTD_compress(Pointer dst, int dstLen, Pointer src, int srcLen, int compressionLevel); - boolean ZSTD_isError(long code); + public static native boolean ZSTD_isError(long code); - String ZSTD_getErrorName(long code); + public static native String ZSTD_getErrorName(long code); - long ZSTD_decompress(Pointer dst, int dstLen, Pointer src, int srcLen); + public static native long ZSTD_decompress(Pointer dst, int dstLen, Pointer src, int srcLen); } - private final NativeFunctions functions; - JnaZstdLibrary() { - this.functions = Native.load("zstd", NativeFunctions.class); + Native.register(NativeFunctions.class, "zstd"); } @Override public long compressBound(int scrLen) { - return functions.ZSTD_compressBound(scrLen); + return NativeFunctions.ZSTD_compressBound(scrLen); } @Override @@ -46,7 +43,7 @@ public long compress(CloseableByteBuffer dst, CloseableByteBuffer src, int compr assert src instanceof JnaCloseableByteBuffer; var nativeDst = (JnaCloseableByteBuffer) dst; var nativeSrc = (JnaCloseableByteBuffer) src; - return functions.ZSTD_compress( + return 
NativeFunctions.ZSTD_compress( nativeDst.memory.share(dst.buffer().position()), dst.buffer().remaining(), nativeSrc.memory.share(src.buffer().position()), @@ -57,12 +54,12 @@ public long compress(CloseableByteBuffer dst, CloseableByteBuffer src, int compr @Override public boolean isError(long code) { - return functions.ZSTD_isError(code); + return NativeFunctions.ZSTD_isError(code); } @Override public String getErrorName(long code) { - return functions.ZSTD_getErrorName(code); + return NativeFunctions.ZSTD_getErrorName(code); } @Override @@ -71,7 +68,7 @@ public long decompress(CloseableByteBuffer dst, CloseableByteBuffer src) { assert src instanceof JnaCloseableByteBuffer; var nativeDst = (JnaCloseableByteBuffer) dst; var nativeSrc = (JnaCloseableByteBuffer) src; - return functions.ZSTD_decompress( + return NativeFunctions.ZSTD_decompress( nativeDst.memory.share(dst.buffer().position()), dst.buffer().remaining(), nativeSrc.memory.share(src.buffer().position()), diff --git a/modules/ingest-attachment/build.gradle b/modules/ingest-attachment/build.gradle index 2c15ea076e11a..89f0b530713c6 100644 --- a/modules/ingest-attachment/build.gradle +++ b/modules/ingest-attachment/build.gradle @@ -17,12 +17,28 @@ esplugin { // this overwrites the 'versions' map from Elasticsearch itself, but indeed we want that -- we're interested in managing our dependencies // as we (and tika) demand, and are not interested in, say, having the same version of commons-codec as elasticsearch itself +// when updating tika, please review it's parent pom : https://repo1.maven.org/maven2/org/apache/tika/tika-parent +// and manually update the transitive dependencies here def versions = [ - 'tika' : '2.9.1', - 'pdfbox': '2.0.29', - 'poi' : '5.2.3', - 'mime4j': '0.8.10', - 'commonsCodec': '1.16.0' + 'tika' : '2.9.2', + 'pdfbox': '2.0.31', + 'poi' : '5.2.5', + 'sparsebitset' : '1.3', //poi dependency: https://repo1.maven.org/maven2/org/apache/poi/poi/ + 'mime4j': '0.8.11', + 'commonsCodec': 
'1.16.1', + 'slf4' : '2.0.10', + 'xz' : '1.9', + 'commonsIo' : '2.15.1', + //intentionally using the elder "juniversalchardet:juniversalchardet" rather than the newer "com.github.albfernandez:juniversalchardet" + //since the "com.github.albfernandez" fork has some problems with Chinese. + 'juniversalchardet' : '1.0.3', + 'tagsoup' : '1.2.1', + 'jempbox' : '1.8.17', + 'xmlbeans' : '5.2.0', //poi-ooxml dependency: https://repo1.maven.org/maven2/org/apache/poi/poi-ooxml/ + 'commonsCollections4' : '4.4', + 'commonsCompress' : '1.26.1', + 'commonsLang3' :'3.14.0', + 'commonsMath3' : '3.6.1' ] // exclude commons-logging from test dependencies to avoid jar-hell, we use jcl-over-slf4j here @@ -39,8 +55,8 @@ configurations.testCompileClasspath { dependencies { // take over logging for all dependencies - api "org.slf4j:slf4j-api:2.0.9" - api "org.slf4j:jcl-over-slf4j:2.0.9" + api "org.slf4j:slf4j-api:${versions.slf4}" + api "org.slf4j:jcl-over-slf4j:${versions.slf4}" // route slf4j over log4j // TODO blocked on https://github.com/elastic/elasticsearch/issues/93714 @@ -48,7 +64,7 @@ dependencies { // nop all slf4j logging // workaround for https://github.com/elastic/elasticsearch/issues/93714 - api "org.slf4j:slf4j-nop:2.0.9" + api "org.slf4j:slf4j-nop:${versions.slf4}" // mandatory for tika api "org.apache.tika:tika-core:${versions.tika}" @@ -63,39 +79,39 @@ dependencies { api "org.apache.tika:tika-parser-apple-module:${versions.tika}" api "org.apache.tika:tika-parser-xmp-commons:${versions.tika}" api "org.apache.tika:tika-parser-zip-commons:${versions.tika}" - api 'org.tukaani:xz:1.9' - api 'commons-io:commons-io:2.14.0' + api "org.tukaani:xz:${versions.xz}" + api "commons-io:commons-io:${versions.commonsIo}" // character set detection - api 'com.googlecode.juniversalchardet:juniversalchardet:1.0.3' + api "com.googlecode.juniversalchardet:juniversalchardet:${versions.juniversalchardet}" // external parser libraries // HTML - api 'org.ccil.cowan.tagsoup:tagsoup:1.2.1' + api 
"org.ccil.cowan.tagsoup:tagsoup:${versions.tagsoup}" // Adobe PDF api "org.apache.pdfbox:pdfbox:${versions.pdfbox}" api "org.apache.pdfbox:fontbox:${versions.pdfbox}" - api "org.apache.pdfbox:jempbox:1.8.17" + api "org.apache.pdfbox:jempbox:${versions.jempbox}" // OpenOffice api "org.apache.poi:poi-ooxml:${versions.poi}" api "org.apache.poi:poi-ooxml-lite:${versions.poi}" api "org.apache.poi:poi:${versions.poi}" api "commons-codec:commons-codec:${versions.commonsCodec}" - api 'org.apache.xmlbeans:xmlbeans:5.1.1' - api 'org.apache.commons:commons-collections4:4.4' + api "org.apache.xmlbeans:xmlbeans:${versions.xmlbeans}" + api "org.apache.commons:commons-collections4:${versions.commonsCollections4}" // MS Office api "org.apache.poi:poi-scratchpad:${versions.poi}" // Apple iWork - api 'org.apache.commons:commons-compress:1.24.0' + api "org.apache.commons:commons-compress:${versions.commonsCompress}" // Outlook documents api "org.apache.james:apache-mime4j-core:${versions.mime4j}" api "org.apache.james:apache-mime4j-dom:${versions.mime4j}" // EPUB books - api "org.apache.commons:commons-lang3:3.13.0" + api "org.apache.commons:commons-lang3:${versions.commonsLang3}" // Microsoft Word files with visio diagrams - api 'org.apache.commons:commons-math3:3.6.1' + api "org.apache.commons:commons-math3:${versions.commonsMath3}" // POIs dependency - api 'com.zaxxer:SparseBitSet:1.2' + api "com.zaxxer:SparseBitSet:${versions.sparsebitset}" } restResources { diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml index 8f69b6d565ad4..a367401c28291 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml @@ -981,3 +981,50 @@ teardown: - match: { docs.0.processor_results.0.status: "error" } - match: { 
docs.0.processor_results.0.error.root_cause.0.type: "illegal_argument_exception" } - match: { docs.0.processor_results.0.error.root_cause.0.reason: "Pipeline processor configured for non-existent pipeline [____pipeline_doesnot_exist___]" } + +--- +"Test verbose simulate with Pipeline Processor and ignore_missing_pipeline": + - do: + ingest.simulate: + verbose: true + body: > + { + "pipeline": { + "description": "outer pipeline", + "processors": [ + { + "pipeline": { + "name": "missing-inner", + "ignore_missing_pipeline": true + } + }, + { + "set": { + "field": "outer-field", + "value": true + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "field1": "123.42 400 " + } + } + ] + } + - length: { docs: 1 } + - length: { docs.0.processor_results: 2 } + - match: { docs.0.processor_results.0.doc._source.field1: "123.42 400 " } + - match: { docs.0.processor_results.0.status: "error_ignored" } + - match: { docs.0.processor_results.0.processor_type: "pipeline" } + - match: { docs.0.processor_results.0.ignored_error.error.type: "illegal_argument_exception" } + - match: { docs.0.processor_results.0.ignored_error.error.reason: "Pipeline processor configured for non-existent pipeline [missing-inner]" } + - match: { docs.0.processor_results.1.doc._source.outer-field: true } + - match: { docs.0.processor_results.1.status: "success" } + - match: { docs.0.processor_results.1.processor_type: "set" } + + diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index a70bc0e4a405b..14b0b57da3b0c 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -176,6 +176,9 @@ tasks.named("thirdPartyAudit").configure { 'com.google.protobuf.nano.CodedOutputByteBufferNano', 'com.google.protobuf.nano.MessageNano', 'com.github.luben.zstd.Zstd', + 'com.github.luben.zstd.BaseZstdBufferDecompressingStreamNoFinalizer', + 'com.github.luben.zstd.ZstdBufferDecompressingStreamNoFinalizer', + 
'com.github.luben.zstd.ZstdDirectBufferDecompressingStreamNoFinalizer', 'com.jcraft.jzlib.Deflater', 'com.jcraft.jzlib.Inflater', 'com.jcraft.jzlib.JZlib$WrapperType', diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java index 9202db6f49a8e..d0cef178dc920 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java @@ -9,9 +9,8 @@ package org.elasticsearch.transport.netty4; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.ESNetty4IntegTestCase; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.MockLogAppender; @@ -25,21 +24,16 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase { private MockLogAppender appender; + private Releasable appenderRelease; public void setUp() throws Exception { super.setUp(); appender = new MockLogAppender(); - Loggers.addAppender(LogManager.getLogger(ESLoggingHandler.class), appender); - Loggers.addAppender(LogManager.getLogger(TransportLogger.class), appender); - Loggers.addAppender(LogManager.getLogger(TcpTransport.class), appender); - appender.start(); + appenderRelease = appender.capturing(ESLoggingHandler.class, TransportLogger.class, TcpTransport.class); } public void tearDown() throws Exception { - Loggers.removeAppender(LogManager.getLogger(ESLoggingHandler.class), appender); - Loggers.removeAppender(LogManager.getLogger(TransportLogger.class), appender); - Loggers.removeAppender(LogManager.getLogger(TcpTransport.class), appender); - 
appender.stop(); + appenderRelease.close(); super.tearDown(); } diff --git a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java index 4145383f1820c..81b3a086e9aca 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.Constants; @@ -95,20 +94,6 @@ public void assertMatched() { } } - private MockLogAppender addMockLogger(String loggerName) throws Exception { - MockLogAppender appender = new MockLogAppender(); - appender.start(); - final Logger testLogger = LogManager.getLogger(loggerName); - Loggers.addAppender(testLogger, appender); - Loggers.setLevel(testLogger, Level.TRACE); - return appender; - } - - private void removeMockLogger(String loggerName, MockLogAppender appender) { - Loggers.removeAppender(LogManager.getLogger(loggerName), appender); - appender.stop(); - } - /** * Simplest case: a module with no controller daemon. 
*/ @@ -218,15 +203,16 @@ private void assertControllerSpawns(final Function pluginsDir String stdoutLoggerName = "test_plugin-controller-stdout"; String stderrLoggerName = "test_plugin-controller-stderr"; - MockLogAppender stdoutAppender = addMockLogger(stdoutLoggerName); - MockLogAppender stderrAppender = addMockLogger(stderrLoggerName); + MockLogAppender appender = new MockLogAppender(); + Loggers.setLevel(LogManager.getLogger(stdoutLoggerName), Level.TRACE); + Loggers.setLevel(LogManager.getLogger(stderrLoggerName), Level.TRACE); CountDownLatch messagesLoggedLatch = new CountDownLatch(2); if (expectSpawn) { - stdoutAppender.addExpectation(new ExpectedStreamMessage(stdoutLoggerName, "I am alive", messagesLoggedLatch)); - stderrAppender.addExpectation(new ExpectedStreamMessage(stderrLoggerName, "I am an error", messagesLoggedLatch)); + appender.addExpectation(new ExpectedStreamMessage(stdoutLoggerName, "I am alive", messagesLoggedLatch)); + appender.addExpectation(new ExpectedStreamMessage(stderrLoggerName, "I am an error", messagesLoggedLatch)); } - try { + try (var ignore = appender.capturing(stdoutLoggerName, stderrLoggerName)) { Spawner spawner = new Spawner(); spawner.spawnNativeControllers(environment); @@ -244,11 +230,7 @@ private void assertControllerSpawns(final Function pluginsDir } else { assertThat(processes, hasSize(0)); } - stdoutAppender.assertAllExpectationsMatched(); - stderrAppender.assertAllExpectationsMatched(); - } finally { - removeMockLogger(stdoutLoggerName, stdoutAppender); - removeMockLogger(stderrLoggerName, stderrAppender); + appender.assertAllExpectationsMatched(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index c3cdfee38c4e4..0b8ee5ea82601 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -26,7 +26,6 @@ import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -252,7 +251,6 @@ public void testRolloverDryRun() throws Exception { Logger allocationServiceLogger = LogManager.getLogger(AllocationService.class); MockLogAppender appender = new MockLogAppender(); - appender.start(); appender.addExpectation( new MockLogAppender.UnseenEventExpectation( "no related message logged on dry run", @@ -261,13 +259,11 @@ public void testRolloverDryRun() throws Exception { "*test_index*" ) ); - Loggers.addAppender(allocationServiceLogger, appender); - - final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias").dryRun(true).get(); - - appender.assertAllExpectationsMatched(); - appender.stop(); - Loggers.removeAppender(allocationServiceLogger, appender); + final RolloverResponse response; + try (var ignored = appender.capturing(AllocationService.class)) { + response = indicesAdmin().prepareRolloverIndex("test_alias").dryRun(true).get(); + appender.assertAllExpectationsMatched(); + } assertThat(response.getOldIndex(), equalTo("test_index-1")); assertThat(response.getNewIndex(), equalTo("test_index-000002")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 44b0a22f352ac..52d0fea8806a6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -9,8 +9,6 @@ package org.elasticsearch.snapshots; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -32,7 +30,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; @@ -1271,10 +1268,7 @@ public void testDeleteSnapshotsOfDifferentIndexSets() throws IllegalAccessExcept mockAppender.addExpectation( new MockLogAppender.UnseenEventExpectation("no warnings", BlobStoreRepository.class.getCanonicalName(), Level.WARN, "*") ); - mockAppender.start(); - final Logger logger = LogManager.getLogger(BlobStoreRepository.class); - Loggers.addAppender(logger, mockAppender); - try { + try (var ignored = mockAppender.capturing(BlobStoreRepository.class)) { final String index1 = "index-1"; final String index2 = "index-2"; createIndexWithContent("index-1"); @@ -1287,9 +1281,6 @@ public void testDeleteSnapshotsOfDifferentIndexSets() throws IllegalAccessExcept clusterAdmin().prepareDeleteSnapshot(repoName, snapshot1, snapshot2).get(); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(logger, mockAppender); - mockAppender.stop(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index 5f9ad28b561f8..aa0b1edaafd6c 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -9,8 +9,6 @@ package org.elasticsearch.snapshots; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -21,7 +19,6 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.TimeValue; @@ -166,7 +163,7 @@ public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { ) public void testRestoreLogging() throws IllegalAccessException { final MockLogAppender mockLogAppender = new MockLogAppender(); - try { + try (var ignored = mockLogAppender.capturing(RestoreService.class)) { String indexName = "testindex"; String repoName = "test-restore-snapshot-repo"; String snapshotName = "test-restore-snapshot"; @@ -175,9 +172,6 @@ public void testRestoreLogging() throws IllegalAccessException { String restoredIndexName = indexName + "-restored"; String expectedValue = "expected"; - mockLogAppender.start(); - Loggers.addAppender(LogManager.getLogger(RestoreService.class), mockLogAppender); - mockLogAppender.addExpectation( new MockLogAppender.PatternSeenEventExpectation( "not seen start of snapshot restore", @@ -215,9 +209,6 @@ public void testRestoreLogging() throws IllegalAccessException { ensureGreen(restoredIndexName); assertThat(client.prepareGet(restoredIndexName, docId).get().isExists(), equalTo(true)); 
mockLogAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(LogManager.getLogger(RestoreService.class), mockLogAppender); - mockLogAppender.stop(); } } @@ -907,14 +898,13 @@ public void testNoWarningsOnRestoreOverClosedIndex() throws IllegalAccessExcepti createSnapshot(repoName, snapshotName, List.of(indexName)); index(indexName, "some_id", Map.of("foo", "bar")); assertAcked(indicesAdmin().prepareClose(indexName).get()); + final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation("no warnings", FileRestoreContext.class.getCanonicalName(), Level.WARN, "*") - ); - mockAppender.start(); - final Logger logger = LogManager.getLogger(FileRestoreContext.class); - Loggers.addAppender(logger, mockAppender); - try { + try (var ignored = mockAppender.capturing(FileRestoreContext.class)) { + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation("no warnings", FileRestoreContext.class.getCanonicalName(), Level.WARN, "*") + ); + final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) .setIndices(indexName) .setRestoreGlobalState(false) @@ -922,9 +912,6 @@ public void testNoWarningsOnRestoreOverClosedIndex() throws IllegalAccessExcepti .get(); assertEquals(0, restoreSnapshotResponse.getRestoreInfo().failedShards()); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(logger, mockAppender); - mockAppender.stop(); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java index 941e17a5ad0ee..9aec3504a65f5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java @@ -9,9 +9,7 @@ package 
org.elasticsearch.snapshots; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.Tuple; @@ -139,9 +137,7 @@ public void testWarningSpeedOverRecovery() throws Exception { final String primaryNode = internalCluster().startNode(primaryNodeSettings); final MockLogAppender mockLogAppender = new MockLogAppender(); - try { - mockLogAppender.start(); - Loggers.addAppender(LogManager.getLogger(BlobStoreRepository.class), mockLogAppender); + try (var ignored = mockLogAppender.capturing(BlobStoreRepository.class)) { MockLogAppender.EventuallySeenEventExpectation snapshotExpectation = new MockLogAppender.EventuallySeenEventExpectation( "snapshot speed over recovery speed", @@ -175,9 +171,6 @@ public void testWarningSpeedOverRecovery() throws Exception { deleteRepository("test-repo"); mockLogAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(LogManager.getLogger(BlobStoreRepository.class), mockLogAppender); - mockLogAppender.stop(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java index 7ee993915ae24..19e66c6653577 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java @@ -9,12 +9,10 @@ package org.elasticsearch.snapshots; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; import 
org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.snapshots.mockstore.MockRepository; import org.elasticsearch.test.ClusterServiceUtils; @@ -35,10 +33,7 @@ public void testDeletingSnapshotsIsLoggedAfterClusterStateIsProcessed() throws E final MockLogAppender mockLogAppender = new MockLogAppender(); - try { - mockLogAppender.start(); - Loggers.addAppender(LogManager.getLogger(SnapshotsService.class), mockLogAppender); - + try (var ignored = mockLogAppender.capturing(SnapshotsService.class)) { mockLogAppender.addExpectation( new MockLogAppender.UnseenEventExpectation( "[does-not-exist]", @@ -76,8 +71,6 @@ public void testDeletingSnapshotsIsLoggedAfterClusterStateIsProcessed() throws E awaitNoMoreRunningOperations(); // ensure background file deletion is completed mockLogAppender.assertAllExpectationsMatched(); } finally { - Loggers.removeAppender(LogManager.getLogger(SnapshotsService.class), mockLogAppender); - mockLogAppender.stop(); deleteRepository("test-repo"); } } @@ -89,9 +82,7 @@ public void testSnapshotDeletionFailureShouldBeLogged() throws Exception { final MockLogAppender mockLogAppender = new MockLogAppender(); - try { - mockLogAppender.start(); - Loggers.addAppender(LogManager.getLogger(SnapshotsService.class), mockLogAppender); + try (var ignored = mockLogAppender.capturing(SnapshotsService.class)) { mockLogAppender.addExpectation( new MockLogAppender.SeenEventExpectation( @@ -121,8 +112,6 @@ public void testSnapshotDeletionFailureShouldBeLogged() throws Exception { mockLogAppender.assertAllExpectationsMatched(); } finally { - Loggers.removeAppender(LogManager.getLogger(SnapshotsService.class), mockLogAppender); - mockLogAppender.stop(); deleteRepository("test-repo"); } } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java 
b/server/src/main/java/org/elasticsearch/TransportVersions.java index a53cbb4a26c79..707da53b69e51 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -186,7 +186,8 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_PAGE_MAPPING_TO_ITERATOR = def(8_645_00_0); public static final TransportVersion BINARY_PIT_ID = def(8_646_00_0); public static final TransportVersion SECURITY_ROLE_MAPPINGS_IN_CLUSTER_STATE = def(8_647_00_0); - + public static final TransportVersion ESQL_REQUEST_TABLES = def(8_648_00_0); + public static final TransportVersion ROLE_REMOTE_CLUSTER_PRIVS = def(8_649_00_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java index 8ef1df3d29a58..3d5b4a73e0a57 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -197,13 +196,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(NAME.getPreferredName(), componentTemplate.getKey()); 
builder.field(COMPONENT_TEMPLATE.getPreferredName()); - componentTemplate.getValue() - .toXContent( - builder, - DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), - rolloverConfiguration, - globalRetention - ); + componentTemplate.getValue().toXContent(builder, params, rolloverConfiguration); builder.endObject(); } builder.endArray(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index 07ebfe123c98f..aebb9cef12f43 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -196,13 +195,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(NAME.getPreferredName(), indexTemplate.getKey()); builder.field(INDEX_TEMPLATE.getPreferredName()); - indexTemplate.getValue() - .toXContent( - builder, - DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), - rolloverConfiguration, - globalRetention - ); + indexTemplate.getValue().toXContent(builder, params, rolloverConfiguration); builder.endObject(); } builder.endArray(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index 6985e86fb287a..5d0a4a293ea4f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -121,12 +120,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); if (this.resolvedTemplate != null) { builder.field(TEMPLATE.getPreferredName()); - this.resolvedTemplate.toXContent( - builder, - DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), - rolloverConfiguration, - globalRetention - ); + this.resolvedTemplate.toXContent(builder, params, rolloverConfiguration); } if (this.overlappingTemplates != null) { builder.startArray(OVERLAPPING.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java index a11ec64dc6f2c..d3d758e110ff3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java @@ -163,21 +163,17 @@ public String toString() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return toXContent(builder, params, null, null); + return toXContent(builder, params, null); } /** * 
Converts the component template to XContent and passes the RolloverConditions, when provided, to the template. */ - public XContentBuilder toXContent( - XContentBuilder builder, - Params params, - @Nullable RolloverConfiguration rolloverConfiguration, - @Nullable DataStreamGlobalRetention globalRetention - ) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nullable RolloverConfiguration rolloverConfiguration) + throws IOException { builder.startObject(); builder.field(TEMPLATE.getPreferredName()); - this.template.toXContent(builder, params, rolloverConfiguration, globalRetention); + this.template.toXContent(builder, params, rolloverConfiguration); if (this.version != null) { builder.field(VERSION.getPreferredName(), this.version); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index e6e48bfbd46b3..fd1019efd7b78 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -259,23 +259,19 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return toXContent(builder, params, null, null); + return toXContent(builder, params, null); } /** * Converts the composable index template to XContent and passes the RolloverConditions, when provided, to the template. 
*/ - public XContentBuilder toXContent( - XContentBuilder builder, - Params params, - @Nullable RolloverConfiguration rolloverConfiguration, - @Nullable DataStreamGlobalRetention globalRetention - ) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nullable RolloverConfiguration rolloverConfiguration) + throws IOException { builder.startObject(); builder.stringListField(INDEX_PATTERNS.getPreferredName(), this.indexPatterns); if (this.template != null) { builder.field(TEMPLATE.getPreferredName()); - this.template.toXContent(builder, params, rolloverConfiguration, globalRetention); + this.template.toXContent(builder, params, rolloverConfiguration); } if (this.componentTemplates != null) { builder.stringListField(COMPOSED_OF.getPreferredName(), this.componentTemplates); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java index 74627e27032b4..70440adc4ebbe 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java @@ -213,18 +213,14 @@ public String toString() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return toXContent(builder, params, null, null); + return toXContent(builder, params, null); } /** * Converts the template to XContent and passes the RolloverConditions, when provided, to the lifecycle. 
*/ - public XContentBuilder toXContent( - XContentBuilder builder, - Params params, - @Nullable RolloverConfiguration rolloverConfiguration, - @Nullable DataStreamGlobalRetention globalRetention - ) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nullable RolloverConfiguration rolloverConfiguration) + throws IOException { builder.startObject(); if (this.settings != null) { builder.startObject(SETTINGS.getPreferredName()); @@ -254,7 +250,7 @@ public XContentBuilder toXContent( } if (this.lifecycle != null) { builder.field(LIFECYCLE.getPreferredName()); - lifecycle.toXContent(builder, params, rolloverConfiguration, globalRetention); + lifecycle.toXContent(builder, params, rolloverConfiguration, null); } builder.endObject(); return builder; diff --git a/server/src/main/java/org/elasticsearch/common/util/Int3Hash.java b/server/src/main/java/org/elasticsearch/common/util/Int3Hash.java new file mode 100644 index 0000000000000..c9d3c6d4cba2a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/util/Int3Hash.java @@ -0,0 +1,148 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.util; + +import org.apache.lucene.util.hppc.BitMixer; +import org.elasticsearch.core.Releasables; + +/** + * Specialized hash table implementation similar to BytesRefHash that maps + * three int values to ids. Collisions are resolved with open addressing and + * linear probing, growth is smooth thanks to {@link BigArrays} and capacity + * is always a multiple of 3 for faster identification of buckets. + * This class is not thread-safe. 
+ */ +// IDs are internally stored as id + 1 so that 0 encodes for an empty slot +public final class Int3Hash extends AbstractHash { + private IntArray keys; + + // Constructor with configurable capacity and default maximum load factor. + public Int3Hash(long capacity, BigArrays bigArrays) { + this(capacity, DEFAULT_MAX_LOAD_FACTOR, bigArrays); + } + + // Constructor with configurable capacity and load factor. + public Int3Hash(long capacity, float maxLoadFactor, BigArrays bigArrays) { + super(capacity, maxLoadFactor, bigArrays); + try { + // `super` allocates a big array so we have to `close` if we fail here or we'll leak it. + keys = bigArrays.newIntArray(3 * capacity, false); + } finally { + if (keys == null) { + close(); + } + } + } + + public int getKey1(long id) { + return keys.get(3 * id); + } + + public int getKey2(long id) { + return keys.get(3 * id + 1); + } + + public int getKey3(long id) { + return keys.get(3 * id + 2); + } + + /** + * Get the id associated with key or -1 if the key is not contained in the hash. 
+ */ + public long find(int key1, int key2, int key3) { + long index = slot(hash(key1, key2, key3), mask); + while (true) { + final long id = id(index); + if (id == -1) { + return id; + } else { + long keyOffset = 3 * id; + if ((keys.get(keyOffset) == key1 && keys.get(keyOffset + 1) == key2 && keys.get(keyOffset + 2) == key3)) { + return id; + } + } + index = nextSlot(index, mask); + } + } + + private long set(int key1, int key2, int key3, long id) { + assert size < maxSize; + long index = slot(hash(key1, key2, key3), mask); + while (true) { + final long curId = id(index); + if (curId == -1) { // means unset + id(index, id); + append(id, key1, key2, key3); + ++size; + return id; + } else { + long keyOffset = 3 * curId; + if (keys.get(keyOffset) == key1 && keys.get(keyOffset + 1) == key2 && keys.get(keyOffset + 2) == key3) { + return -1 - curId; + } + } + index = nextSlot(index, mask); + } + } + + private void append(long id, int key1, int key2, int key3) { + long keyOffset = 3 * id; + keys = bigArrays.grow(keys, keyOffset + 3); + keys.set(keyOffset, key1); + keys.set(keyOffset + 1, key2); + keys.set(keyOffset + 2, key3); + } + + private void reset(int key1, int key2, int key3, long id) { + long index = slot(hash(key1, key2, key3), mask); + while (true) { + final long curId = id(index); + if (curId == -1) { // means unset + id(index, id); + append(id, key1, key2, key3); + break; + } + index = nextSlot(index, mask); + } + } + + /** + * Try to add {@code key}. Return its newly allocated id if it wasn't in + * the hash table yet, or {@code -1-id} if it was already present in + * the hash table. 
+ */ + public long add(int key1, int key2, int key3) { + if (size >= maxSize) { + assert size == maxSize; + grow(); + } + assert size < maxSize; + return set(key1, key2, key3, size); + } + + @Override + protected void removeAndAdd(long index) { + final long id = id(index, -1); + assert id >= 0; + long keyOffset = id * 3; + final int key1 = keys.set(keyOffset, 0); + final int key2 = keys.set(keyOffset + 1, 0); + final int key3 = keys.set(keyOffset + 2, 0); + reset(key1, key2, key3, id); + } + + @Override + public void close() { + Releasables.close(keys, super::close); + } + + static long hash(long key1, long key2, long key3) { + return 31L * (31L * BitMixer.mix(key1) + BitMixer.mix(key2)) + BitMixer.mix(key3); + } +} diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineProcessor.java b/server/src/main/java/org/elasticsearch/ingest/PipelineProcessor.java index 85c57e2b0b891..bf0d2622e3ea3 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineProcessor.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineProcessor.java @@ -34,6 +34,10 @@ public class PipelineProcessor extends AbstractProcessor { this.ingestService = ingestService; } + public boolean isIgnoreMissingPipeline() { + return ignoreMissingPipeline; + } + @Override public void execute(IngestDocument ingestDocument, BiConsumer handler) { String pipelineName = ingestDocument.renderTemplate(this.pipelineTemplate); diff --git a/server/src/main/java/org/elasticsearch/ingest/TrackingResultProcessor.java b/server/src/main/java/org/elasticsearch/ingest/TrackingResultProcessor.java index 82b4295a07e99..c9ba3e478afdc 100644 --- a/server/src/main/java/org/elasticsearch/ingest/TrackingResultProcessor.java +++ b/server/src/main/java/org/elasticsearch/ingest/TrackingResultProcessor.java @@ -83,11 +83,18 @@ public void execute(IngestDocument ingestDocument, BiConsumer { diff --git a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java 
b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java index 3014d05e81036..3a8f95b868e35 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java @@ -15,7 +15,6 @@ import org.apache.logging.log4j.message.Message; import org.apache.lucene.util.Constants; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.PathUtils; import org.elasticsearch.test.AbstractBootstrapCheckTestCase; @@ -133,22 +132,20 @@ BufferedReader getBufferedReader(Path path) throws IOException { when(reader.readLine()).thenThrow(ioException); final Logger logger = LogManager.getLogger("testGetMaxMapCountIOException"); final MockLogAppender appender = new MockLogAppender(); - appender.start(); - appender.addExpectation( - new MessageLoggingExpectation( - "expected logged I/O exception", - "testGetMaxMapCountIOException", - Level.WARN, - "I/O exception while trying to read [" + procSysVmMaxMapCountPath + "]", - e -> ioException == e - ) - ); - Loggers.addAppender(logger, appender); - assertThat(check.getMaxMapCount(logger), equalTo(-1L)); - appender.assertAllExpectationsMatched(); + try (var ignored = appender.capturing("testGetMaxMapCountIOException")) { + appender.addExpectation( + new MessageLoggingExpectation( + "expected logged I/O exception", + "testGetMaxMapCountIOException", + Level.WARN, + "I/O exception while trying to read [" + procSysVmMaxMapCountPath + "]", + e -> ioException == e + ) + ); + assertThat(check.getMaxMapCount(logger), equalTo(-1L)); + appender.assertAllExpectationsMatched(); + } verify(reader).close(); - Loggers.removeAppender(logger, appender); - appender.stop(); } { @@ -156,22 +153,20 @@ BufferedReader getBufferedReader(Path path) throws IOException { when(reader.readLine()).thenReturn("eof"); final Logger 
logger = LogManager.getLogger("testGetMaxMapCountNumberFormatException"); final MockLogAppender appender = new MockLogAppender(); - appender.start(); - appender.addExpectation( - new MessageLoggingExpectation( - "expected logged number format exception", - "testGetMaxMapCountNumberFormatException", - Level.WARN, - "unable to parse vm.max_map_count [eof]", - e -> e instanceof NumberFormatException && e.getMessage().equals("For input string: \"eof\"") - ) - ); - Loggers.addAppender(logger, appender); - assertThat(check.getMaxMapCount(logger), equalTo(-1L)); - appender.assertAllExpectationsMatched(); + try (var ignored = appender.capturing("testGetMaxMapCountNumberFormatException")) { + appender.addExpectation( + new MessageLoggingExpectation( + "expected logged number format exception", + "testGetMaxMapCountNumberFormatException", + Level.WARN, + "unable to parse vm.max_map_count [eof]", + e -> e instanceof NumberFormatException && e.getMessage().equals("For input string: \"eof\"") + ) + ); + assertThat(check.getMaxMapCount(logger), equalTo(-1L)); + appender.assertAllExpectationsMatched(); + } verify(reader).close(); - Loggers.removeAppender(logger, appender); - appender.stop(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java index f0b6d62ef9767..d25cea9f74b6a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java @@ -9,12 +9,10 @@ package org.elasticsearch.cluster.coordination.stateless; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import 
org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; @@ -85,11 +83,8 @@ public void testLogSkippedElectionIfRecentLeaderHeartbeat() throws Exception { final var heartbeatFrequency = TimeValue.timeValueSeconds(randomIntBetween(15, 30)); final var maxTimeSinceLastHeartbeat = TimeValue.timeValueSeconds(2 * heartbeatFrequency.seconds()); DiscoveryNodeUtils.create("master"); - final var logger = LogManager.getLogger(AtomicRegisterPreVoteCollector.class); final var appender = new MockLogAppender(); - appender.start(); - try { - Loggers.addAppender(logger, appender); + try (var ignored = appender.capturing(AtomicRegisterPreVoteCollector.class)) { appender.addExpectation( new MockLogAppender.SeenEventExpectation( "log emitted when skipping election", @@ -123,9 +118,6 @@ protected long absoluteTimeInMillis() { assertThat(startElection.get(), is(false)); appender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(logger, appender); - appender.stop(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java index 55575f4a22fc6..b93ccb0f978af 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java @@ -291,7 +291,7 @@ public void testXContentSerializationWithRolloverAndEffectiveRetention() throws RolloverConfiguration rolloverConfiguration = RolloverConfigurationTests.randomRolloverConditions(); DataStreamGlobalRetention globalRetention = DataStreamGlobalRetentionTests.randomGlobalRetention(); ToXContent.Params withEffectiveRetention = new 
ToXContent.MapParams(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS); - template.toXContent(builder, withEffectiveRetention, rolloverConfiguration, globalRetention); + template.toXContent(builder, withEffectiveRetention, rolloverConfiguration); String serialized = Strings.toString(builder); assertThat(serialized, containsString("rollover")); for (String label : rolloverConfiguration.resolveRolloverConditions(lifecycle.getEffectiveDataRetention(globalRetention)) @@ -299,13 +299,12 @@ public void testXContentSerializationWithRolloverAndEffectiveRetention() throws .keySet()) { assertThat(serialized, containsString(label)); } - // We check that even if there was no retention provided by the user, the global retention applies + /* + * A template does not have a global retention and the lifecycle has no retention, so there will be no data_retention or + * effective_retention. + */ assertThat(serialized, not(containsString("data_retention"))); - if (globalRetention.getDefaultRetention() != null || globalRetention.getMaxRetention() != null) { - assertThat(serialized, containsString("effective_retention")); - } else { - assertThat(serialized, not(containsString("effective_retention"))); - } + assertThat(serialized, not(containsString("effective_retention"))); } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java index f4e55129f9f93..22c54ff2ad057 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java @@ -240,7 +240,7 @@ public void testXContentSerializationWithRolloverAndEffectiveRetention() throws RolloverConfiguration rolloverConfiguration = RolloverConfigurationTests.randomRolloverConditions(); DataStreamGlobalRetention globalRetention = 
DataStreamGlobalRetentionTests.randomGlobalRetention(); ToXContent.Params withEffectiveRetention = new ToXContent.MapParams(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS); - template.toXContent(builder, withEffectiveRetention, rolloverConfiguration, globalRetention); + template.toXContent(builder, withEffectiveRetention, rolloverConfiguration); String serialized = Strings.toString(builder); assertThat(serialized, containsString("rollover")); for (String label : rolloverConfiguration.resolveRolloverConditions(lifecycle.getEffectiveDataRetention(globalRetention)) @@ -248,13 +248,12 @@ public void testXContentSerializationWithRolloverAndEffectiveRetention() throws .keySet()) { assertThat(serialized, containsString(label)); } - // We check that even if there was no retention provided by the user, the global retention applies + /* + * A template does not have a global retention and the lifecycle has no retention, so there will be no data_retention or + * effective_retention. + */ assertThat(serialized, not(containsString("data_retention"))); - if (globalRetention.getDefaultRetention() != null || globalRetention.getMaxRetention() != null) { - assertThat(serialized, containsString("effective_retention")); - } else { - assertThat(serialized, not(containsString("effective_retention"))); - } + assertThat(serialized, not(containsString("effective_retention"))); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java index 0236cd474e204..57f1cd6d40381 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java @@ -9,8 +9,6 @@ package org.elasticsearch.cluster.routing.allocation; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; 
-import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -23,7 +21,6 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.MockLogAppender; @@ -100,11 +97,8 @@ public void testLoggingOnNodeLeft() throws IllegalAccessException { assertTrue(initialState.toString(), initialState.getRoutingNodes().unassigned().isEmpty()); - final Logger allocationServiceLogger = LogManager.getLogger(AllocationService.class); final MockLogAppender appender = new MockLogAppender(); - appender.start(); - Loggers.addAppender(allocationServiceLogger, appender); - try { + try (var ignored = appender.capturing(AllocationService.class)) { final String dissociationReason = "node left " + randomAlphaOfLength(10); appender.addExpectation( @@ -125,9 +119,6 @@ public void testLoggingOnNodeLeft() throws IllegalAccessException { ); appender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(allocationServiceLogger, appender); - appender.stop(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java index c3c35a95491ce..03f983dbd9b02 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java @@ -8,8 +8,6 @@ package org.elasticsearch.cluster.routing.allocation; import 
org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterName; @@ -31,7 +29,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -1367,24 +1364,30 @@ protected void updateIndicesReadOnly(Set indicesToMarkReadOnly, Releasab private void assertNoLogging(DiskThresholdMonitor monitor, Map diskUsages) throws IllegalAccessException { MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation("any INFO message", DiskThresholdMonitor.class.getCanonicalName(), Level.INFO, "*") - ); - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation("any WARN message", DiskThresholdMonitor.class.getCanonicalName(), Level.WARN, "*") - ); + try (var ignored = mockAppender.capturing(DiskThresholdMonitor.class)) { + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "any INFO message", + DiskThresholdMonitor.class.getCanonicalName(), + Level.INFO, + "*" + ) + ); + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "any WARN message", + DiskThresholdMonitor.class.getCanonicalName(), + Level.WARN, + "*" + ) + ); - Logger diskThresholdMonitorLogger = LogManager.getLogger(DiskThresholdMonitor.class); - Loggers.addAppender(diskThresholdMonitorLogger, mockAppender); + for (int i = between(1, 3); i >= 0; i--) { + monitor.onNewInfo(clusterInfo(diskUsages)); + } - for (int i = between(1, 3); i >= 0; i--) { - 
monitor.onNewInfo(clusterInfo(diskUsages)); + mockAppender.assertAllExpectationsMatched(); } - - mockAppender.assertAllExpectationsMatched(); - Loggers.removeAppender(diskThresholdMonitorLogger, mockAppender); - mockAppender.stop(); } private void assertRepeatedWarningMessages(DiskThresholdMonitor monitor, Map diskUsages, String message) @@ -1406,30 +1409,24 @@ private void assertSingleInfoMessage(DiskThresholdMonitor monitor, Map diskUsages, Level level, String message) - throws IllegalAccessException { + private void assertLogging(DiskThresholdMonitor monitor, Map diskUsages, Level level, String message) { MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation("expected message", DiskThresholdMonitor.class.getCanonicalName(), level, message) - ); - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( - "any message of another level", - DiskThresholdMonitor.class.getCanonicalName(), - level == Level.INFO ? Level.WARN : Level.INFO, - "*" - ) - ); - - Logger diskThresholdMonitorLogger = LogManager.getLogger(DiskThresholdMonitor.class); - Loggers.addAppender(diskThresholdMonitorLogger, mockAppender); - - monitor.onNewInfo(clusterInfo(diskUsages)); + try (var ignored = mockAppender.capturing(DiskThresholdMonitor.class)) { + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation("expected message", DiskThresholdMonitor.class.getCanonicalName(), level, message) + ); + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "any message of another level", + DiskThresholdMonitor.class.getCanonicalName(), + level == Level.INFO ? 
Level.WARN : Level.INFO, + "*" + ) + ); - mockAppender.assertAllExpectationsMatched(); - Loggers.removeAppender(diskThresholdMonitorLogger, mockAppender); - mockAppender.stop(); + monitor.onNewInfo(clusterInfo(diskUsages)); + mockAppender.assertAllExpectationsMatched(); + } } private static long betweenGb(int min, int max) { diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java index 694ff3286fd41..14cb25434e4a9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java @@ -9,8 +9,6 @@ package org.elasticsearch.cluster.service; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -24,7 +22,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -121,7 +118,6 @@ private void advanceTime(long millis) { @TestLogging(value = "org.elasticsearch.cluster.service:TRACE", reason = "to ensure that we log cluster state events on TRACE level") public void testClusterStateUpdateLogging() throws Exception { MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1", @@ -147,9 +143,7 @@ public void testClusterStateUpdateLogging() throws Exception { ) ); - 
Logger clusterLogger = LogManager.getLogger(ClusterApplierService.class); - Loggers.addAppender(clusterLogger, mockAppender); - try { + try (var ignored = mockAppender.capturing(ClusterApplierService.class)) { currentTimeMillis = randomLongBetween(0L, Long.MAX_VALUE / 2); clusterApplierService.runOnApplierThread( "test1", @@ -188,46 +182,40 @@ public void onFailure(Exception e) { } }); assertBusy(mockAppender::assertAllExpectationsMatched); - } finally { - Loggers.removeAppender(clusterLogger, mockAppender); - mockAppender.stop(); } } @TestLogging(value = "org.elasticsearch.cluster.service:WARN", reason = "to ensure that we log cluster state events on WARN level") public void testLongClusterStateUpdateLogging() throws Exception { MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( - "test1 shouldn't see because setting is too low", - ClusterApplierService.class.getCanonicalName(), - Level.WARN, - "*cluster state applier task [test1] took [*] which is above the warn threshold of *" - ) - ); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "test2", - ClusterApplierService.class.getCanonicalName(), - Level.WARN, - "*cluster state applier task [test2] took [32s] which is above the warn threshold of [*]: " - + "[running task [test2]] took [*" - ) - ); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "test4", - ClusterApplierService.class.getCanonicalName(), - Level.WARN, - "*cluster state applier task [test3] took [34s] which is above the warn threshold of [*]: " - + "[running task [test3]] took [*" - ) - ); + try (var ignored = mockAppender.capturing(ClusterApplierService.class)) { + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "test1 shouldn't see because setting is too low", + ClusterApplierService.class.getCanonicalName(), + Level.WARN, + "*cluster state applier task [test1] 
took [*] which is above the warn threshold of *" + ) + ); + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "test2", + ClusterApplierService.class.getCanonicalName(), + Level.WARN, + "*cluster state applier task [test2] took [32s] which is above the warn threshold of [*]: " + + "[running task [test2]] took [*" + ) + ); + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "test4", + ClusterApplierService.class.getCanonicalName(), + Level.WARN, + "*cluster state applier task [test3] took [34s] which is above the warn threshold of [*]: " + + "[running task [test3]] took [*" + ) + ); - Logger clusterLogger = LogManager.getLogger(ClusterApplierService.class); - Loggers.addAppender(clusterLogger, mockAppender); - try { final CountDownLatch latch = new CountDownLatch(4); final CountDownLatch processedFirstTask = new CountDownLatch(1); currentTimeMillis = randomLongBetween(0L, Long.MAX_VALUE / 2); @@ -293,11 +281,9 @@ public void onFailure(Exception e) { } }); latch.await(); - } finally { - Loggers.removeAppender(clusterLogger, mockAppender); - mockAppender.stop(); + + mockAppender.assertAllExpectationsMatched(); } - mockAppender.assertAllExpectationsMatched(); } public void testLocalNodeMasterListenerCallbacks() { diff --git a/server/src/test/java/org/elasticsearch/common/logging/JULBridgeTests.java b/server/src/test/java/org/elasticsearch/common/logging/JULBridgeTests.java index 5f86c6ba559ae..a192fb344dabb 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/JULBridgeTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/JULBridgeTests.java @@ -63,10 +63,8 @@ private void assertLogged(Runnable loggingCode, LoggingExpectation... 
expectatio Level savedLevel = testLogger.getLevel(); MockLogAppender mockAppender = new MockLogAppender(); - try { + try (var ignored = mockAppender.capturing("")) { Loggers.setLevel(testLogger, Level.ALL); - mockAppender.start(); - Loggers.addAppender(testLogger, mockAppender); for (var expectation : expectations) { mockAppender.addExpectation(expectation); } @@ -74,8 +72,6 @@ private void assertLogged(Runnable loggingCode, LoggingExpectation... expectatio mockAppender.assertAllExpectationsMatched(); } finally { Loggers.setLevel(testLogger, savedLevel); - Loggers.removeAppender(testLogger, mockAppender); - mockAppender.stop(); } } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 5b34e49fe491b..84e2cccf88cf9 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -8,12 +8,9 @@ package org.elasticsearch.common.settings; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.AbstractScopedSettings.SettingUpdater; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -1437,31 +1434,25 @@ public void testLogSettingUpdate() throws Exception { final IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); final MockLogAppender mockLogAppender = new MockLogAppender(); - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "message", - "org.elasticsearch.common.settings.IndexScopedSettings", - Level.INFO, - "updating 
[index.refresh_interval] from [20s] to [10s]" - ) { - @Override - public boolean innerMatch(LogEvent event) { - return event.getMarker().getName().equals(" [index1]"); + try (var ignored = mockLogAppender.capturing(IndexScopedSettings.class)) { + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "message", + "org.elasticsearch.common.settings.IndexScopedSettings", + Level.INFO, + "updating [index.refresh_interval] from [20s] to [10s]" + ) { + @Override + public boolean innerMatch(LogEvent event) { + return event.getMarker().getName().equals(" [index1]"); + } } - } - ); - mockLogAppender.start(); - final Logger logger = LogManager.getLogger(IndexScopedSettings.class); - try { - Loggers.addAppender(logger, mockLogAppender); + ); settings.updateIndexMetadata( newIndexMeta("index1", Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "10s").build()) ); mockLogAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(logger, mockLogAppender); - mockLogAppender.stop(); } } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java index ce00a2ba2ffae..4885bbc277cb4 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.test.ESTestCase; @@ -116,18 +115,14 @@ public void testRegularSettingUpdateIsFullyLogged() throws Exception { ); } - private void assertExpectedLogMessages(Consumer consumer, MockLogAppender.LoggingExpectation... 
expectations) - throws IllegalAccessException { + private void assertExpectedLogMessages(Consumer consumer, MockLogAppender.LoggingExpectation... expectations) { Logger testLogger = LogManager.getLogger("org.elasticsearch.test"); MockLogAppender appender = new MockLogAppender(); - Loggers.addAppender(testLogger, appender); - try { + try (var ignored = appender.capturing("org.elasticsearch.test")) { appender.start(); Arrays.stream(expectations).forEach(appender::addExpectation); consumer.accept(testLogger); appender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(testLogger, appender); } } diff --git a/server/src/test/java/org/elasticsearch/common/util/Int3HashTests.java b/server/src/test/java/org/elasticsearch/common/util/Int3HashTests.java new file mode 100644 index 0000000000000..5d9debf296ab3 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/util/Int3HashTests.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.util; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class Int3HashTests extends ESTestCase { + private BigArrays randombigArrays() { + return new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + } + + private Int3Hash randomHash() { + // Test high load factors to make sure that collision resolution works fine + final float maxLoadFactor = 0.6f + randomFloat() * 0.39f; + return new Int3Hash(randomIntBetween(0, 100), maxLoadFactor, randombigArrays()); + } + + public void testSimple() { + try (Int3Hash hash = randomHash()) { + assertThat(hash.add(0, 0, 0), equalTo(0L)); + assertThat(hash.add(0, 0, 1), equalTo(1L)); + assertThat(hash.add(0, 1, 1), equalTo(2L)); + assertThat(hash.add(1, 0, 0), equalTo(3L)); + assertThat(hash.add(1, 0, 1), equalTo(4L)); + + assertThat(hash.add(0, 0, 0), equalTo(-1L)); + assertThat(hash.add(0, 0, 1), equalTo(-2L)); + assertThat(hash.add(1, 0, 1), equalTo(-5L)); + + assertThat(hash.getKey1(0), equalTo(0)); + assertThat(hash.getKey2(0), equalTo(0)); + assertThat(hash.getKey3(0), equalTo(0)); + assertThat(hash.getKey1(4), equalTo(1)); + assertThat(hash.getKey2(4), equalTo(0)); + assertThat(hash.getKey3(4), equalTo(1)); + } + } + + public void testDuel() { + try (Int3Hash hash = randomHash()) { + int iters = scaledRandomIntBetween(100, 100000); + Key[] values = randomArray(1, iters, Key[]::new, () -> new Key(randomInt(), randomInt(), randomInt())); + Map keyToId = new HashMap<>(); + List idToKey = new ArrayList<>(); + for (int i = 0; i < iters; ++i) { + Key key = randomFrom(values); + if (keyToId.containsKey(key)) { + 
assertEquals(-1 - keyToId.get(key), hash.add(key.key1, key.key2, key.key3)); + } else { + assertEquals(keyToId.size(), hash.add(key.key1, key.key2, key.key3)); + keyToId.put(key, keyToId.size()); + idToKey.add(key); + } + } + + assertEquals(keyToId.size(), hash.size()); + for (Map.Entry entry : keyToId.entrySet()) { + assertEquals(entry.getValue().longValue(), hash.find(entry.getKey().key1, entry.getKey().key2, entry.getKey().key3)); + } + + assertEquals(idToKey.size(), hash.size()); + for (long i = 0; i < hash.capacity(); i++) { + long id = hash.id(i); + if (id >= 0) { + Key key = idToKey.get((int) id); + assertEquals(key.key1, hash.getKey1(id)); + assertEquals(key.key2, hash.getKey2(id)); + assertEquals(key.key3, hash.getKey3(id)); + } + } + + for (long i = 0; i < hash.size(); i++) { + Key key = idToKey.get((int) i); + assertEquals(key.key1, hash.getKey1(i)); + assertEquals(key.key2, hash.getKey2(i)); + assertEquals(key.key3, hash.getKey3(i)); + } + } + } + + public void testAllocation() { + MockBigArrays.assertFitsIn(ByteSizeValue.ofBytes(256), bigArrays -> new Int3Hash(1, bigArrays)); + } + + record Key(int key1, int key2, int key3) { + + } +} diff --git a/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java b/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java index 02bd0852f50c4..f96585a81752d 100644 --- a/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java @@ -9,8 +9,6 @@ package org.elasticsearch.health; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; @@ -23,7 +21,6 @@ import org.elasticsearch.cluster.service.ClusterService; import 
org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.logging.ESLogMessage; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.scheduler.SchedulerEngine; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -592,129 +589,116 @@ public void testClosingWhenRunInProgress() throws Exception { public void testLoggingHappens() { MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "overall", - HealthPeriodicLogger.class.getCanonicalName(), - Level.INFO, - String.format(Locale.ROOT, "%s=\"yellow\"", makeHealthStatusString("overall")) - ) - ); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "master_is_stable", - HealthPeriodicLogger.class.getCanonicalName(), - Level.INFO, - String.format(Locale.ROOT, "%s=\"green\"", makeHealthStatusString("master_is_stable")) - ) - ); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "disk", - HealthPeriodicLogger.class.getCanonicalName(), - Level.INFO, - String.format(Locale.ROOT, "%s=\"yellow\"", makeHealthStatusString("disk")) - ) - ); - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( - "ilm", - HealthPeriodicLogger.class.getCanonicalName(), - Level.INFO, - String.format(Locale.ROOT, "%s=\"red\"", makeHealthStatusString("ilm")) - ) - ); - Logger periodicLoggerLogger = LogManager.getLogger(HealthPeriodicLogger.class); - Loggers.addAppender(periodicLoggerLogger, mockAppender); - - HealthService testHealthService = this.getMockedHealthService(); - doAnswer(invocation -> { - ActionListener> listener = invocation.getArgument(4); - assertNotNull(listener); - listener.onResponse(getTestIndicatorResults()); - return null; - }).when(testHealthService).getHealth(any(), isNull(), anyBoolean(), anyInt(), any()); - testHealthPeriodicLogger = 
createAndInitHealthPeriodicLogger(this.clusterService, testHealthService, false); + try (var ignored = mockAppender.capturing(HealthPeriodicLogger.class)) { + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "overall", + HealthPeriodicLogger.class.getCanonicalName(), + Level.INFO, + String.format(Locale.ROOT, "%s=\"yellow\"", makeHealthStatusString("overall")) + ) + ); + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "master_is_stable", + HealthPeriodicLogger.class.getCanonicalName(), + Level.INFO, + String.format(Locale.ROOT, "%s=\"green\"", makeHealthStatusString("master_is_stable")) + ) + ); + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "disk", + HealthPeriodicLogger.class.getCanonicalName(), + Level.INFO, + String.format(Locale.ROOT, "%s=\"yellow\"", makeHealthStatusString("disk")) + ) + ); + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "ilm", + HealthPeriodicLogger.class.getCanonicalName(), + Level.INFO, + String.format(Locale.ROOT, "%s=\"red\"", makeHealthStatusString("ilm")) + ) + ); - // switch to Log only mode - this.clusterSettings.applySettings( - Settings.builder() - .put(HealthPeriodicLogger.OUTPUT_MODE_SETTING.getKey(), HealthPeriodicLogger.OutputMode.LOGS) - .put(HealthPeriodicLogger.ENABLED_SETTING.getKey(), true) - .build() - ); - testHealthPeriodicLogger.clusterChanged(new ClusterChangedEvent("test", stateWithLocalHealthNode, ClusterState.EMPTY_STATE)); - assertTrue("local node should be the health node", testHealthPeriodicLogger.isHealthNode()); + HealthService testHealthService = this.getMockedHealthService(); + doAnswer(invocation -> { + ActionListener> listener = invocation.getArgument(4); + assertNotNull(listener); + listener.onResponse(getTestIndicatorResults()); + return null; + }).when(testHealthService).getHealth(any(), isNull(), anyBoolean(), anyInt(), any()); + testHealthPeriodicLogger = 
createAndInitHealthPeriodicLogger(this.clusterService, testHealthService, false); - SchedulerEngine.Event event = new SchedulerEngine.Event(HealthPeriodicLogger.HEALTH_PERIODIC_LOGGER_JOB_NAME, 0, 0); - testHealthPeriodicLogger.triggered(event); + // switch to Log only mode + this.clusterSettings.applySettings( + Settings.builder() + .put(HealthPeriodicLogger.OUTPUT_MODE_SETTING.getKey(), HealthPeriodicLogger.OutputMode.LOGS) + .put(HealthPeriodicLogger.ENABLED_SETTING.getKey(), true) + .build() + ); + testHealthPeriodicLogger.clusterChanged(new ClusterChangedEvent("test", stateWithLocalHealthNode, ClusterState.EMPTY_STATE)); + assertTrue("local node should be the health node", testHealthPeriodicLogger.isHealthNode()); - try { + SchedulerEngine.Event event = new SchedulerEngine.Event(HealthPeriodicLogger.HEALTH_PERIODIC_LOGGER_JOB_NAME, 0, 0); + testHealthPeriodicLogger.triggered(event); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(periodicLoggerLogger, mockAppender); - mockAppender.stop(); } } public void testOutputModeNoLogging() { MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( - "overall", - HealthPeriodicLogger.class.getCanonicalName(), - Level.INFO, - String.format(Locale.ROOT, "%s=\"yellow\"", makeHealthStatusString("overall")) - ) - ); - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( - "master_is_stable", - HealthPeriodicLogger.class.getCanonicalName(), - Level.INFO, - String.format(Locale.ROOT, "%s=\"green\"", makeHealthStatusString("master_is_stable")) - ) - ); - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( - "disk", - HealthPeriodicLogger.class.getCanonicalName(), - Level.INFO, - String.format(Locale.ROOT, "%s=\"yellow\"", makeHealthStatusString("disk")) - ) - ); - Logger periodicLoggerLogger = LogManager.getLogger(HealthPeriodicLogger.class); - 
Loggers.addAppender(periodicLoggerLogger, mockAppender); + try (var ignored = mockAppender.capturing(HealthPeriodicLogger.class)) { + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "overall", + HealthPeriodicLogger.class.getCanonicalName(), + Level.INFO, + String.format(Locale.ROOT, "%s=\"yellow\"", makeHealthStatusString("overall")) + ) + ); + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "master_is_stable", + HealthPeriodicLogger.class.getCanonicalName(), + Level.INFO, + String.format(Locale.ROOT, "%s=\"green\"", makeHealthStatusString("master_is_stable")) + ) + ); + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "disk", + HealthPeriodicLogger.class.getCanonicalName(), + Level.INFO, + String.format(Locale.ROOT, "%s=\"yellow\"", makeHealthStatusString("disk")) + ) + ); - HealthService testHealthService = this.getMockedHealthService(); - doAnswer(invocation -> { - ActionListener> listener = invocation.getArgument(4); - assertNotNull(listener); - listener.onResponse(getTestIndicatorResults()); - return null; - }).when(testHealthService).getHealth(any(), isNull(), anyBoolean(), anyInt(), any()); - testHealthPeriodicLogger = createAndInitHealthPeriodicLogger(this.clusterService, testHealthService, false); + HealthService testHealthService = this.getMockedHealthService(); + doAnswer(invocation -> { + ActionListener> listener = invocation.getArgument(4); + assertNotNull(listener); + listener.onResponse(getTestIndicatorResults()); + return null; + }).when(testHealthService).getHealth(any(), isNull(), anyBoolean(), anyInt(), any()); + testHealthPeriodicLogger = createAndInitHealthPeriodicLogger(this.clusterService, testHealthService, false); - // switch to Metrics only mode - this.clusterSettings.applySettings( - Settings.builder() - .put(HealthPeriodicLogger.OUTPUT_MODE_SETTING.getKey(), HealthPeriodicLogger.OutputMode.METRICS) - .put(HealthPeriodicLogger.ENABLED_SETTING.getKey(), 
true) - .build() - ); - testHealthPeriodicLogger.clusterChanged(new ClusterChangedEvent("test", stateWithLocalHealthNode, ClusterState.EMPTY_STATE)); - assertTrue("local node should be the health node", testHealthPeriodicLogger.isHealthNode()); + // switch to Metrics only mode + this.clusterSettings.applySettings( + Settings.builder() + .put(HealthPeriodicLogger.OUTPUT_MODE_SETTING.getKey(), HealthPeriodicLogger.OutputMode.METRICS) + .put(HealthPeriodicLogger.ENABLED_SETTING.getKey(), true) + .build() + ); + testHealthPeriodicLogger.clusterChanged(new ClusterChangedEvent("test", stateWithLocalHealthNode, ClusterState.EMPTY_STATE)); + assertTrue("local node should be the health node", testHealthPeriodicLogger.isHealthNode()); - SchedulerEngine.Event event = new SchedulerEngine.Event(HealthPeriodicLogger.HEALTH_PERIODIC_LOGGER_JOB_NAME, 0, 0); - testHealthPeriodicLogger.triggered(event); + SchedulerEngine.Event event = new SchedulerEngine.Event(HealthPeriodicLogger.HEALTH_PERIODIC_LOGGER_JOB_NAME, 0, 0); + testHealthPeriodicLogger.triggered(event); - try { mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(periodicLoggerLogger, mockAppender); - mockAppender.stop(); } } diff --git a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java index 06d05f9dc06fa..db3a0d7c0b3bc 100644 --- a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.TestIndexNameExpressionResolver; import 
org.elasticsearch.plugins.ActionPlugin; @@ -665,7 +666,6 @@ public HttpStats stats() { public void testLogsSlowInboundProcessing() throws Exception { final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); final String opaqueId = UUIDs.randomBase64UUID(random()); final String path = "/internal/test"; final RestRequest.Method method = randomFrom(RestRequest.Method.values()); @@ -677,13 +677,12 @@ public void testLogsSlowInboundProcessing() throws Exception { "handling request [" + opaqueId + "][" + method + "][" + path + "]" ) ); - final Logger inboundHandlerLogger = LogManager.getLogger(AbstractHttpServerTransport.class); - Loggers.addAppender(inboundHandlerLogger, mockAppender); final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final Settings settings = Settings.builder() .put(TransportSettings.SLOW_OPERATION_THRESHOLD_SETTING.getKey(), TimeValue.timeValueMillis(5)) .build(); try ( + var ignored = mockAppender.capturing(AbstractHttpServerTransport.class); AbstractHttpServerTransport transport = new AbstractHttpServerTransport( settings, networkService, @@ -738,9 +737,6 @@ public HttpStats stats() { transport.serverAcceptedChannel(fakeRestRequest.getHttpChannel()); transport.incomingRequest(fakeRestRequest.getHttpRequest(), fakeRestRequest.getHttpChannel()); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(inboundHandlerLogger, mockAppender); - mockAppender.stop(); } } @@ -1358,15 +1354,14 @@ public void addCloseListener(ActionListener listener) { private static class LogExpectation implements AutoCloseable { private final Logger mockLogger; private final MockLogAppender appender; - private boolean checked = false; + private final Releasable appenderRelease; private final int grace; private LogExpectation(int grace) { mockLogger = LogManager.getLogger(AbstractHttpServerTransport.class); Loggers.setLevel(mockLogger, Level.DEBUG); 
appender = new MockLogAppender(); - Loggers.addAppender(mockLogger, appender); - appender.start(); + appenderRelease = appender.capturing(AbstractHttpServerTransport.class); this.grace = grace; } @@ -1423,16 +1418,11 @@ private LogExpectation update(int connections) { public void assertExpectationsMatched() { appender.assertAllExpectationsMatched(); - checked = true; } @Override public void close() { - Loggers.removeAppender(mockLogger, appender); - appender.stop(); - if (checked == false) { - fail("did not check expectations matched in TimedOutLogExpectation"); - } + appenderRelease.close(); } } diff --git a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index d8d5ab56c6e1d..fae28a9103e5b 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.logging.MockAppender; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexingSlowLog.IndexingSlowLogMessage; import org.elasticsearch.index.engine.Engine; @@ -54,6 +55,7 @@ public class IndexingSlowLogTests extends ESTestCase { static MockAppender appender; + static Releasable appenderRelease; static Logger testLogger1 = LogManager.getLogger(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_PREFIX + ".index"); @BeforeClass diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 2fdd5687d009e..75860ebe7fdea 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -8,7 +8,6 @@ package 
org.elasticsearch.index.shard; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; @@ -54,7 +53,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -3530,9 +3528,7 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO ); final MockLogAppender appender = new MockLogAppender(); - appender.start(); - Loggers.addAppender(LogManager.getLogger(IndexShard.class), appender); - try { + try (var ignored = appender.capturing(IndexShard.class)) { appender.addExpectation( new MockLogAppender.SeenEventExpectation( "expensive checks warning", @@ -3563,9 +3559,6 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO ); appender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(LogManager.getLogger(IndexShard.class), appender); - appender.stop(); } // check that corrupt marker is there @@ -4070,7 +4063,7 @@ public void testFlushTimeExcludingWaiting() throws Exception { @TestLogging(reason = "testing traces of concurrent flushes", value = "org.elasticsearch.index.engine.Engine:TRACE") public void testFlushOnIdleConcurrentFlushDoesNotWait() throws Exception { final MockLogAppender mockLogAppender = new MockLogAppender(); - try { + try (var ignored = mockLogAppender.capturing(Engine.class)) { CountDownLatch readyToCompleteFlushLatch = new CountDownLatch(1); IndexShard shard = newStartedShard(false, Settings.EMPTY, config -> new InternalEngine(config) { @Override @@ -4084,9 +4077,6 @@ protected void 
commitIndexWriter(final IndexWriter writer, final Translog transl indexDoc(shard, "_doc", Integer.toString(i)); } - mockLogAppender.start(); - Loggers.addAppender(LogManager.getLogger(Engine.class), mockLogAppender); - // Issue the first flushOnIdle request. The flush happens in the background using the flush threadpool. // Then wait for log message that flush acquired lock immediately mockLogAppender.addExpectation( @@ -4140,9 +4130,6 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl assertTrue(shard.flush(new FlushRequest())); closeShards(shard); - } finally { - Loggers.removeAppender(LogManager.getLogger(Engine.class), mockLogAppender); - mockLogAppender.stop(); } } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java index ab25465e77bd2..9f57d7276accb 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java @@ -9,10 +9,7 @@ package org.elasticsearch.indices.recovery; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.node.DiscoveryNodeRole; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -492,15 +489,13 @@ public void testRecoverFromSnapshotPermitsAreNotLeakedWhenRecoverFromSnapshotIsD final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); + final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.addExpectation( - new 
MockLogAppender.UnseenEventExpectation("no warnings", RecoverySettings.class.getCanonicalName(), Level.WARN, "*") - ); - mockAppender.start(); - final Logger logger = LogManager.getLogger(RecoverySettings.class); - Loggers.addAppender(logger, mockAppender); + try (var ignored = mockAppender.capturing(RecoverySettings.class)) { + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation("no warnings", RecoverySettings.class.getCanonicalName(), Level.WARN, "*") + ); - try { assertThat(recoverySettings.getUseSnapshotsDuringRecovery(), is(false)); for (int i = 0; i < 4; i++) { @@ -514,9 +509,6 @@ public void testRecoverFromSnapshotPermitsAreNotLeakedWhenRecoverFromSnapshotIsD releasable.close(); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(logger, mockAppender); - mockAppender.stop(); } } diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index 41e865ceb97fb..e307d75a78ce9 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -9,8 +9,6 @@ package org.elasticsearch.ingest; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ResourceNotFoundException; @@ -42,7 +40,6 @@ import org.elasticsearch.cluster.service.ClusterStateTaskExecutorUtils; import org.elasticsearch.common.TriConsumer; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.util.Maps; @@ -754,18 +751,17 @@ public void testPutWithErrorResponse() throws IllegalAccessException { 
String id = "_id"; Pipeline pipeline = ingestService.getPipeline(id); assertThat(pipeline, nullValue()); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = ClusterState.builder(new ClusterName("_name")).build(); PutPipelineRequest putRequest = new PutPipelineRequest( id, new BytesArray("{\"description\": \"empty processors\"}"), XContentType.JSON ); - ClusterState previousClusterState = clusterState; - clusterState = executePut(putRequest, clusterState); - MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - mockAppender.addExpectation( + ClusterState clusterState = executePut(putRequest, previousClusterState); + MockLogAppender.assertThatLogger( + () -> ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)), + IngestService.class, new MockLogAppender.SeenEventExpectation( "test1", IngestService.class.getCanonicalName(), @@ -773,15 +769,6 @@ public void testPutWithErrorResponse() throws IllegalAccessException { "failed to update ingest pipelines" ) ); - Logger ingestLogger = LogManager.getLogger(IngestService.class); - Loggers.addAppender(ingestLogger, mockAppender); - try { - ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); - mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(ingestLogger, mockAppender); - mockAppender.stop(); - } pipeline = ingestService.getPipeline(id); assertNotNull(pipeline); assertThat(pipeline.getId(), equalTo("_id")); diff --git a/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java b/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java index 348ff8d10d8b1..64f5269e1b031 100644 --- a/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java +++ b/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java @@ -9,14 +9,12 @@ package 
org.elasticsearch.tasks; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.cluster.node.tasks.TaskManagerTestCase; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; @@ -172,10 +170,7 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, ); MockLogAppender appender = new MockLogAppender(); - appender.start(); - resources.add(appender::stop); - Loggers.addAppender(LogManager.getLogger(TaskCancellationService.class), appender); - resources.add(() -> Loggers.removeAppender(LogManager.getLogger(TaskCancellationService.class), appender)); + resources.add(appender.capturing(TaskCancellationService.class)); for (MockLogAppender.LoggingExpectation expectation : expectations.apply(childTransportService.getLocalDiscoNode())) { appender.addExpectation(expectation); diff --git a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java index 66d3dd7a829eb..ffbe142c83177 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java @@ -9,11 +9,8 @@ package org.elasticsearch.threadpool; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.common.logging.Loggers; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -109,11 +106,8 @@ public void testLateTimeIntervalWarningMuchLongerThanEstimatedTimeIntervalByDefa } public void testTimerThreadWarningLogging() throws Exception { - final Logger threadPoolLogger = LogManager.getLogger(ThreadPool.class); final MockLogAppender appender = new MockLogAppender(); - appender.start(); - try { - Loggers.addAppender(threadPoolLogger, appender); + try (var ignored = appender.capturing(ThreadPool.class)) { appender.addExpectation( new MockLogAppender.SeenEventExpectation( "expected warning for absolute clock", @@ -138,19 +132,12 @@ public void testTimerThreadWarningLogging() throws Exception { thread.interrupt(); thread.join(); - } finally { - Loggers.removeAppender(threadPoolLogger, appender); - appender.stop(); } } public void testTimeChangeChecker() throws Exception { - final Logger threadPoolLogger = LogManager.getLogger(ThreadPool.class); final MockLogAppender appender = new MockLogAppender(); - appender.start(); - try { - Loggers.addAppender(threadPoolLogger, appender); - + try (var ignored = appender.capturing(ThreadPool.class)) { long absoluteMillis = randomLong(); // overflow should still be handled correctly long relativeNanos = randomLong(); // overflow should still be handled correctly @@ -212,9 +199,6 @@ public void testTimeChangeChecker() throws Exception { } appender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(threadPoolLogger, appender); - appender.stop(); } } @@ -288,11 +272,8 @@ public void testSchedulerWarnLogging() throws Exception { "test", Settings.builder().put(ThreadPool.SLOW_SCHEDULER_TASK_WARN_THRESHOLD_SETTING.getKey(), "10ms").build() ); - final Logger logger = LogManager.getLogger(ThreadPool.class); final MockLogAppender appender = new MockLogAppender(); - appender.start(); - try { - Loggers.addAppender(logger, appender); + 
try (var ignored = appender.capturing(ThreadPool.class)) { appender.addExpectation( new MockLogAppender.SeenEventExpectation( "expected warning for slow task", @@ -320,8 +301,6 @@ public String toString() { threadPool.schedule(runnable, TimeValue.timeValueMillis(randomLongBetween(0, 300)), EsExecutors.DIRECT_EXECUTOR_SERVICE); assertBusy(appender::assertAllExpectationsMatched); } finally { - Loggers.removeAppender(logger, appender); - appender.stop(); assertTrue(terminate(threadPool)); } } diff --git a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java index 3d3026a6788ac..f923f0ce81f02 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java @@ -9,8 +9,6 @@ package org.elasticsearch.transport; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; @@ -23,7 +21,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.PageCacheRecycler; @@ -233,19 +230,16 @@ public void testClosesChannelOnErrorInHandshake() throws Exception { // it. 
final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "expected message", - EXPECTED_LOGGER_NAME, - Level.WARN, - "error processing handshake version" - ) - ); - final Logger inboundHandlerLogger = LogManager.getLogger(InboundHandler.class); - Loggers.addAppender(inboundHandlerLogger, mockAppender); + try (var ignored = mockAppender.capturing(InboundHandler.class)) { + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "expected message", + EXPECTED_LOGGER_NAME, + Level.WARN, + "error processing handshake version" + ) + ); - try { final AtomicBoolean isClosed = new AtomicBoolean(); channel.addCloseListener(ActionListener.running(() -> assertTrue(isClosed.compareAndSet(false, true)))); @@ -268,9 +262,6 @@ public void testClosesChannelOnErrorInHandshake() throws Exception { assertTrue(isClosed.get()); assertNull(channel.getMessageCaptor().get()); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(inboundHandlerLogger, mockAppender); - mockAppender.stop(); } } @@ -282,12 +273,9 @@ public void testClosesChannelOnErrorInHandshake() throws Exception { public void testLogsSlowInboundProcessing() throws Exception { final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - final Logger inboundHandlerLogger = LogManager.getLogger(InboundHandler.class); - Loggers.addAppender(inboundHandlerLogger, mockAppender); handler.setSlowLogThreshold(TimeValue.timeValueMillis(5L)); - try { + try (var ignored = mockAppender.capturing(InboundHandler.class)) { final TransportVersion remoteVersion = TransportVersion.current(); mockAppender.addExpectation( @@ -339,9 +327,6 @@ public void onResponseReceived(long requestId, Transport.ResponseContext context handler.inboundMessage(channel, new InboundMessage(responseHeader, ReleasableBytesReference.empty(), () -> {})); 
mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(inboundHandlerLogger, mockAppender); - mockAppender.stop(); } } diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java index 61131f5fc18bb..99cc7c471ab82 100644 --- a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -9,8 +9,6 @@ package org.elasticsearch.transport; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; @@ -26,7 +24,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -537,16 +534,14 @@ public void onResponseSent(long requestId, String action, Exception error) { private static final String EXPECTED_LOGGER_NAME = "org.elasticsearch.transport.OutboundHandler"; public void testSlowLogOutboundMessage() throws Exception { - final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation("expected message", EXPECTED_LOGGER_NAME, Level.WARN, "sending transport message ") - ); - final Logger outboundHandlerLogger = LogManager.getLogger(OutboundHandler.class); - Loggers.addAppender(outboundHandlerLogger, mockAppender); handler.setSlowLogThreshold(TimeValue.timeValueMillis(5L)); - try { + final MockLogAppender 
mockAppender = new MockLogAppender(); + try (var ignored = mockAppender.capturing(OutboundHandler.class)) { + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation("expected message", EXPECTED_LOGGER_NAME, Level.WARN, "sending transport message ") + ); + final int length = randomIntBetween(1, 100); final PlainActionFuture f = new PlainActionFuture<>(); handler.sendBytes(new FakeTcpChannel() { @@ -562,9 +557,6 @@ public void sendMessage(BytesReference reference, ActionListener listener) }, new BytesArray(randomByteArrayOfLength(length)), f); f.get(); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(outboundHandlerLogger, mockAppender); - mockAppender.stop(); } } diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index b9867e909bd60..b4c729d012b20 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -9,14 +9,12 @@ package org.elasticsearch.transport; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkUtils; @@ -574,14 +572,11 @@ private void testExceptionHandling( Exception exception, boolean expectClosed, MockLogAppender.LoggingExpectation... 
expectations - ) throws IllegalAccessException { + ) { final TestThreadPool testThreadPool = new TestThreadPool("test"); MockLogAppender appender = new MockLogAppender(); - try { - appender.start(); - - Loggers.addAppender(LogManager.getLogger(TcpTransport.class), appender); + try (var ignored = appender.capturing(TcpTransport.class)) { for (MockLogAppender.LoggingExpectation expectation : expectations) { appender.addExpectation(expectation); } @@ -621,8 +616,6 @@ private void testExceptionHandling( appender.assertAllExpectationsMatched(); } finally { - Loggers.removeAppender(LogManager.getLogger(TcpTransport.class), appender); - appender.stop(); ThreadPool.terminate(testThreadPool, 30, TimeUnit.SECONDS); } } diff --git a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java index e636e3c2d7d9c..fcd8b64df47dd 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java @@ -8,13 +8,11 @@ package org.elasticsearch.transport; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest; import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -29,21 +27,6 @@ @TestLogging(value = "org.elasticsearch.transport.TransportLogger:trace", reason = "to ensure we log network events on TRACE level") public class TransportLoggerTests extends ESTestCase { - private MockLogAppender 
appender; - - public void setUp() throws Exception { - super.setUp(); - appender = new MockLogAppender(); - Loggers.addAppender(LogManager.getLogger(TransportLogger.class), appender); - appender.start(); - } - - public void tearDown() throws Exception { - Loggers.removeAppender(LogManager.getLogger(TransportLogger.class), appender); - appender.stop(); - super.tearDown(); - } - public void testLoggingHandler() throws IOException { final String writePattern = ".*\\[length: \\d+" + ", request id: \\d+" @@ -74,12 +57,15 @@ public void testLoggingHandler() throws IOException { readPattern ); - appender.addExpectation(writeExpectation); - appender.addExpectation(readExpectation); - BytesReference bytesReference = buildRequest(); - TransportLogger.logInboundMessage(mock(TcpChannel.class), bytesReference.slice(6, bytesReference.length() - 6)); - TransportLogger.logOutboundMessage(mock(TcpChannel.class), bytesReference); - appender.assertAllExpectationsMatched(); + MockLogAppender appender = new MockLogAppender(); + try (var ignored = appender.capturing(TransportLogger.class)) { + appender.addExpectation(writeExpectation); + appender.addExpectation(readExpectation); + BytesReference bytesReference = buildRequest(); + TransportLogger.logInboundMessage(mock(TcpChannel.class), bytesReference.slice(6, bytesReference.length() - 6)); + TransportLogger.logOutboundMessage(mock(TcpChannel.class), bytesReference); + appender.assertAllExpectationsMatched(); + } } private BytesReference buildRequest() throws IOException { diff --git a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java index 6b8b9a33e0284..38f8ad4766b7e 100644 --- a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java +++ 
b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java @@ -263,7 +263,7 @@ public void testManyEval() throws IOException { columns = columns.item(matchesMap().entry("name", "c").entry("type", "long")); columns = columns.item(matchesMap().entry("name", "d").entry("type", "long")); columns = columns.item(matchesMap().entry("name", "e").entry("type", "long")); - for (int i = 0; i < 10; i++) { + for (int i = 0; i < 20; i++) { columns = columns.item(matchesMap().entry("name", "i0" + i).entry("type", "long")); } assertMap(map, matchesMap().entry("columns", columns).entry("values", hasSize(10_000))); @@ -272,7 +272,7 @@ public void testManyEval() throws IOException { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/108104") public void testTooManyEval() throws IOException { initManyLongs(); - assertCircuitBreaks(() -> manyEval(1000)); + assertCircuitBreaks(() -> manyEval(490)); } private Response manyEval(int evalLines) throws IOException { @@ -280,7 +280,7 @@ private Response manyEval(int evalLines) throws IOException { query.append("FROM manylongs"); for (int e = 0; e < evalLines; e++) { query.append("\n| EVAL "); - for (int i = 0; i < 10; i++) { + for (int i = 0; i < 20; i++) { if (i != 0) { query.append(", "); } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 40c48a4d3fcde..ee7687398cf7b 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -9,7 +9,6 @@ package org.elasticsearch.transport; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.CollectionUtil; import 
org.elasticsearch.ElasticsearchException; @@ -29,7 +28,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkUtils; @@ -1322,9 +1320,7 @@ public void handleException(TransportException exp) {} ); MockLogAppender appender = new MockLogAppender(); - try { - appender.start(); - Loggers.addAppender(LogManager.getLogger("org.elasticsearch.transport.TransportService.tracer"), appender); + try (var ignored = appender.capturing("org.elasticsearch.transport.TransportService.tracer")) { //////////////////////////////////////////////////////////////////////// // tests for included action type "internal:test" @@ -1464,9 +1460,6 @@ public void handleException(TransportException exp) {} submitRequest(serviceA, nodeB, "internal:testNotSeen", new StringMessageRequest(""), noopResponseHandler).get(); assertBusy(appender::assertAllExpectationsMatched); - } finally { - Loggers.removeAppender(LogManager.getLogger("org.elasticsearch.transport.TransportService.tracer"), appender); - appender.stop(); } } diff --git a/test/x-content/build.gradle b/test/x-content/build.gradle index 9c00e32b41348..432fe8ec3a216 100644 --- a/test/x-content/build.gradle +++ b/test/x-content/build.gradle @@ -18,7 +18,8 @@ dependencies { implementation "com.networknt:json-schema-validator:${versions.networknt_json_schema_validator}" implementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" implementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" - implementation "org.apache.commons:commons-compress:1.24.0" + implementation "org.apache.commons:commons-compress:1.26.1" + implementation "commons-io:commons-io:2.15.1" implementation 
"org.apache.commons:commons-lang3:${versions.commons_lang3}" } diff --git a/test/yaml-rest-runner/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseFailLogIT.java b/test/yaml-rest-runner/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseFailLogIT.java index bb5a90803d665..787a3b334d70c 100644 --- a/test/yaml-rest-runner/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseFailLogIT.java +++ b/test/yaml-rest-runner/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseFailLogIT.java @@ -11,8 +11,6 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -36,10 +34,7 @@ public static Iterable parameters() throws Exception { @Override public void test() throws IOException { final MockLogAppender mockLogAppender = new MockLogAppender(); - try { - mockLogAppender.start(); - Loggers.addAppender(LogManager.getLogger(ESClientYamlSuiteTestCaseFailLogIT.class), mockLogAppender); - + try (var ignored = mockLogAppender.capturing(ESClientYamlSuiteTestCaseFailLogIT.class)) { mockLogAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "message with dump of the test yaml", @@ -68,9 +63,6 @@ public void test() throws IOException { } mockLogAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(LogManager.getLogger(ESClientYamlSuiteTestCaseFailLogIT.class), mockLogAppender); - mockLogAppender.stop(); } } } diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingCapacityActionIT.java 
b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingCapacityActionIT.java index bf50155dafccf..1e7e30dca0954 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingCapacityActionIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingCapacityActionIT.java @@ -8,9 +8,6 @@ package org.elasticsearch.xpack.autoscaling.action; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.monitor.os.OsProbe; import org.elasticsearch.test.ESIntegTestCase; @@ -49,25 +46,22 @@ public void testCurrentCapacity() throws Exception { assertBusy(() -> { assertCurrentCapacity(memory, storage, nodes); }); } - public void assertCurrentCapacity(long memory, long storage, int nodes) throws IllegalAccessException { - Logger subjectLogger = LogManager.getLogger(TransportGetAutoscalingCapacityAction.class); - + public void assertCurrentCapacity(long memory, long storage, int nodes) { MockLogAppender appender = new MockLogAppender(); - appender.start(); - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "autoscaling capacity response message with " + storage, - TransportGetAutoscalingCapacityAction.class.getName(), - Level.DEBUG, - "autoscaling capacity response [*\"policies\"*\"test\"*\"current_capacity\"*\"storage\":" - + storage - + "*\"deciders\"" - + "*\"reactive_storage\"" - + "*\"reason_summary\"*\"reason_details\"*]" - ) - ); - Loggers.addAppender(subjectLogger, appender); - try { + try (var ignored = appender.capturing(TransportGetAutoscalingCapacityAction.class)) { + appender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "autoscaling 
capacity response message with " + storage, + TransportGetAutoscalingCapacityAction.class.getName(), + Level.DEBUG, + "autoscaling capacity response [*\"policies\"*\"test\"*\"current_capacity\"*\"storage\":" + + storage + + "*\"deciders\"" + + "*\"reactive_storage\"" + + "*\"reason_summary\"*\"reason_details\"*]" + ) + ); + GetAutoscalingCapacityAction.Response capacity = capacity(); AutoscalingCapacity currentCapacity = capacity.results().get("test").currentCapacity(); assertThat(currentCapacity.node().memory().getBytes(), Matchers.equalTo(memory)); @@ -75,9 +69,6 @@ public void assertCurrentCapacity(long memory, long storage, int nodes) throws I assertThat(currentCapacity.node().storage().getBytes(), Matchers.equalTo(storage)); assertThat(currentCapacity.total().storage().getBytes(), Matchers.equalTo(storage * nodes)); appender.assertAllExpectationsMatched(); - } finally { - appender.stop(); - Loggers.removeAppender(subjectLogger, appender); } } diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java index 1448ba6b7756c..350fd5075f940 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.MockLogAppender; @@ -135,22 +134,18 @@ public void onFailure(final Exception e) { public void testAutoFollowCoordinatorLogsSkippingAutoFollowCoordinationWithNonCompliantLicense() throws Exception { final Logger logger = 
LogManager.getLogger(AutoFollowCoordinator.class); final MockLogAppender appender = new MockLogAppender(); - appender.start(); - appender.addExpectation( - new MockLogAppender.ExceptionSeenEventExpectation( - getTestName(), - logger.getName(), - Level.WARN, - "skipping auto-follower coordination", - ElasticsearchSecurityException.class, - "current license is non-compliant for [ccr]" - ) - ); - try { - // Need to add mock log appender before submitting CS update, otherwise we miss the expected log: - // (Auto followers for new remote clusters are bootstrapped when a new cluster state is published) - Loggers.addAppender(logger, appender); + try (var ignored = appender.capturing(AutoFollowCoordinator.class)) { + appender.addExpectation( + new MockLogAppender.ExceptionSeenEventExpectation( + getTestName(), + logger.getName(), + Level.WARN, + "skipping auto-follower coordination", + ElasticsearchSecurityException.class, + "current license is non-compliant for [ccr]" + ) + ); // Update the cluster state so that we have auto follow patterns and verify that we log a warning // in case of incompatible license: CountDownLatch latch = new CountDownLatch(1); @@ -203,9 +198,6 @@ public void onFailure(Exception e) { }); latch.await(); appender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(logger, appender); - appender.stop(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 9435dd56d4095..1981109a53aae 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -82,6 +82,8 @@ import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; import 
org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.core.slm.SLMFeatureSetUsage; @@ -161,6 +163,13 @@ public List getNamedWriteables() { new NamedWriteableRegistry.Entry(RoleMapperExpression.class, AnyExpression.NAME, AnyExpression::new), new NamedWriteableRegistry.Entry(RoleMapperExpression.class, FieldExpression.NAME, FieldExpression::new), new NamedWriteableRegistry.Entry(RoleMapperExpression.class, ExceptExpression.NAME, ExceptExpression::new), + // security : role descriptors + new NamedWriteableRegistry.Entry(RemoteClusterPermissions.class, RemoteClusterPermissions.NAME, RemoteClusterPermissions::new), + new NamedWriteableRegistry.Entry( + RemoteClusterPermissionGroup.class, + RemoteClusterPermissionGroup.NAME, + RemoteClusterPermissionGroup::new + ), // eql new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.EQL, EqlFeatureSetUsage::new), // esql diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilder.java index 9695aeae283e2..0cdb04c0516d7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilder.java @@ -18,14 +18,19 @@ import java.io.IOException; import java.util.Arrays; import java.util.List; +import 
java.util.stream.Stream; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; public class CrossClusterApiKeyRoleDescriptorBuilder { - public static final String[] CCS_CLUSTER_PRIVILEGE_NAMES = { "cross_cluster_search" }; + // monitor_enrich is needed for ES|QL + ENRICH and https://github.com/elastic/elasticsearch/issues/106926 is related + public static final String[] CCS_CLUSTER_PRIVILEGE_NAMES = { "cross_cluster_search", "monitor_enrich" }; public static final String[] CCR_CLUSTER_PRIVILEGE_NAMES = { "cross_cluster_replication" }; - public static final String[] CCS_AND_CCR_CLUSTER_PRIVILEGE_NAMES = { "cross_cluster_search", "cross_cluster_replication" }; + public static final String[] CCS_AND_CCR_CLUSTER_PRIVILEGE_NAMES = Stream.concat( + Arrays.stream(CCS_CLUSTER_PRIVILEGE_NAMES), + Arrays.stream(CCR_CLUSTER_PRIVILEGE_NAMES) + ).toArray(String[]::new); public static final String[] CCS_INDICES_PRIVILEGE_NAMES = { "read", "read_cross_cluster", "view_index_metadata" }; public static final String[] CCR_INDICES_PRIVILEGE_NAMES = { "cross_cluster_replication", "cross_cluster_replication_internal" }; public static final String ROLE_DESCRIPTOR_NAME = "cross_cluster"; @@ -112,6 +117,9 @@ static void validate(RoleDescriptor roleDescriptor) { if (roleDescriptor.hasRemoteIndicesPrivileges()) { throw new IllegalArgumentException("remote indices privileges must be empty"); } + if (roleDescriptor.hasRemoteClusterPermissions()) { + throw new IllegalArgumentException("remote cluster permissions must be empty"); + } final String[] clusterPrivileges = roleDescriptor.getClusterPrivileges(); if (false == Arrays.equals(clusterPrivileges, CCS_CLUSTER_PRIVILEGE_NAMES) && false == Arrays.equals(clusterPrivileges, CCR_CLUSTER_PRIVILEGE_NAMES) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java index fea925f667bcf..9c53c1483c9df 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.core.security.support.NativeRealmValidationUtil; @@ -43,6 +44,7 @@ public class PutRoleRequest extends ActionRequest { private WriteRequest.RefreshPolicy refreshPolicy = WriteRequest.RefreshPolicy.IMMEDIATE; private Map metadata; private List remoteIndicesPrivileges = new ArrayList<>(); + private RemoteClusterPermissions remoteClusterPermissions = RemoteClusterPermissions.NONE; private boolean restrictRequest = false; public PutRoleRequest() {} @@ -85,6 +87,10 @@ public boolean restrictRequest() { return restrictRequest; } + public void putRemoteCluster(RemoteClusterPermissions remoteClusterPermissions) { + this.remoteClusterPermissions = remoteClusterPermissions; + } + public void addRemoteIndex( final String[] remoteClusters, final String[] indices, @@ -206,6 +212,7 @@ public RoleDescriptor roleDescriptor() { metadata, Collections.emptyMap(), remoteIndicesPrivileges.toArray(new RoleDescriptor.RemoteIndicesPrivileges[0]), + remoteClusterPermissions, null ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java index e2da04bb61534..daf485814c799 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java @@ -39,6 +39,7 @@ public PutRoleRequestBuilder source(String name, BytesReference source, XContent request.conditionalCluster(descriptor.getConditionalClusterPrivileges()); request.addIndex(descriptor.getIndicesPrivileges()); request.addRemoteIndex(descriptor.getRemoteIndicesPrivileges()); + request.putRemoteCluster(descriptor.getRemoteClusterPermissions()); request.addApplicationPrivileges(descriptor.getApplicationPrivileges()); request.runAs(descriptor.getRunAs()); request.metadata(descriptor.getMetadata()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/RoleDescriptorRequestValidator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/RoleDescriptorRequestValidator.java index 3a24a3ef40f6e..472faee97a707 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/RoleDescriptorRequestValidator.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/RoleDescriptorRequestValidator.java @@ -64,6 +64,13 @@ public static ActionRequestValidationException validate( validationException = addValidationError(ile.getMessage(), validationException); } } + if (roleDescriptor.hasRemoteClusterPermissions()) { + try { + roleDescriptor.getRemoteClusterPermissions().validate(); + } catch (IllegalArgumentException e) { + validationException = addValidationError(e.getMessage(), validationException); + } + } if (roleDescriptor.getApplicationPrivileges() != null) { for (RoleDescriptor.ApplicationResourcePrivileges privilege : 
roleDescriptor.getApplicationPrivileges()) { try { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java index 9f62513e1b69f..c5cbe50ef1575 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java @@ -17,6 +17,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; @@ -40,6 +41,7 @@ public final class GetUserPrivilegesResponse extends ActionResponse { private final Set application; private final Set runAs; private final Set remoteIndex; + private final RemoteClusterPermissions remoteClusterPermissions; public GetUserPrivilegesResponse(StreamInput in) throws IOException { super(in); @@ -53,6 +55,11 @@ public GetUserPrivilegesResponse(StreamInput in) throws IOException { } else { remoteIndex = Set.of(); } + if (in.getTransportVersion().onOrAfter(TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS)) { + remoteClusterPermissions = new RemoteClusterPermissions(in); + } else { + remoteClusterPermissions = RemoteClusterPermissions.NONE; + } } public GetUserPrivilegesResponse( @@ -61,7 +68,8 @@ public GetUserPrivilegesResponse( Set index, Set application, Set runAs, - Set remoteIndex + Set remoteIndex, + RemoteClusterPermissions remoteClusterPermissions ) { this.cluster = 
Collections.unmodifiableSet(cluster); this.configurableClusterPrivileges = Collections.unmodifiableSet(conditionalCluster); @@ -69,6 +77,7 @@ public GetUserPrivilegesResponse( this.application = Collections.unmodifiableSet(application); this.runAs = Collections.unmodifiableSet(runAs); this.remoteIndex = Collections.unmodifiableSet(remoteIndex); + this.remoteClusterPermissions = remoteClusterPermissions; } public Set getClusterPrivileges() { @@ -87,6 +96,10 @@ public Set getRemoteIndexPrivileges() { return remoteIndex; } + public RemoteClusterPermissions getRemoteClusterPermissions() { + return remoteClusterPermissions; + } + public Set getApplicationPrivileges() { return application; } @@ -99,6 +112,10 @@ public boolean hasRemoteIndicesPrivileges() { return false == remoteIndex.isEmpty(); } + public boolean hasRemoteClusterPrivileges() { + return remoteClusterPermissions.hasPrivileges(); + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(cluster); @@ -117,6 +134,17 @@ public void writeTo(StreamOutput out) throws IOException { + "]" ); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS)) { + remoteClusterPermissions.writeTo(out); + } else if (hasRemoteClusterPrivileges()) { + throw new IllegalArgumentException( + "versions of Elasticsearch before [" + + TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS + + "] can't handle remote cluster privileges and attempted to send to [" + + out.getTransportVersion() + + "]" + ); + } } @Override @@ -133,12 +161,13 @@ public boolean equals(Object other) { && Objects.equals(index, that.index) && Objects.equals(application, that.application) && Objects.equals(runAs, that.runAs) - && Objects.equals(remoteIndex, that.remoteIndex); + && Objects.equals(remoteIndex, that.remoteIndex) + && Objects.equals(remoteClusterPermissions, that.remoteClusterPermissions); } @Override public int hashCode() { - return Objects.hash(cluster, configurableClusterPrivileges, 
index, application, runAs, remoteIndex); + return Objects.hash(cluster, configurableClusterPrivileges, index, application, runAs, remoteIndex, remoteClusterPermissions); } public record RemoteIndices(Indices indices, Set remoteClusters) implements ToXContentObject, Writeable { @@ -151,7 +180,7 @@ public RemoteIndices(StreamInput in) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); indices.innerToXContent(builder); - builder.field(RoleDescriptor.Fields.REMOTE_CLUSTERS.getPreferredName(), remoteClusters); + builder.field(RoleDescriptor.Fields.CLUSTERS.getPreferredName(), remoteClusters); return builder.endObject(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/AuthorizationEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/AuthorizationEngine.java index 17d80274e161e..80716c9f7c9df 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/AuthorizationEngine.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/AuthorizationEngine.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.security.authz; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -246,6 +247,7 @@ void checkPrivileges( */ default void getRoleDescriptorsIntersectionForRemoteCluster( final String remoteClusterAlias, + final TransportVersion remoteClusterVersion, final AuthorizationInfo authorizationInfo, final ActionListener listener ) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java index d1d24e2e4461e..d8a2900021783 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java @@ -31,6 +31,8 @@ import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine.PrivilegesToCheck; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.core.security.support.Validation; @@ -43,6 +45,7 @@ import java.util.Collections; import java.util.Comparator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -63,6 +66,7 @@ public class RoleDescriptor implements ToXContentObject, Writeable { private final ApplicationResourcePrivileges[] applicationPrivileges; private final String[] runAs; private final RemoteIndicesPrivileges[] remoteIndicesPrivileges; + private final RemoteClusterPermissions remoteClusterPermissions; private final Restriction restriction; private final Map metadata; private final Map transientMetadata; @@ -89,7 +93,7 @@ public RoleDescriptor( /** * @deprecated Use {@link #RoleDescriptor(String, String[], IndicesPrivileges[], ApplicationResourcePrivileges[], - * ConfigurableClusterPrivilege[], String[], Map, Map, RemoteIndicesPrivileges[], Restriction)} + * ConfigurableClusterPrivilege[], String[], Map, Map, RemoteIndicesPrivileges[], RemoteClusterPermissions, Restriction)} */ @Deprecated public RoleDescriptor( @@ -104,7 +108,7 @@ public RoleDescriptor( /** * @deprecated Use {@link 
#RoleDescriptor(String, String[], IndicesPrivileges[], ApplicationResourcePrivileges[], - * ConfigurableClusterPrivilege[], String[], Map, Map, RemoteIndicesPrivileges[], Restriction)} + * ConfigurableClusterPrivilege[], String[], Map, Map, RemoteIndicesPrivileges[], RemoteClusterPermissions, Restriction)} */ @Deprecated public RoleDescriptor( @@ -125,6 +129,7 @@ public RoleDescriptor( metadata, transientMetadata, RemoteIndicesPrivileges.NONE, + RemoteClusterPermissions.NONE, Restriction.NONE ); } @@ -149,6 +154,7 @@ public RoleDescriptor( metadata, transientMetadata, RemoteIndicesPrivileges.NONE, + RemoteClusterPermissions.NONE, Restriction.NONE ); } @@ -163,6 +169,7 @@ public RoleDescriptor( @Nullable Map metadata, @Nullable Map transientMetadata, @Nullable RemoteIndicesPrivileges[] remoteIndicesPrivileges, + @Nullable RemoteClusterPermissions remoteClusterPermissions, @Nullable Restriction restriction ) { this.name = name; @@ -176,6 +183,9 @@ public RoleDescriptor( ? Collections.unmodifiableMap(transientMetadata) : Collections.singletonMap("enabled", true); this.remoteIndicesPrivileges = remoteIndicesPrivileges != null ? remoteIndicesPrivileges : RemoteIndicesPrivileges.NONE; + this.remoteClusterPermissions = remoteClusterPermissions != null && remoteClusterPermissions.hasPrivileges() + ? remoteClusterPermissions + : RemoteClusterPermissions.NONE; this.restriction = restriction != null ? 
restriction : Restriction.NONE; } @@ -203,6 +213,11 @@ public RoleDescriptor(StreamInput in) throws IOException { } else { this.restriction = Restriction.NONE; } + if (in.getTransportVersion().onOrAfter(TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS)) { + this.remoteClusterPermissions = new RemoteClusterPermissions(in); + } else { + this.remoteClusterPermissions = RemoteClusterPermissions.NONE; + } } public String getName() { @@ -229,6 +244,14 @@ public boolean hasRemoteIndicesPrivileges() { return remoteIndicesPrivileges.length != 0; } + public boolean hasRemoteClusterPermissions() { + return remoteClusterPermissions.hasPrivileges(); + } + + public RemoteClusterPermissions getRemoteClusterPermissions() { + return this.remoteClusterPermissions; + } + public ApplicationResourcePrivileges[] getApplicationPrivileges() { return this.applicationPrivileges; } @@ -249,13 +272,15 @@ public boolean hasRunAs() { return runAs.length != 0; } - public boolean hasPrivilegesOtherThanIndex() { - return hasClusterPrivileges() - || hasConfigurableClusterPrivileges() + public boolean hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster() { + return hasConfigurableClusterPrivileges() || hasApplicationPrivileges() || hasRunAs() || hasRemoteIndicesPrivileges() - || hasWorkflowsRestriction(); + || hasRemoteClusterPermissions() + || hasWorkflowsRestriction() + || (hasClusterPrivileges() + && RemoteClusterPermissions.getSupportedRemoteClusterPermissions().containsAll(Arrays.asList(clusterPrivileges)) == false); } public String[] getRunAs() { @@ -308,6 +333,10 @@ public String toString() { for (RemoteIndicesPrivileges group : remoteIndicesPrivileges) { sb.append(group.toString()).append(","); } + sb.append("], remoteClusterPrivileges=["); + for (RemoteClusterPermissionGroup group : remoteClusterPermissions.groups()) { + sb.append(group.toString()).append(","); + } sb.append("], restriction=").append(restriction); sb.append("]"); return sb.toString(); @@ -393,6 +422,9 @@ public 
XContentBuilder toXContent(XContentBuilder builder, Params params, boolea if (hasRemoteIndicesPrivileges()) { builder.xContentList(Fields.REMOTE_INDICES.getPreferredName(), remoteIndicesPrivileges); } + if (hasRemoteClusterPermissions()) { + builder.array(Fields.REMOTE_CLUSTER.getPreferredName(), remoteClusterPermissions); + } if (hasRestriction()) { builder.field(Fields.RESTRICTION.getPreferredName(), restriction); } @@ -418,6 +450,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(WORKFLOWS_RESTRICTION_VERSION)) { restriction.writeTo(out); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS)) { + remoteClusterPermissions.writeTo(out); + } } public static Parser.Builder parserBuilder() { @@ -478,6 +513,7 @@ public RoleDescriptor parse(String name, XContentParser parser) throws IOExcepti String currentFieldName = null; IndicesPrivileges[] indicesPrivileges = null; RemoteIndicesPrivileges[] remoteIndicesPrivileges = null; + RemoteClusterPermissions remoteClusterPermissions = null; String[] clusterPrivileges = null; List configurableClusterPrivileges = Collections.emptyList(); ApplicationResourcePrivileges[] applicationPrivileges = null; @@ -522,6 +558,8 @@ public RoleDescriptor parse(String name, XContentParser parser) throws IOExcepti } } else if (Fields.REMOTE_INDICES.match(currentFieldName, parser.getDeprecationHandler())) { remoteIndicesPrivileges = parseRemoteIndices(name, parser); + } else if (Fields.REMOTE_CLUSTER.match(currentFieldName, parser.getDeprecationHandler())) { + remoteClusterPermissions = parseRemoteCluster(name, parser); } else if (allowRestriction && Fields.RESTRICTION.match(currentFieldName, parser.getDeprecationHandler())) { restriction = Restriction.parse(name, parser); } else if (Fields.TYPE.match(currentFieldName, parser.getDeprecationHandler())) { @@ -544,6 +582,7 @@ public RoleDescriptor parse(String name, XContentParser parser) throws IOExcepti 
metadata, null, remoteIndicesPrivileges, + remoteClusterPermissions, restriction ); @@ -700,12 +739,69 @@ private static RemoteIndicesPrivileges parseRemoteIndex(String roleName, XConten throw new ElasticsearchParseException( "failed to parse remote indices privileges for role [{}]. missing required [{}] field", roleName, - Fields.REMOTE_CLUSTERS + Fields.CLUSTERS ); } return new RemoteIndicesPrivileges(parsed.indicesPrivileges(), parsed.remoteClusters()); } + private static RemoteClusterPermissions parseRemoteCluster(final String roleName, final XContentParser parser) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_ARRAY) { + throw new ElasticsearchParseException( + "failed to parse remote_cluster for role [{}]. expected field [{}] value to be an array, but found [{}] instead", + roleName, + parser.currentName(), + parser.currentToken() + ); + } + RemoteClusterPermissions remoteClusterPermissions = new RemoteClusterPermissions(); + String[] privileges = null; + String[] clusters = null; + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Fields.PRIVILEGES.match(currentFieldName, parser.getDeprecationHandler())) { + privileges = readStringArray(roleName, parser, false); + if (privileges.length != 1 + || RemoteClusterPermissions.getSupportedRemoteClusterPermissions() + .contains(privileges[0].trim().toLowerCase(Locale.ROOT)) == false) { + throw new ElasticsearchParseException( + "failed to parse remote_cluster for role [{}]. 
" + + RemoteClusterPermissions.getSupportedRemoteClusterPermissions() + + " is the only value allowed for [{}] within [remote_cluster]", + roleName, + currentFieldName + ); + } + } else if (Fields.CLUSTERS.match(currentFieldName, parser.getDeprecationHandler())) { + clusters = readStringArray(roleName, parser, false); + } else { + throw new ElasticsearchParseException( + "failed to parse remote_cluster for role [{}]. unexpected field [{}]", + roleName, + currentFieldName + ); + } + } + if (privileges != null && clusters == null) { + throw new ElasticsearchParseException( + "failed to parse remote_cluster for role [{}]. [clusters] must be defined when [privileges] are defined ", + roleName + ); + } else if (privileges == null && clusters != null) { + throw new ElasticsearchParseException( + "failed to parse remote_cluster for role [{}]. [privileges] must be defined when [clusters] are defined ", + roleName + ); + } + remoteClusterPermissions.addGroup(new RemoteClusterPermissionGroup(privileges, clusters)); + } + return remoteClusterPermissions; + } + private record IndicesPrivilegesWithOptionalRemoteClusters(IndicesPrivileges indicesPrivileges, String[] remoteClusters) {} public static IndicesPrivileges parseIndexWithPredefinedPrivileges(final String roleName, String[] privileges, XContentParser parser) @@ -908,7 +1004,7 @@ private static IndicesPrivilegesWithOptionalRemoteClusters parseIndexWithOptiona Fields.TRANSIENT_METADATA ); } - } else if (allowRemoteClusters && Fields.REMOTE_CLUSTERS.match(currentFieldName, parser.getDeprecationHandler())) { + } else if (allowRemoteClusters && Fields.CLUSTERS.match(currentFieldName, parser.getDeprecationHandler())) { remoteClusters = readStringArray(roleName, parser, false); } else { throw new ElasticsearchParseException( @@ -1046,7 +1142,7 @@ public RemoteIndicesPrivileges(StreamInput in) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { 
builder.startObject(); indicesPrivileges.innerToXContent(builder, true); - builder.array(Fields.REMOTE_CLUSTERS.getPreferredName(), remoteClusters); + builder.array(Fields.CLUSTERS.getPreferredName(), remoteClusters); return builder.endObject(); } @@ -1148,7 +1244,7 @@ public RemoteIndicesPrivileges build() { "the [" + Fields.REMOTE_INDICES + "] sub-field [" - + Fields.REMOTE_CLUSTERS + + Fields.CLUSTERS + "] must refer to at least one cluster alias or cluster alias pattern" ); } @@ -1703,6 +1799,7 @@ public interface Fields { ParseField INDEX = new ParseField("index"); ParseField INDICES = new ParseField("indices"); ParseField REMOTE_INDICES = new ParseField("remote_indices"); + ParseField REMOTE_CLUSTER = new ParseField("remote_cluster"); ParseField APPLICATIONS = new ParseField("applications"); ParseField RUN_AS = new ParseField("run_as"); ParseField NAMES = new ParseField("names"); @@ -1710,7 +1807,7 @@ public interface Fields { ParseField RESOURCES = new ParseField("resources"); ParseField QUERY = new ParseField("query"); ParseField PRIVILEGES = new ParseField("privileges"); - ParseField REMOTE_CLUSTERS = new ParseField("clusters"); + ParseField CLUSTERS = new ParseField("clusters"); ParseField APPLICATION = new ParseField("application"); ParseField FIELD_PERMISSIONS = new ParseField("field_security"); ParseField FIELD_PERMISSIONS_2X = new ParseField("fields"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java index b1d4326e676ab..ea32ba13ae576 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import 
org.apache.lucene.util.automaton.Automaton; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; @@ -75,6 +76,11 @@ public RemoteIndicesPermission remoteIndices() { throw new UnsupportedOperationException("cannot retrieve remote indices permission on limited role"); } + @Override + public RemoteClusterPermissions remoteCluster() { + throw new UnsupportedOperationException("cannot retrieve remote cluster permission on limited role"); + } + @Override public boolean hasWorkflowsRestriction() { return baseRole.hasWorkflowsRestriction() || limitedByRole.hasWorkflowsRestriction(); @@ -152,8 +158,14 @@ public IndicesAccessControl authorize( } @Override - public RoleDescriptorsIntersection getRoleDescriptorsIntersectionForRemoteCluster(final String remoteClusterAlias) { - final RoleDescriptorsIntersection baseIntersection = baseRole.getRoleDescriptorsIntersectionForRemoteCluster(remoteClusterAlias); + public RoleDescriptorsIntersection getRoleDescriptorsIntersectionForRemoteCluster( + final String remoteClusterAlias, + TransportVersion remoteClusterVersion + ) { + final RoleDescriptorsIntersection baseIntersection = baseRole.getRoleDescriptorsIntersectionForRemoteCluster( + remoteClusterAlias, + remoteClusterVersion + ); // Intersecting with empty descriptors list should result in an empty intersection. 
if (baseIntersection.roleDescriptorsList().isEmpty()) { logger.trace( @@ -166,7 +178,8 @@ public RoleDescriptorsIntersection getRoleDescriptorsIntersectionForRemoteCluste return RoleDescriptorsIntersection.EMPTY; } final RoleDescriptorsIntersection limitedByIntersection = limitedByRole.getRoleDescriptorsIntersectionForRemoteCluster( - remoteClusterAlias + remoteClusterAlias, + remoteClusterVersion ); if (limitedByIntersection.roleDescriptorsList().isEmpty()) { logger.trace( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionGroup.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionGroup.java new file mode 100644 index 0000000000000..0f5a755e9fe01 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionGroup.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.authz.permission; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.support.StringMatcher; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * Represents a group of permissions for a remote cluster. 
For example: + * + { + "privileges" : ["monitor_enrich"], + "clusters" : ["*"] + } + * + */ +public class RemoteClusterPermissionGroup implements NamedWriteable, ToXContentObject { + + public static final String NAME = "remote_cluster_permission_group"; + private final String[] clusterPrivileges; + private final String[] remoteClusterAliases; + private final StringMatcher remoteClusterAliasMatcher; + + public RemoteClusterPermissionGroup(StreamInput in) throws IOException { + clusterPrivileges = in.readStringArray(); + remoteClusterAliases = in.readStringArray(); + remoteClusterAliasMatcher = StringMatcher.of(remoteClusterAliases); + } + + /** + * @param clusterPrivileges The list of cluster privileges that are allowed for the remote cluster. must not be null or empty. + * @param remoteClusterAliases The list of remote clusters that the privileges apply to. must not be null or empty. + */ + public RemoteClusterPermissionGroup(String[] clusterPrivileges, String[] remoteClusterAliases) { + if (clusterPrivileges == null + || remoteClusterAliases == null + || clusterPrivileges.length <= 0 + || remoteClusterAliases.length <= 0) { + throw new IllegalArgumentException("remote cluster groups must not be null or empty"); + } + if (Arrays.stream(clusterPrivileges).anyMatch(s -> Strings.hasText(s) == false)) { + throw new IllegalArgumentException("remote_cluster privileges must contain valid non-empty, non-null values"); + } + if (Arrays.stream(remoteClusterAliases).anyMatch(s -> Strings.hasText(s) == false)) { + throw new IllegalArgumentException("remote_cluster clusters aliases must contain valid non-empty, non-null values"); + } + + this.clusterPrivileges = clusterPrivileges; + this.remoteClusterAliases = remoteClusterAliases; + this.remoteClusterAliasMatcher = StringMatcher.of(remoteClusterAliases); + } + + /** + * @param remoteClusterAlias The remote cluster alias to check to see if has privileges defined in this group. 
+ * @return true if the remote cluster alias has privileges defined in this group, false otherwise. + */ + public boolean hasPrivileges(final String remoteClusterAlias) { + return remoteClusterAliasMatcher.test(remoteClusterAlias); + } + + /** + * @return A copy of the cluster privileges. + */ + public String[] clusterPrivileges() { + return Arrays.copyOf(clusterPrivileges, clusterPrivileges.length); + } + + /** + * @return A copy of the cluster aliases. + */ + public String[] remoteClusterAliases() { + return Arrays.copyOf(remoteClusterAliases, remoteClusterAliases.length); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.array(RoleDescriptor.Fields.PRIVILEGES.getPreferredName(), clusterPrivileges); + builder.array(RoleDescriptor.Fields.CLUSTERS.getPreferredName(), remoteClusterAliases); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringArray(clusterPrivileges); + out.writeStringArray(remoteClusterAliases); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RemoteClusterPermissionGroup that = (RemoteClusterPermissionGroup) o; + return Arrays.equals(clusterPrivileges, that.clusterPrivileges) + && Arrays.equals(remoteClusterAliases, that.remoteClusterAliases) + && Objects.equals(remoteClusterAliasMatcher, that.remoteClusterAliasMatcher); + } + + @Override + public int hashCode() { + int result = Objects.hash(remoteClusterAliasMatcher); + result = 31 * result + Arrays.hashCode(clusterPrivileges); + result = 31 * result + Arrays.hashCode(remoteClusterAliases); + return result; + } + + @Override + public String toString() { + return "RemoteClusterPermissionGroup{" + + "privileges=" + + Arrays.toString(clusterPrivileges) + + ", clusters=" + + Arrays.toString(remoteClusterAliases) + + '}'; + 
} + + @Override + public String getWriteableName() { + return NAME; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissions.java new file mode 100644 index 0000000000000..2960c5aaa53e7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissions.java @@ -0,0 +1,221 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.authz.permission; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * Represents the set of permissions for remote clusters. This is intended to be the model for both the {@link RoleDescriptor} + * and {@link Role}. 
This model is not intended to be sent to a remote cluster, but can be (wire) serialized within a single cluster + * as well as the Xcontent serialization for the REST API and persistence of the role in the security index. The privileges modeled here + * will be converted to the appropriate cluster privileges when sent to a remote cluster. + * For example, on the local/querying cluster this model represents the following: + * + * "remote_cluster" : [ + * { + * "privileges" : ["foo"], + * "clusters" : ["clusterA"] + * }, + * { + * "privileges" : ["bar"], + * "clusters" : ["clusterB"] + * } + * ] + * + * when sent to the remote cluster "clusterA", the privileges will be converted to the appropriate cluster privileges. For example: + * + * "cluster": ["foo"] + * + * and when sent to the remote cluster "clusterB", the privileges will be converted to the appropriate cluster privileges. For example: + * + * "cluster": ["bar"] + * + * If the remote cluster does not support the privilege, as determined by the remote cluster version, the privilege will be not be sent. 
+ */ +public class RemoteClusterPermissions implements NamedWriteable, ToXContentObject { + + public static final String NAME = "remote_cluster_permissions"; + private static final Logger logger = LogManager.getLogger(RemoteClusterPermissions.class); + private final List remoteClusterPermissionGroups; + + // package private non-final for testing + static Map> allowedRemoteClusterPermissions = Map.of( + TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS, + Set.of(ClusterPrivilegeResolver.MONITOR_ENRICH.name()) + ); + + public static final RemoteClusterPermissions NONE = new RemoteClusterPermissions(); + + public static Set getSupportedRemoteClusterPermissions() { + return allowedRemoteClusterPermissions.values().stream().flatMap(Set::stream).collect(Collectors.toSet()); + } + + public RemoteClusterPermissions(StreamInput in) throws IOException { + remoteClusterPermissionGroups = in.readNamedWriteableCollectionAsList(RemoteClusterPermissionGroup.class); + } + + public RemoteClusterPermissions() { + remoteClusterPermissionGroups = new ArrayList<>(); + } + + public RemoteClusterPermissions addGroup(RemoteClusterPermissionGroup remoteClusterPermissionGroup) { + Objects.requireNonNull(remoteClusterPermissionGroup, "remoteClusterPermissionGroup must not be null"); + if (this == NONE) { + throw new IllegalArgumentException("Cannot add a group to the `NONE` instance"); + } + remoteClusterPermissionGroups.add(remoteClusterPermissionGroup); + return this; + } + + /** + * Gets the privilege names for the remote cluster. This method will collapse all groups to single String[] all lowercase + * and will only return the appropriate privileges for the provided remote cluster version. 
+ */ + public String[] privilegeNames(final String remoteClusterAlias, TransportVersion remoteClusterVersion) { + + // get all privileges for the remote cluster + Set groupPrivileges = remoteClusterPermissionGroups.stream() + .filter(group -> group.hasPrivileges(remoteClusterAlias)) + .flatMap(groups -> Arrays.stream(groups.clusterPrivileges())) + .distinct() + .map(s -> s.toLowerCase(Locale.ROOT)) + .collect(Collectors.toSet()); + + // find all the privileges that are allowed for the remote cluster version + Set allowedPermissionsPerVersion = allowedRemoteClusterPermissions.entrySet() + .stream() + .filter((entry) -> entry.getKey().onOrBefore(remoteClusterVersion)) + .map(Map.Entry::getValue) + .flatMap(Set::stream) + .map(s -> s.toLowerCase(Locale.ROOT)) + .collect(Collectors.toSet()); + + // intersect the two sets to get the allowed privileges for the remote cluster version + Set allowedPrivileges = new HashSet<>(groupPrivileges); + boolean hasRemovedPrivileges = allowedPrivileges.retainAll(allowedPermissionsPerVersion); + if (hasRemovedPrivileges) { + HashSet removedPrivileges = new HashSet<>(groupPrivileges); + removedPrivileges.removeAll(allowedPermissionsPerVersion); + logger.info( + "Removed unsupported remote cluster permissions {} for remote cluster [{}]. " + + "Due to the remote cluster version, only the following permissions are allowed: {}", + removedPrivileges, + remoteClusterAlias, + allowedPrivileges + ); + } + + return allowedPrivileges.stream().sorted().toArray(String[]::new); + } + + /** + * Validates the remote cluster permissions (regardless of remote cluster version). + * This method will throw an {@link IllegalArgumentException} if the permissions are invalid. + * Generally, this method is just a safety check and validity should be checked before adding the permissions to this class. 
+ */ + public void validate() { + assert hasPrivileges(); + Set invalid = getUnsupportedPrivileges(); + if (invalid.isEmpty() == false) { + throw new IllegalArgumentException( + "Invalid remote_cluster permissions found. Please remove the following: " + + invalid + + " Only " + + getSupportedRemoteClusterPermissions() + + " are allowed" + ); + } + } + + /** + * Returns the unsupported privileges in the remote cluster permissions (regardless of remote cluster version). + * Empty set if all privileges are supported. + */ + private Set getUnsupportedPrivileges() { + Set invalid = new HashSet<>(); + for (RemoteClusterPermissionGroup group : remoteClusterPermissionGroups) { + for (String namedPrivilege : group.clusterPrivileges()) { + String toCheck = namedPrivilege.toLowerCase(Locale.ROOT); + if (getSupportedRemoteClusterPermissions().contains(toCheck) == false) { + invalid.add(namedPrivilege); + } + } + } + return invalid; + } + + public boolean hasPrivileges(final String remoteClusterAlias) { + return remoteClusterPermissionGroups.stream().anyMatch(remoteIndicesGroup -> remoteIndicesGroup.hasPrivileges(remoteClusterAlias)); + } + + public boolean hasPrivileges() { + return remoteClusterPermissionGroups.isEmpty() == false; + } + + public List groups() { + return Collections.unmodifiableList(remoteClusterPermissionGroups); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + for (RemoteClusterPermissionGroup remoteClusterPermissionGroup : remoteClusterPermissionGroups) { + builder.value(remoteClusterPermissionGroup); + } + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteableCollection(remoteClusterPermissionGroups); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RemoteClusterPermissions that = (RemoteClusterPermissions) o; + return 
Objects.equals(remoteClusterPermissionGroups, that.remoteClusterPermissionGroups); + } + + @Override + public int hashCode() { + return Objects.hash(remoteClusterPermissionGroups); + } + + @Override + public String toString() { + return "RemoteClusterPermissions{" + "remoteClusterPermissionGroups=" + remoteClusterPermissionGroups + '}'; + } + + @Override + public String getWriteableName() { + return NAME; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java index 0aa562d817a1d..0fc04e8cc9a52 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.security.authz.permission; import org.apache.lucene.util.automaton.Automaton; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.set.Sets; @@ -62,6 +63,8 @@ public interface Role { RemoteIndicesPermission remoteIndices(); + RemoteClusterPermissions remoteCluster(); + boolean hasWorkflowsRestriction(); /** @@ -185,10 +188,14 @@ IndicesAccessControl authorize( * Returns the intersection of role descriptors defined for a remote cluster with the given alias. 
* * @param remoteClusterAlias the remote cluster alias for which to return a role descriptors intersection + * @param remoteClusterVersion the version of the remote cluster * @return an intersection of role descriptors that describe the remote privileges towards a given cluster, * otherwise an empty intersection if remote privileges are not defined */ - RoleDescriptorsIntersection getRoleDescriptorsIntersectionForRemoteCluster(String remoteClusterAlias); + RoleDescriptorsIntersection getRoleDescriptorsIntersectionForRemoteCluster( + String remoteClusterAlias, + TransportVersion remoteClusterVersion + ); /*** * Creates a {@link LimitedRole} that uses this Role as base and the given role as limited-by. @@ -214,10 +221,11 @@ class Builder { private ClusterPermission cluster = ClusterPermission.NONE; private RunAsPermission runAs = RunAsPermission.NONE; private final List groups = new ArrayList<>(); - private final Map, List> remoteGroups = new HashMap<>(); + private final Map, List> remoteIndicesGroups = new HashMap<>(); private final List>> applicationPrivs = new ArrayList<>(); private final RestrictedIndices restrictedIndices; private WorkflowsRestriction workflowsRestriction = WorkflowsRestriction.NONE; + private RemoteClusterPermissions remoteClusterPermissions = null; private Builder(RestrictedIndices restrictedIndices, String[] names) { this.restrictedIndices = restrictedIndices; @@ -259,7 +267,7 @@ public Builder add( return this; } - public Builder addRemoteGroup( + public Builder addRemoteIndicesGroup( final Set remoteClusterAliases, final FieldPermissions fieldPermissions, final Set query, @@ -267,11 +275,21 @@ public Builder addRemoteGroup( final boolean allowRestrictedIndices, final String... 
indices ) { - remoteGroups.computeIfAbsent(remoteClusterAliases, k -> new ArrayList<>()) + remoteIndicesGroups.computeIfAbsent(remoteClusterAliases, k -> new ArrayList<>()) .add(new IndicesPermissionGroupDefinition(privilege, fieldPermissions, query, allowRestrictedIndices, indices)); return this; } + public Builder addRemoteClusterPermissions(RemoteClusterPermissions remoteClusterPermissions) { + Objects.requireNonNull(remoteClusterPermissions, "remoteClusterPermissions must not be null"); + assert this.remoteClusterPermissions == null : "addRemoteClusterPermissions should only be called once"; + if (remoteClusterPermissions.hasPrivileges()) { + remoteClusterPermissions.validate(); + } + this.remoteClusterPermissions = remoteClusterPermissions; + return this; + } + public Builder addApplicationPrivilege(ApplicationPrivilege privilege, Set resources) { applicationPrivs.add(new Tuple<>(privilege, resources)); return this; @@ -304,12 +322,13 @@ public SimpleRole build() { indices = indicesBuilder.build(); } - final RemoteIndicesPermission remoteIndices; - if (remoteGroups.isEmpty()) { - remoteIndices = RemoteIndicesPermission.NONE; + final RemoteIndicesPermission remoteIndicesPermission; + if (remoteIndicesGroups.isEmpty()) { + remoteIndicesPermission = RemoteIndicesPermission.NONE; } else { final RemoteIndicesPermission.Builder remoteIndicesBuilder = new RemoteIndicesPermission.Builder(); - for (final Map.Entry, List> remoteGroupEntry : remoteGroups.entrySet()) { + for (final Map.Entry, List> remoteGroupEntry : remoteIndicesGroups + .entrySet()) { final var clusterAlias = remoteGroupEntry.getKey(); for (IndicesPermissionGroupDefinition group : remoteGroupEntry.getValue()) { remoteIndicesBuilder.addGroup( @@ -322,13 +341,22 @@ public SimpleRole build() { ); } } - remoteIndices = remoteIndicesBuilder.build(); + remoteIndicesPermission = remoteIndicesBuilder.build(); } final ApplicationPermission applicationPermission = applicationPrivs.isEmpty() ? 
ApplicationPermission.NONE : new ApplicationPermission(applicationPrivs); - return new SimpleRole(names, cluster, indices, applicationPermission, runAs, remoteIndices, workflowsRestriction); + return new SimpleRole( + names, + cluster, + indices, + applicationPermission, + runAs, + remoteIndicesPermission, + remoteClusterPermissions == null ? RemoteClusterPermissions.NONE : remoteClusterPermissions, + workflowsRestriction + ); } private static class IndicesPermissionGroupDefinition { @@ -394,7 +422,7 @@ static SimpleRole buildFromRoleDescriptor( assert Arrays.equals(new String[] { "*" }, clusterAliases) : "reserved role should not define remote indices privileges for specific clusters"; final RoleDescriptor.IndicesPrivileges indicesPrivileges = remoteIndicesPrivileges.indicesPrivileges(); - builder.addRemoteGroup( + builder.addRemoteIndicesGroup( Set.of(clusterAliases), fieldPermissionsCache.getFieldPermissions( new FieldPermissionsDefinition(indicesPrivileges.getGrantedFields(), indicesPrivileges.getDeniedFields()) @@ -406,6 +434,15 @@ static SimpleRole buildFromRoleDescriptor( ); } + RemoteClusterPermissions remoteClusterPermissions = roleDescriptor.getRemoteClusterPermissions(); + for (RemoteClusterPermissionGroup group : remoteClusterPermissions.groups()) { + final String[] clusterAliases = group.remoteClusterAliases(); + // note: this validation only occurs from reserved roles + assert Arrays.equals(new String[] { "*" }, clusterAliases) + : "reserved role should not define remote cluster privileges for specific clusters"; + } + builder.addRemoteClusterPermissions(remoteClusterPermissions); + for (RoleDescriptor.ApplicationResourcePrivileges applicationPrivilege : roleDescriptor.getApplicationPrivileges()) { ApplicationPrivilege.get( applicationPrivilege.getApplication(), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/SimpleRole.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/SimpleRole.java index 08b173a962a71..08c86c5f71f4f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/SimpleRole.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/SimpleRole.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.security.authz.permission; import org.apache.lucene.util.automaton.Automaton; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.cache.Cache; @@ -51,7 +52,8 @@ public class SimpleRole implements Role { private final IndicesPermission indices; private final ApplicationPermission application; private final RunAsPermission runAs; - private final RemoteIndicesPermission remoteIndices; + private final RemoteIndicesPermission remoteIndicesPermission; + private final RemoteClusterPermissions remoteClusterPermissions; private final WorkflowsRestriction workflowsRestriction; SimpleRole( @@ -60,7 +62,8 @@ public class SimpleRole implements Role { IndicesPermission indices, ApplicationPermission application, RunAsPermission runAs, - RemoteIndicesPermission remoteIndices, + RemoteIndicesPermission remoteIndicesPermission, + RemoteClusterPermissions remoteClusterPermissions, WorkflowsRestriction workflowsRestriction ) { this.names = names; @@ -68,7 +71,8 @@ public class SimpleRole implements Role { this.indices = Objects.requireNonNull(indices); this.application = Objects.requireNonNull(application); this.runAs = Objects.requireNonNull(runAs); - this.remoteIndices = Objects.requireNonNull(remoteIndices); + this.remoteIndicesPermission = Objects.requireNonNull(remoteIndicesPermission); + this.remoteClusterPermissions = Objects.requireNonNull(remoteClusterPermissions); this.workflowsRestriction = 
Objects.requireNonNull(workflowsRestriction); } @@ -99,7 +103,12 @@ public RunAsPermission runAs() { @Override public RemoteIndicesPermission remoteIndices() { - return remoteIndices; + return remoteIndicesPermission; + } + + @Override + public RemoteClusterPermissions remoteCluster() { + return remoteClusterPermissions; } @Override @@ -194,11 +203,17 @@ public IndicesAccessControl authorize( } @Override - public RoleDescriptorsIntersection getRoleDescriptorsIntersectionForRemoteCluster(final String remoteClusterAlias) { - final RemoteIndicesPermission remoteIndicesPermission = remoteIndices.forCluster(remoteClusterAlias); - if (remoteIndicesPermission.remoteIndicesGroups().isEmpty()) { + public RoleDescriptorsIntersection getRoleDescriptorsIntersectionForRemoteCluster( + final String remoteClusterAlias, + TransportVersion remoteClusterVersion + ) { + final RemoteIndicesPermission remoteIndicesPermission = this.remoteIndicesPermission.forCluster(remoteClusterAlias); + + if (remoteIndicesPermission.remoteIndicesGroups().isEmpty() + && remoteClusterPermissions.hasPrivileges(remoteClusterAlias) == false) { return RoleDescriptorsIntersection.EMPTY; } + final List indicesPrivileges = new ArrayList<>(); for (RemoteIndicesPermission.RemoteIndicesGroup remoteIndicesGroup : remoteIndicesPermission.remoteIndicesGroups()) { for (IndicesPermission.Group indicesGroup : remoteIndicesGroup.indicesPermissionGroups()) { @@ -209,7 +224,7 @@ public RoleDescriptorsIntersection getRoleDescriptorsIntersectionForRemoteCluste return new RoleDescriptorsIntersection( new RoleDescriptor( REMOTE_USER_ROLE_NAME, - null, + remoteClusterPermissions.privilegeNames(remoteClusterAlias, remoteClusterVersion), // The role descriptors constructed here may be cached in raw byte form, using a hash of their content as a // cache key; we therefore need deterministic order when constructing them here, to ensure cache hits for // equivalent role descriptors diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index cdb7f44d41e4a..8e4f9108c3b9c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -406,6 +406,7 @@ static RoleDescriptor kibanaSystem(String name) { getRemoteIndicesReadPrivileges("metrics-apm.*"), getRemoteIndicesReadPrivileges("traces-apm.*"), getRemoteIndicesReadPrivileges("traces-apm-*") }, + null, null ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index dc5b8bfcce262..0793578004a4e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -19,6 +19,8 @@ import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction; import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.support.MetadataUtils; import org.elasticsearch.xpack.core.security.user.KibanaSystemUser; import org.elasticsearch.xpack.core.security.user.UsernamesField; @@ -94,6 +96,12 @@ public class ReservedRolesStore implements BiConsumer, ActionListene .build(), "*" ) }, + new 
RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new String[] { "*" } + ) + ), null ); @@ -192,6 +200,7 @@ private static Map initializeReservedRoles() { getRemoteIndicesReadPrivileges(".monitoring-*"), getRemoteIndicesReadPrivileges("/metrics-(beats|elasticsearch|enterprisesearch|kibana|logstash).*/"), getRemoteIndicesReadPrivileges("metricbeat-*") }, + null, null ) ), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/StringMatcher.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/StringMatcher.java index ede11fe157487..5e3a39a6e16f5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/StringMatcher.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/StringMatcher.java @@ -191,4 +191,17 @@ private static String getPatternsDescription(Collection patterns) { return description; } } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + StringMatcher that = (StringMatcher) o; + return Objects.equals(description, that.description) && Objects.equals(predicate, that.predicate); + } + + @Override + public int hashCode() { + return Objects.hash(description, predicate); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java index edf7156125e70..1413d7f87eaa1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java @@ -45,6 +45,7 @@ public class SystemUser extends InternalUser { null, null, null, + null, null ); diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTests.java index 583b336b3f6eb..525c805f37929 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTests.java @@ -70,6 +70,7 @@ public void testRoleDescriptorValidation() { Map.of("_key", "value"), null, null, + null, new RoleDescriptor.Restriction(unknownWorkflows) ) ), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestTests.java index eee2e6e7da338..17298c04709a4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestTests.java @@ -105,6 +105,7 @@ public void testRoleDescriptorValidation() { Map.of("_key", "value"), null, null, + null, new RoleDescriptor.Restriction(unknownWorkflows) ) ), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilderTests.java index e03ec6fa083eb..b64e8cadf5203 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilderTests.java 
@@ -14,9 +14,12 @@ import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import java.io.IOException; +import java.util.Set; +import static org.elasticsearch.xpack.core.security.action.apikey.CrossClusterApiKeyRoleDescriptorBuilder.CCS_CLUSTER_PRIVILEGE_NAMES; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -39,7 +42,7 @@ public void testBuildForSearchOnly() throws IOException { assertRoleDescriptor( roleDescriptor, - new String[] { "cross_cluster_search" }, + new String[] { "cross_cluster_search", "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("metrics") @@ -99,7 +102,7 @@ public void testBuildForSearchAndReplication() throws IOException { assertRoleDescriptor( roleDescriptor, - new String[] { "cross_cluster_search", "cross_cluster_replication" }, + new String[] { "cross_cluster_search", "cross_cluster_replication", "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("metrics") @@ -153,6 +156,12 @@ public void testEmptyAccessIsNotAllowed() throws IOException { assertThat(e2.getMessage(), containsString("doesn't support values of type: VALUE_NULL")); } + public void testAPIKeyAllowsAllRemoteClusterPrivilegesForCCS() throws IOException { + // if users can add remote cluster permissions to a role, then the APIKey should also allow that for that permission + // the inverse however, is not guaranteed. 
cross_cluster_search exists largely for internal use and is not exposed to the users role + assertTrue(Set.of(CCS_CLUSTER_PRIVILEGE_NAMES).containsAll(RemoteClusterPermissions.getSupportedRemoteClusterPermissions())); + } + private static void assertRoleDescriptor( RoleDescriptor roleDescriptor, String[] clusterPrivileges, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java index 65a615d24e16e..b1b39c82cf6c1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java @@ -275,7 +275,7 @@ public void testToXContent() throws IOException { "role_descriptors": { "cross_cluster": { "cluster": [ - "cross_cluster_search", "cross_cluster_replication" + "cross_cluster_search", "monitor_enrich", "cross_cluster_replication" ], "indices": [ { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTests.java index 7b85c71c7519f..161e9419f9561 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTests.java @@ -62,6 +62,7 @@ public void testRoleDescriptorValidation() { Map.of("_key", "value"), null, null, + null, new RoleDescriptor.Restriction(workflows.toArray(String[]::new)) ) ), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java index 8accbc1ff617e..97255502bc7be 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java @@ -10,11 +10,18 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ApplicationResourcePrivileges; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.support.NativeRealmValidationUtil; import org.junit.BeforeClass; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; import java.util.Locale; +import java.util.Set; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -76,6 +83,30 @@ public void testValidationErrorWithUnknownIndexPrivilegeName() { assertValidationError("unknown index privilege [" + unknownIndexPrivilegeName.toLowerCase(Locale.ROOT) + "]", request); } + public void testValidationErrorWithUnknownRemoteClusterPrivilegeName() { + final PutRoleRequest request = new PutRoleRequest(); + request.name(randomAlphaOfLengthBetween(4, 9)); + RemoteClusterPermissions remoteClusterPermissions = new RemoteClusterPermissions(); + Set validUnsupportedNames = new HashSet<>(ClusterPrivilegeResolver.names()); + validUnsupportedNames.removeAll(RemoteClusterPermissions.getSupportedRemoteClusterPermissions()); + for (int i = 0; i < randomIntBetween(1, 10); i++) { + if 
(randomBoolean()) { + // unknown cluster privilege + remoteClusterPermissions.addGroup( + new RemoteClusterPermissionGroup(new String[] { "_x" + randomAlphaOfLengthBetween(4, 9) }, new String[] { "valid" }) + ); + } else { + // known but unsupported cluster privilege + remoteClusterPermissions.addGroup( + new RemoteClusterPermissionGroup(validUnsupportedNames.toArray(new String[0]), new String[] { "valid" }) + ); + } + } + request.putRemoteCluster(remoteClusterPermissions); + assertValidationError("Invalid remote_cluster permissions found. Please remove the following: [", request); + assertValidationError("Only [monitor_enrich] are allowed", request); + } + public void testValidationErrorWithEmptyClustersInRemoteIndices() { final PutRoleRequest request = new PutRoleRequest(); request.name(randomAlphaOfLengthBetween(4, 9)); @@ -91,6 +122,18 @@ public void testValidationErrorWithEmptyClustersInRemoteIndices() { assertValidationError("remote index cluster alias cannot be an empty string", request); } + public void testValidationErrorWithEmptyClustersInRemoteCluster() { + final PutRoleRequest request = new PutRoleRequest(); + request.name(randomAlphaOfLengthBetween(4, 9)); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "valid" }) + ).addGroup(new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "" })) + ); + assertThat(iae.getMessage(), containsString("remote_cluster clusters aliases must contain valid non-empty, non-null values")); + } + public void testValidationSuccessWithCorrectRemoteIndexPrivilegeClusters() { final PutRoleRequest request = new PutRoleRequest(); request.name(randomAlphaOfLengthBetween(4, 9)); @@ -111,6 +154,23 @@ public void testValidationSuccessWithCorrectRemoteIndexPrivilegeClusters() { assertSuccessfulValidation(request); } + public void 
testValidationSuccessWithCorrectRemoteClusterPrivilegeClusters() { + final PutRoleRequest request = new PutRoleRequest(); + request.name(randomAlphaOfLengthBetween(4, 9)); + RemoteClusterPermissions remoteClusterPermissions = new RemoteClusterPermissions(); + for (int i = 0; i < randomIntBetween(1, 10); i++) { + List aliases = new ArrayList<>(); + for (int j = 0; j < randomIntBetween(1, 10); j++) { + aliases.add(randomAlphaOfLengthBetween(1, 10)); + } + remoteClusterPermissions.addGroup( + new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, aliases.toArray(new String[0])) + ); + } + request.putRemoteCluster(remoteClusterPermissions); + assertSuccessfulValidation(request); + } + public void testValidationSuccessWithCorrectIndexPrivilegeName() { final PutRoleRequest request = new PutRoleRequest(); request.name(randomAlphaOfLengthBetween(4, 9)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java index 1cf61fac174a5..437f58449b4de 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java @@ -24,6 +24,8 @@ import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ApplicationResourcePrivileges; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition.FieldGrantExcludeGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; import 
org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges.ManageApplicationPrivileges; @@ -67,8 +69,9 @@ public void testSerialization() throws IOException { public void testSerializationForCurrentVersion() throws Exception { final TransportVersion version = TransportVersionUtils.randomCompatibleVersion(random()); final boolean canIncludeRemoteIndices = version.onOrAfter(TransportVersions.V_8_8_0); + final boolean canIncludeRemoteCluster = version.onOrAfter(TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS); - final GetUserPrivilegesResponse original = randomResponse(canIncludeRemoteIndices); + final GetUserPrivilegesResponse original = randomResponse(canIncludeRemoteIndices, canIncludeRemoteCluster); final BytesStreamOutput out = new BytesStreamOutput(); out.setTransportVersion(version); @@ -93,7 +96,7 @@ public void testSerializationWithRemoteIndicesThrowsOnUnsupportedVersions() thro ); out.setTransportVersion(version); - final GetUserPrivilegesResponse original = randomResponse(); + final GetUserPrivilegesResponse original = randomResponse(true, false); if (original.hasRemoteIndicesPrivileges()) { final var ex = expectThrows(IllegalArgumentException.class, () -> original.writeTo(out)); assertThat( @@ -124,7 +127,8 @@ public void testEqualsAndHashCode() throws IOException { original.getIndexPrivileges(), original.getApplicationPrivileges(), original.getRunAs(), - original.getRemoteIndexPrivileges() + original.getRemoteIndexPrivileges(), + original.getRemoteClusterPermissions() ); final EqualsHashCodeTestUtils.MutateFunction mutate = new EqualsHashCodeTestUtils.MutateFunction<>() { @Override @@ -175,7 +179,16 @@ public GetUserPrivilegesResponse mutate(GetUserPrivilegesResponse original) { randomStringSet(1) ) ); - return new GetUserPrivilegesResponse(cluster, conditionalCluster, index, application, runAs, remoteIndex); + + final RemoteClusterPermissions remoteCluster = new RemoteClusterPermissions(); + remoteCluster.addGroup( + new 
RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + generateRandomStringArray(3, 5, false, false) + ) + ); + + return new GetUserPrivilegesResponse(cluster, conditionalCluster, index, application, runAs, remoteIndex, remoteCluster); } private Set maybeMutate(int random, int index, Set original, Supplier supplier) { @@ -193,10 +206,10 @@ private Set maybeMutate(int random, int index, Set original, Supplier< } private GetUserPrivilegesResponse randomResponse() { - return randomResponse(true); + return randomResponse(true, true); } - private GetUserPrivilegesResponse randomResponse(boolean allowRemoteIndices) { + private GetUserPrivilegesResponse randomResponse(boolean allowRemoteIndices, boolean allowRemoteClusters) { final Set cluster = randomStringSet(5); final Set conditionalCluster = Sets.newHashSet( randomArray(3, ConfigurableClusterPrivilege[]::new, () -> new ManageApplicationPrivileges(randomStringSet(3))) @@ -226,7 +239,16 @@ private GetUserPrivilegesResponse randomResponse(boolean allowRemoteIndices) { ) : Set.of(); - return new GetUserPrivilegesResponse(cluster, conditionalCluster, index, application, runAs, remoteIndex); + RemoteClusterPermissions remoteCluster = allowRemoteClusters ? 
new RemoteClusterPermissions() : RemoteClusterPermissions.NONE; + if (allowRemoteClusters) { + remoteCluster.addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + generateRandomStringArray(3, 5, false, false) + ) + ); + } + return new GetUserPrivilegesResponse(cluster, conditionalCluster, index, application, runAs, remoteIndex, remoteCluster); } private GetUserPrivilegesResponse.Indices randomIndices(boolean allowMultipleFlsDlsDefinitions) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java index 9bcf80685910b..b7495004e58e7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java @@ -313,6 +313,7 @@ public static CrossClusterAccessSubjectInfo randomCrossClusterAccessSubjectInfo( null, null, null, + null, null ) ) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModelTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModelTests.java index 0c13bbc1d6f79..0fb678a5ea045 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModelTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModelTests.java @@ -9,7 +9,6 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.message.Message; 
import org.elasticsearch.common.logging.Loggers; @@ -72,19 +71,11 @@ public void testCheckAgainstDefinedFieldDoesNotLog() throws Exception { private void doWithLoggingExpectations(List expectations, CheckedRunnable body) throws Exception { - final Logger modelLogger = LogManager.getLogger(ExpressionModel.class); final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - try { - Loggers.addAppender(modelLogger, mockAppender); + try (var ignored = mockAppender.capturing(ExpressionModel.class)) { expectations.forEach(mockAppender::addExpectation); - body.run(); - mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(modelLogger, mockAppender); - mockAppender.stop(); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java index efa1dc2e29d10..a3a590dc5a4d4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java @@ -31,6 +31,8 @@ import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ApplicationResourcePrivileges; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; @@ -153,13 +155,14 @@ public void testToString() { + ", 
indicesPrivileges=[IndicesPrivileges[indices=[i1,i2], allowRestrictedIndices=[false], privileges=[read]" + ", field_security=[grant=[body,title], except=null], query={\"match_all\": {}}],]" + ", applicationPrivileges=[ApplicationResourcePrivileges[application=my_app, privileges=[read,write], resources=[*]],]" - + ", runAs=[sudo], metadata=[{}], remoteIndicesPrivileges=[], restriction=Restriction[workflows=[]]]" + + ", runAs=[sudo], metadata=[{}], remoteIndicesPrivileges=[], remoteClusterPrivileges=[]" + + ", restriction=Restriction[workflows=[]]]" ) ); } public void testToXContentRoundtrip() throws Exception { - final RoleDescriptor descriptor = randomRoleDescriptor(true, true, true); + final RoleDescriptor descriptor = randomRoleDescriptor(true, true, true, true); final XContentType xContentType = randomFrom(XContentType.values()); final BytesReference xContentValue = toShuffledXContent(descriptor, xContentType, ToXContent.EMPTY_PARAMS, false); final RoleDescriptor parsed = RoleDescriptor.parserBuilder() @@ -245,6 +248,24 @@ public void testParse() throws Exception { "clusters": ["*"] } ], + "remote_cluster": [ + { + "privileges": [ + "monitor_enrich" + ], + "clusters": [ + "one" + ] + }, + { + "privileges": [ + "monitor_enrich" + ], + "clusters": [ + "two", "three" + ] + } + ], "restriction":{ "workflows": ["search_application_query"] } @@ -254,10 +275,15 @@ public void testParse() throws Exception { assertArrayEquals(new String[] { "a", "b" }, rd.getClusterPrivileges()); assertEquals(3, rd.getIndicesPrivileges().length); assertEquals(3, rd.getRemoteIndicesPrivileges().length); + assertEquals(2, rd.getRemoteClusterPermissions().groups().size()); assertArrayEquals(new String[] { "r1" }, rd.getRemoteIndicesPrivileges()[0].remoteClusters()); assertArrayEquals(new String[] { "r1", "*-*" }, rd.getRemoteIndicesPrivileges()[1].remoteClusters()); assertArrayEquals(new String[] { "*" }, rd.getRemoteIndicesPrivileges()[2].remoteClusters()); assertArrayEquals(new String[] { 
"m", "n" }, rd.getRunAs()); + assertArrayEquals(new String[] { "one" }, rd.getRemoteClusterPermissions().groups().get(0).remoteClusterAliases()); + assertArrayEquals(new String[] { "monitor_enrich" }, rd.getRemoteClusterPermissions().groups().get(0).clusterPrivileges()); + assertArrayEquals(new String[] { "two", "three" }, rd.getRemoteClusterPermissions().groups().get(1).remoteClusterAliases()); + assertArrayEquals(new String[] { "monitor_enrich" }, rd.getRemoteClusterPermissions().groups().get(1).clusterPrivileges()); assertThat(rd.hasRestriction(), equalTo(true)); assertThat(rd.getRestriction().hasWorkflows(), equalTo(true)); assertArrayEquals(new String[] { "search_application_query" }, rd.getRestriction().getWorkflows()); @@ -453,6 +479,72 @@ public void testParse() throws Exception { ); } + public void testParseInvalidRemoteCluster() throws IOException { + // missing clusters + String q = """ + { + "remote_cluster": [ + { + "privileges": [ + "monitor_enrich" + ] + } + ] + }"""; + ElasticsearchParseException exception = expectThrows( + ElasticsearchParseException.class, + () -> RoleDescriptor.parserBuilder().build().parse("test", new BytesArray(q), XContentType.JSON) + ); + assertThat( + exception.getMessage(), + containsString("failed to parse remote_cluster for role [test]. " + "[clusters] must be defined when [privileges] are defined") + ); + + // missing privileges + String q2 = """ + { + "remote_cluster": [ + { + "clusters": [ + "two", "three" + ] + } + ] + }"""; + exception = expectThrows( + ElasticsearchParseException.class, + () -> RoleDescriptor.parserBuilder().build().parse("test", new BytesArray(q2), XContentType.JSON) + ); + assertThat( + exception.getMessage(), + containsString("failed to parse remote_cluster for role [test]. " + "[privileges] must be defined when [clusters] are defined") + ); + + // missing both does not cause an exception while parsing. 
However, we generally want to avoid any assumptions about the behavior + // and is allowed for legacy reasons to better match how other fields work + String q3 = """ + { + "remote_cluster": [] + }"""; + RoleDescriptor rd = RoleDescriptor.parserBuilder().build().parse("test", new BytesArray(q3), XContentType.JSON); + assertThat(rd.getRemoteClusterPermissions().groups().size(), equalTo(0)); + assertThat(rd.getRemoteClusterPermissions(), equalTo(RemoteClusterPermissions.NONE)); + if (assertsAreEnabled) { + expectThrows(AssertionError.class, () -> rd.getRemoteClusterPermissions().validate()); + } + // similarly, missing both but with a group placeholder does not cause an exception while parsing but will still raise an exception + String q4 = """ + { + "remote_cluster": [{}] + }"""; + + IllegalArgumentException illegalArgumentException = expectThrows( + IllegalArgumentException.class, + () -> RoleDescriptor.parserBuilder().build().parse("test", new BytesArray(q4), XContentType.JSON) + ); + assertThat(illegalArgumentException.getMessage(), containsString("remote cluster groups must not be null or empty")); + } + public void testParsingFieldPermissionsUsesCache() throws IOException { FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); RoleDescriptor.setFieldPermissionsCache(fieldPermissionsCache); @@ -500,12 +592,18 @@ public void testParsingFieldPermissionsUsesCache() throws IOException { public void testSerializationForCurrentVersion() throws Exception { final TransportVersion version = TransportVersionUtils.randomCompatibleVersion(random()); final boolean canIncludeRemoteIndices = version.onOrAfter(TransportVersions.V_8_8_0); + final boolean canIncludeRemoteClusters = version.onOrAfter(TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS); final boolean canIncludeWorkflows = version.onOrAfter(WORKFLOWS_RESTRICTION_VERSION); logger.info("Testing serialization with version {}", version); BytesStreamOutput output = new BytesStreamOutput(); 
output.setTransportVersion(version); - final RoleDescriptor descriptor = randomRoleDescriptor(true, canIncludeRemoteIndices, canIncludeWorkflows); + final RoleDescriptor descriptor = randomRoleDescriptor( + true, + canIncludeRemoteIndices, + canIncludeWorkflows, + canIncludeRemoteClusters + ); descriptor.writeTo(output); final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin().getNamedWriteables()); StreamInput streamInput = new NamedWriteableAwareStreamInput( @@ -518,7 +616,7 @@ public void testSerializationForCurrentVersion() throws Exception { assertThat(serialized, equalTo(descriptor)); } - public void testSerializationWithRemoteIndicesThrowsOnUnsupportedVersions() throws IOException { + public void testSerializationWithRemoteIndicesWithElderVersion() throws IOException { final TransportVersion versionBeforeRemoteIndices = TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_8_0); final TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), @@ -528,7 +626,7 @@ public void testSerializationWithRemoteIndicesThrowsOnUnsupportedVersions() thro final BytesStreamOutput output = new BytesStreamOutput(); output.setTransportVersion(version); - final RoleDescriptor descriptor = randomRoleDescriptor(true, true, false); + final RoleDescriptor descriptor = randomRoleDescriptor(true, true, false, false); descriptor.writeTo(output); final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin().getNamedWriteables()); StreamInput streamInput = new NamedWriteableAwareStreamInput( @@ -551,12 +649,59 @@ public void testSerializationWithRemoteIndicesThrowsOnUnsupportedVersions() thro descriptor.getMetadata(), descriptor.getTransientMetadata(), null, + null, + descriptor.getRestriction() + ) + ) + ); + } else { + assertThat(descriptor, equalTo(serialized)); + } + } + + public void testSerializationWithRemoteClusterWithElderVersion() throws IOException { + final TransportVersion 
versionBeforeRemoteCluster = TransportVersionUtils.getPreviousVersion( + TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS + ); + final TransportVersion version = TransportVersionUtils.randomVersionBetween( + random(), + TransportVersions.V_7_17_0, + versionBeforeRemoteCluster + ); + final BytesStreamOutput output = new BytesStreamOutput(); + output.setTransportVersion(version); + + final RoleDescriptor descriptor = randomRoleDescriptor(true, false, false, true); + descriptor.writeTo(output); + final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin().getNamedWriteables()); + StreamInput streamInput = new NamedWriteableAwareStreamInput( + ByteBufferStreamInput.wrap(BytesReference.toBytes(output.bytes())), + registry + ); + streamInput.setTransportVersion(version); + final RoleDescriptor serialized = new RoleDescriptor(streamInput); + if (descriptor.hasRemoteClusterPermissions()) { + assertThat( + serialized, + equalTo( + new RoleDescriptor( + descriptor.getName(), + descriptor.getClusterPrivileges(), + descriptor.getIndicesPrivileges(), + descriptor.getApplicationPrivileges(), + descriptor.getConditionalClusterPrivileges(), + descriptor.getRunAs(), + descriptor.getMetadata(), + descriptor.getTransientMetadata(), + null, + descriptor.getRemoteClusterPermissions(), descriptor.getRestriction() ) ) ); } else { assertThat(descriptor, equalTo(serialized)); + assertThat(descriptor.getRemoteClusterPermissions(), equalTo(RemoteClusterPermissions.NONE)); } } @@ -570,7 +715,7 @@ public void testSerializationWithWorkflowsRestrictionAndUnsupportedVersions() th final BytesStreamOutput output = new BytesStreamOutput(); output.setTransportVersion(version); - final RoleDescriptor descriptor = randomRoleDescriptor(true, false, true); + final RoleDescriptor descriptor = randomRoleDescriptor(true, false, true, false); descriptor.writeTo(output); final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin().getNamedWriteables()); 
StreamInput streamInput = new NamedWriteableAwareStreamInput( @@ -593,6 +738,7 @@ public void testSerializationWithWorkflowsRestrictionAndUnsupportedVersions() th descriptor.getMetadata(), descriptor.getTransientMetadata(), descriptor.getRemoteIndicesPrivileges(), + descriptor.getRemoteClusterPermissions(), null ) ) @@ -1001,6 +1147,7 @@ public void testIsEmpty() { new HashMap<>(), new HashMap<>(), new RoleDescriptor.RemoteIndicesPrivileges[0], + RemoteClusterPermissions.NONE, null ).isEmpty() ); @@ -1013,6 +1160,7 @@ public void testIsEmpty() { randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean() ); @@ -1040,7 +1188,8 @@ public void testIsEmpty() { ? new RoleDescriptor.RemoteIndicesPrivileges[0] : new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("rmt").indices("idx").privileges("foo").build() }, - booleans.get(7) ? null : RoleRestrictionTests.randomWorkflowsRestriction(1, 2) + booleans.get(7) ? null : randomRemoteClusterPermissions(5), + booleans.get(8) ? 
null : RoleRestrictionTests.randomWorkflowsRestriction(1, 2) ); if (booleans.stream().anyMatch(e -> e.equals(false))) { @@ -1062,8 +1211,9 @@ public void testHasPrivilegesOtherThanIndex() { null, null, null, + null, null - ).hasPrivilegesOtherThanIndex(), + ).hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster(), is(false) ); final RoleDescriptor roleDescriptor = randomRoleDescriptor(); @@ -1072,7 +1222,7 @@ public void testHasPrivilegesOtherThanIndex() { || roleDescriptor.hasApplicationPrivileges() || roleDescriptor.hasRunAs() || roleDescriptor.hasRemoteIndicesPrivileges(); - assertThat(roleDescriptor.hasPrivilegesOtherThanIndex(), equalTo(expected)); + assertThat(roleDescriptor.hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster(), equalTo(expected)); } public static List randomUniquelyNamedRoleDescriptors(int minSize, int maxSize) { @@ -1087,10 +1237,15 @@ public static RoleDescriptor randomRoleDescriptor() { } public static RoleDescriptor randomRoleDescriptor(boolean allowReservedMetadata) { - return randomRoleDescriptor(allowReservedMetadata, false, false); + return randomRoleDescriptor(allowReservedMetadata, false, false, false); } - public static RoleDescriptor randomRoleDescriptor(boolean allowReservedMetadata, boolean allowRemoteIndices, boolean allowWorkflows) { + public static RoleDescriptor randomRoleDescriptor( + boolean allowReservedMetadata, + boolean allowRemoteIndices, + boolean allowWorkflows, + boolean allowRemoteClusters + ) { final RoleDescriptor.RemoteIndicesPrivileges[] remoteIndexPrivileges; if (false == allowRemoteIndices || randomBoolean()) { remoteIndexPrivileges = null; @@ -1098,6 +1253,11 @@ public static RoleDescriptor randomRoleDescriptor(boolean allowReservedMetadata, remoteIndexPrivileges = randomRemoteIndicesPrivileges(0, 3); } + RemoteClusterPermissions remoteClusters = RemoteClusterPermissions.NONE; + if (allowRemoteClusters && randomBoolean()) { + remoteClusters = randomRemoteClusterPermissions(randomIntBetween(1, 5)); + } + return 
new RoleDescriptor( randomAlphaOfLengthBetween(3, 90), randomSubsetOf(ClusterPrivilegeResolver.names()).toArray(String[]::new), @@ -1108,6 +1268,7 @@ public static RoleDescriptor randomRoleDescriptor(boolean allowReservedMetadata, randomRoleDescriptorMetadata(allowReservedMetadata), Map.of(), remoteIndexPrivileges, + remoteClusters, allowWorkflows ? RoleRestrictionTests.randomWorkflowsRestriction(1, 3) : null ); } @@ -1175,6 +1336,20 @@ public static ApplicationResourcePrivileges[] randomApplicationPrivileges() { return applicationPrivileges; } + public static RemoteClusterPermissions randomRemoteClusterPermissions(int maxGroups) { + final RemoteClusterPermissions remoteClusterPermissions = new RemoteClusterPermissions(); + final String[] supportedPermissions = RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]); + for (int i = 0; i < maxGroups; i++) { + remoteClusterPermissions.addGroup( + new RemoteClusterPermissionGroup( + randomNonEmptySubsetOf(Arrays.asList(supportedPermissions)).toArray(new String[0]), + generateRandomStringArray(5, randomIntBetween(3, 9), false, false) + ) + ); + } + return remoteClusterPermissions; + } + public static RoleDescriptor.RemoteIndicesPrivileges[] randomRemoteIndicesPrivileges(int min, int max) { return randomRemoteIndicesPrivileges(min, max, Set.of()); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java index aac6e17cd6ac2..a4d5a60094645 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java @@ -8,8 +8,6 @@ package 
org.elasticsearch.xpack.core.security.authz.accesscontrol; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -29,7 +27,6 @@ import org.apache.lucene.util.BitSet; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.CheckedBiConsumer; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.IndexSettings; @@ -194,11 +191,8 @@ public void testLogWarningIfBitSetExceedsCacheSize() throws Exception { assertThat(cache.entryCount(), equalTo(0)); assertThat(cache.ramBytesUsed(), equalTo(0L)); - final Logger cacheLogger = LogManager.getLogger(cache.getClass()); final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - try { - Loggers.addAppender(cacheLogger, mockAppender); + try (var ignored = mockAppender.capturing(cache.getClass())) { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "[bitset too big]", @@ -222,9 +216,6 @@ public void testLogWarningIfBitSetExceedsCacheSize() throws Exception { }); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(cacheLogger, mockAppender); - mockAppender.stop(); } } @@ -238,11 +229,8 @@ public void testLogMessageIfCacheFull() throws Exception { assertThat(cache.entryCount(), equalTo(0)); assertThat(cache.ramBytesUsed(), equalTo(0L)); - final Logger cacheLogger = LogManager.getLogger(cache.getClass()); final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - try { - Loggers.addAppender(cacheLogger, mockAppender); + try (var ignored = mockAppender.capturing(cache.getClass())) { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "[cache full]", 
@@ -264,9 +252,6 @@ public void testLogMessageIfCacheFull() throws Exception { }); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(cacheLogger, mockAppender); - mockAppender.stop(); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java index 91cf339e46018..feea49430cfc3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.security.authz.permission; import org.apache.lucene.util.automaton.Automaton; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.bulk.TransportBulkAction; @@ -94,7 +95,7 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() { .toArray(String[]::new); Role baseRole = Role.builder(EMPTY_RESTRICTED_INDICES, "base-role") - .addRemoteGroup( + .addRemoteIndicesGroup( Set.of(remoteClusterAlias), baseFieldPermissions, baseQuery, @@ -102,8 +103,8 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() { baseAllowRestrictedIndices, baseIndices ) - // This privilege should be ignored - .addRemoteGroup( + // This privilege should be ignored (wrong alias) + .addRemoteIndicesGroup( Set.of(randomAlphaOfLength(3)), randomFlsPermissions(), randomDlsQuery(), @@ -111,6 +112,21 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() { randomBoolean(), randomAlphaOfLengthBetween(4, 6) ) + .addRemoteClusterPermissions( + new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup( 
+ RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new String[] { remoteClusterAlias } + ) + ) + // this group should be ignored (wrong alias) + .addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new String[] { randomAlphaOfLength(3) } + ) + ) + ) .build(); String[] limitedGrantedFields = new String[] { randomAlphaOfLength(5) }; @@ -122,17 +138,18 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() { .sorted() // sorted so we can simplify assertions .toArray(String[]::new); + Set altAliases = Set.of(remoteClusterPrefix + "-*", randomAlphaOfLength(4)); Role limitedByRole = Role.builder(EMPTY_RESTRICTED_INDICES, "limited-role") - .addRemoteGroup( - Set.of(remoteClusterPrefix + "-*", randomAlphaOfLength(4)), + .addRemoteIndicesGroup( + altAliases, limitedFieldPermissions, limitedQuery, limitedPrivilege, limitedAllowRestrictedIndices, limitedIndices ) - // This privilege should be ignored - .addRemoteGroup( + // This privilege should be ignored (wrong alias) + .addRemoteIndicesGroup( Set.of(randomAlphaOfLength(4)), randomFlsPermissions(), randomDlsQuery(), @@ -140,6 +157,21 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() { randomBoolean(), randomAlphaOfLength(9) ) + .addRemoteClusterPermissions( + new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + altAliases.toArray(new String[0]) + ) + ) + // this group should be ignored (wrong alias) + .addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new String[] { randomAlphaOfLength(4) } + ) + ) + ) .build(); Role role = baseRole.limitedBy(limitedByRole); @@ -148,7 +180,7 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() { Set.of( new 
RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), new IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .privileges(basePrivilege.name()) @@ -167,7 +199,7 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() { Set.of( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), new IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .privileges(limitedPrivilege.name()) @@ -187,11 +219,11 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() { ); // for the existing remote cluster alias, check that the result is equal to the expected intersection - assertThat(role.getRoleDescriptorsIntersectionForRemoteCluster(remoteClusterAlias), equalTo(expected)); + assertThat(role.getRoleDescriptorsIntersectionForRemoteCluster(remoteClusterAlias, TransportVersion.current()), equalTo(expected)); // and for a random cluster alias, check that it returns empty intersection assertThat( - role.getRoleDescriptorsIntersectionForRemoteCluster(randomAlphaOfLengthBetween(5, 7)), + role.getRoleDescriptorsIntersectionForRemoteCluster(randomAlphaOfLengthBetween(5, 7), TransportVersion.current()), equalTo(RoleDescriptorsIntersection.EMPTY) ); } @@ -216,35 +248,50 @@ public void testGetRoleDescriptorsIntersectionForRemoteClusterReturnsEmpty() { Role.Builder limitedByRole1 = Role.builder(EMPTY_RESTRICTED_INDICES, "limited-role-1"); Role.Builder limitedByRole2 = Role.builder(EMPTY_RESTRICTED_INDICES, "limited-role-2"); - // randomly include remote indices privileges in one of the role for the remoteClusterAlias - boolean includeRemoteIndicesPermission = randomBoolean(); - if (includeRemoteIndicesPermission) { + // randomly include remote privileges in one of the role for the remoteClusterAlias + boolean includeRemotePermission = randomBoolean(); + if 
(includeRemotePermission) { + RemoteClusterPermissions remoteCluster = new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new String[] { remoteClusterAlias } + ) + ); String roleToAddRemoteGroup = randomFrom("b", "l1", "l2"); switch (roleToAddRemoteGroup) { - case "b" -> baseRole.addRemoteGroup( - Set.of(remoteClusterAlias), - randomFlsPermissions(randomAlphaOfLength(3)), - randomDlsQuery(), - randomIndexPrivilege(), - randomBoolean(), - randomAlphaOfLength(3) - ); - case "l1" -> limitedByRole1.addRemoteGroup( - Set.of(remoteClusterAlias), - randomFlsPermissions(randomAlphaOfLength(4)), - randomDlsQuery(), - randomIndexPrivilege(), - randomBoolean(), - randomAlphaOfLength(4) - ); - case "l2" -> limitedByRole2.addRemoteGroup( - Set.of(remoteClusterAlias), - randomFlsPermissions(randomAlphaOfLength(5)), - randomDlsQuery(), - randomIndexPrivilege(), - randomBoolean(), - randomAlphaOfLength(5) - ); + case "b" -> { + baseRole.addRemoteIndicesGroup( + Set.of(remoteClusterAlias), + randomFlsPermissions(randomAlphaOfLength(3)), + randomDlsQuery(), + randomIndexPrivilege(), + randomBoolean(), + randomAlphaOfLength(3) + ); + baseRole.addRemoteClusterPermissions(remoteCluster); + } + case "l1" -> { + limitedByRole1.addRemoteIndicesGroup( + Set.of(remoteClusterAlias), + randomFlsPermissions(randomAlphaOfLength(4)), + randomDlsQuery(), + randomIndexPrivilege(), + randomBoolean(), + randomAlphaOfLength(4) + ); + limitedByRole1.addRemoteClusterPermissions(remoteCluster); + } + case "l2" -> { + limitedByRole2.addRemoteIndicesGroup( + Set.of(remoteClusterAlias), + randomFlsPermissions(randomAlphaOfLength(5)), + randomDlsQuery(), + randomIndexPrivilege(), + randomBoolean(), + randomAlphaOfLength(5) + ); + limitedByRole2.addRemoteClusterPermissions(remoteCluster); + } default -> throw new IllegalStateException("unexpected case"); } } @@ -253,7 +300,7 @@ public void 
testGetRoleDescriptorsIntersectionForRemoteClusterReturnsEmpty() { // Note: defining a remote indices privileges for a remote cluster that we do not request intersection for, should be ignored if (randomBoolean()) { String otherRemoteClusterAlias = randomValueOtherThan(remoteClusterAlias, () -> randomAlphaOfLengthBetween(4, 6)); - baseRole.addRemoteGroup( + baseRole.addRemoteIndicesGroup( Set.of(otherRemoteClusterAlias), randomFlsPermissions(randomAlphaOfLength(3)), randomDlsQuery(), @@ -261,7 +308,7 @@ public void testGetRoleDescriptorsIntersectionForRemoteClusterReturnsEmpty() { randomBoolean(), randomAlphaOfLength(5) ); - limitedByRole1.addRemoteGroup( + limitedByRole1.addRemoteIndicesGroup( Set.of(otherRemoteClusterAlias), randomFlsPermissions(randomAlphaOfLength(4)), randomDlsQuery(), @@ -269,7 +316,7 @@ public void testGetRoleDescriptorsIntersectionForRemoteClusterReturnsEmpty() { randomBoolean(), randomAlphaOfLength(4) ); - limitedByRole2.addRemoteGroup( + limitedByRole2.addRemoteIndicesGroup( Set.of(otherRemoteClusterAlias), randomFlsPermissions(randomAlphaOfLength(5)), randomDlsQuery(), @@ -280,7 +327,12 @@ public void testGetRoleDescriptorsIntersectionForRemoteClusterReturnsEmpty() { } Role role = baseRole.build().limitedBy(limitedByRole1.build().limitedBy(limitedByRole2.build())); - assertThat(role.getRoleDescriptorsIntersectionForRemoteCluster(remoteClusterAlias).roleDescriptorsList().isEmpty(), equalTo(true)); + assertThat( + role.getRoleDescriptorsIntersectionForRemoteCluster(remoteClusterAlias, TransportVersion.current()) + .roleDescriptorsList() + .isEmpty(), + equalTo(true) + ); } public void testAuthorize() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionGroupTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionGroupTests.java new file mode 100644 index 0000000000000..cd269bd1a97b3 --- /dev/null +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionGroupTests.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.authz.permission; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Locale; + +import static org.hamcrest.Matchers.containsString; + +public class RemoteClusterPermissionGroupTests extends AbstractXContentSerializingTestCase { + + public void testToXContent() throws IOException { + String[] privileges = generateRandomStringArray(5, 5, false, false); + String[] clusters = generateRandomStringArray(5, 5, false, false); + RemoteClusterPermissionGroup remoteClusterPermissionGroup = new RemoteClusterPermissionGroup(privileges, clusters); + String output = Strings.toString(remoteClusterPermissionGroup); + assertEquals( + XContentHelper.stripWhitespace(String.format(Locale.ROOT, """ + { + "privileges" : [ + "%s" + ], + "clusters" : [ + "%s" + ] + } + """, String.join("\",\"", Arrays.asList(privileges)), String.join("\",\"", Arrays.asList(clusters)))), + XContentHelper.stripWhitespace(output) + ); + } + + public void testToString() throws IOException { + String[] privileges = generateRandomStringArray(5, 5, false, false); + String[] clusters = generateRandomStringArray(5, 5, false, false); + RemoteClusterPermissionGroup remoteClusterPermissionGroup = new RemoteClusterPermissionGroup(privileges, clusters); + assertThat( + 
remoteClusterPermissionGroup.toString(), + containsString("privileges=[" + String.join(", ", Arrays.asList(privileges)) + "]") + ); + assertThat( + remoteClusterPermissionGroup.toString(), + containsString("clusters=[" + String.join(", ", Arrays.asList(clusters)) + "]") + ); + } + + public void testMatcher() { + String[] privileges = generateRandomStringArray(5, 5, false, false); + String[] clusters = generateRandomStringArray(5, 5, false, false); + for (int i = 0; i < clusters.length; i++) { + if (randomBoolean()) { + clusters[i] = clusters[i].substring(0, clusters[i].length() - 1) + "*"; + } + } + RemoteClusterPermissionGroup remoteClusterPermissionGroup = new RemoteClusterPermissionGroup(privileges, clusters); + for (String cluster : clusters) { + assertTrue(remoteClusterPermissionGroup.hasPrivileges(cluster)); + assertFalse(remoteClusterPermissionGroup.hasPrivileges(randomAlphaOfLength(20))); + } + } + + public void testNullAndEmptyArgs() { + final ThrowingRunnable nullGroup = randomFrom( + () -> new RemoteClusterPermissionGroup(null, null), + () -> new RemoteClusterPermissionGroup(new String[] {}, new String[] {}), + () -> new RemoteClusterPermissionGroup(null, new String[] {}), + () -> new RemoteClusterPermissionGroup(new String[] {}, null) + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, nullGroup); + assertEquals("remote cluster groups must not be null or empty", e.getMessage()); + } + + public void testInvalidValues() { + final ThrowingRunnable invalidClusterAlias = randomFrom( + () -> new RemoteClusterPermissionGroup(new String[] { "foo" }, new String[] { null }), + () -> new RemoteClusterPermissionGroup(new String[] { "foo" }, new String[] { "bar", null }), + () -> new RemoteClusterPermissionGroup(new String[] { "foo" }, new String[] { "bar", "" }), + () -> new RemoteClusterPermissionGroup(new String[] { "foo" }, new String[] { "bar", " " }) + ); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, 
invalidClusterAlias); + assertEquals("remote_cluster clusters aliases must contain valid non-empty, non-null values", e.getMessage()); + + final ThrowingRunnable invalidPermission = randomFrom( + () -> new RemoteClusterPermissionGroup(new String[] { null }, new String[] { "bar" }), + () -> new RemoteClusterPermissionGroup(new String[] { "foo", null }, new String[] { "bar" }), + () -> new RemoteClusterPermissionGroup(new String[] { "foo", "" }, new String[] { "bar" }), + () -> new RemoteClusterPermissionGroup(new String[] { "foo", " " }, new String[] { "bar" }) + ); + + IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, invalidPermission); + assertEquals("remote_cluster privileges must contain valid non-empty, non-null values", e2.getMessage()); + } + + @Override + protected Writeable.Reader instanceReader() { + return RemoteClusterPermissionGroup::new; + } + + @Override + protected RemoteClusterPermissionGroup createTestInstance() { + return new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "*" }); + } + + @Override + protected RemoteClusterPermissionGroup mutateInstance(RemoteClusterPermissionGroup instance) throws IOException { + if (randomBoolean()) { + return new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "foo", "bar" }); + } else { + return new RemoteClusterPermissionGroup(new String[] { "foobar" }, new String[] { "*" }); + } + } + + @Override + protected RemoteClusterPermissionGroup doParseInstance(XContentParser parser) throws IOException { + // fromXContent/parsing isn't supported since we still do old school manual parsing of the role descriptor + return createTestInstance(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java new file mode 100644 
index 0000000000000..394455879bbdf --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java @@ -0,0 +1,249 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.authz.permission; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.test.TransportVersionUtils; +import org.elasticsearch.xcontent.XContentParser; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class RemoteClusterPermissionsTests extends AbstractXContentSerializingTestCase { + + List groupPrivileges; + List groupClusters; + RemoteClusterPermissions remoteClusterPermission; + + @Before + void clean() { + groupPrivileges = new ArrayList<>(); + groupClusters = new ArrayList<>(); + remoteClusterPermission = new RemoteClusterPermissions(); + } + + public void testToXContent() throws IOException { + List groups = generateRandomGroups(false); + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < groups.size(); i++) { + String[] privileges = groupPrivileges.get(i); + 
String[] clusters = groupClusters.get(i); + sb.append(XContentHelper.stripWhitespace(String.format(Locale.ROOT, """ + { + "privileges" : [ + "%s" + ], + "clusters" : [ + "%s" + ] + } + """, String.join("\",\"", Arrays.asList(privileges)), String.join("\",\"", Arrays.asList(clusters))))); + if (i < groups.size() - 1) { + sb.append(","); + } + } + String output = Strings.toString(remoteClusterPermission); + assertEquals(XContentHelper.stripWhitespace(sb.toString()), XContentHelper.stripWhitespace(output)); + } + + public void testToString() throws IOException { + for (int i = 0; i < generateRandomGroups(false).size(); i++) { + String[] privileges = groupPrivileges.get(i); + String[] clusters = groupClusters.get(i); + assertThat( + remoteClusterPermission.toString(), + containsString("privileges=[" + String.join(", ", Arrays.asList(privileges)) + "]") + ); + assertThat(remoteClusterPermission.toString(), containsString("clusters=[" + String.join(", ", Arrays.asList(clusters)) + "]")); + } + } + + public void testMatcher() { + for (int i = 0; i < generateRandomGroups(true).size(); i++) { + String[] clusters = groupClusters.get(i); + for (String cluster : clusters) { + assertTrue(remoteClusterPermission.hasPrivileges(cluster)); + assertFalse(remoteClusterPermission.hasPrivileges(randomAlphaOfLength(20))); + } + } + } + + public void testPrivilegeNames() { + Map> original = RemoteClusterPermissions.allowedRemoteClusterPermissions; + try { + // create random groups with random privileges for random clusters + List randomGroups = generateRandomGroups(true); + RemoteClusterPermissions.allowedRemoteClusterPermissions = new HashMap<>(); + Set allPrivileges = new HashSet<>(); + // allow all the privileges across the random groups for the current version + for (int i = 0; i < randomGroups.size(); i++) { + allPrivileges.addAll(Set.of(groupPrivileges.get(i))); + } + RemoteClusterPermissions.allowedRemoteClusterPermissions.put(TransportVersion.current(), allPrivileges); + + for 
(int i = 0; i < randomGroups.size(); i++) { + String[] privileges = groupPrivileges.get(i); + String[] clusters = groupClusters.get(i); + for (String cluster : clusters) { + String[] found = remoteClusterPermission.privilegeNames(cluster, TransportVersion.current()); + Arrays.sort(found); + // ensure all lowercase since the privilege names are case insensitive and the method will result in lowercase + for (int j = 0; j < privileges.length; j++) { + privileges[j] = privileges[j].toLowerCase(Locale.ROOT); + } + Arrays.sort(privileges); + // the two array are always equal since the all the random values are allowed + assertArrayEquals(privileges, found); + } + } + } finally { + RemoteClusterPermissions.allowedRemoteClusterPermissions = original; + } + + // create random groups with random privileges for random clusters + List randomGroups = generateRandomGroups(true); + // replace a random value with one that is allowed + groupPrivileges.get(0)[0] = "monitor_enrich"; + + for (int i = 0; i < randomGroups.size(); i++) { + String[] privileges = groupPrivileges.get(i); + String[] clusters = groupClusters.get(i); + for (String cluster : clusters) { + String[] found = remoteClusterPermission.privilegeNames(cluster, TransportVersion.current()); + Arrays.sort(found); + // ensure all lowercase since the privilege names are case insensitive and the method will result in lowercase + for (int j = 0; j < privileges.length; j++) { + privileges[j] = privileges[j].toLowerCase(Locale.ROOT); + } + Arrays.sort(privileges); + + // the results are conditional. 
the first group has a value that is allowed for the current version + if (i == 0 && privileges.length == 1) { + // special case where there was only 1 random value and it was replaced with a value that is allowed + assertArrayEquals(privileges, found); + } else { + // none of the random privileges are allowed for the current version + assertFalse(Arrays.equals(privileges, found)); + if (i == 0) { + // ensure that for the current version we only find the valid "monitor_enrich" + assertThat(Set.of(found), equalTo(Set.of("monitor_enrich"))); + } else { + // all other groups should be found to not have any privileges + assertTrue(found.length == 0); + } + } + } + } + } + + public void testMonitorEnrichPerVersion() { + // test monitor_enrich before, after and on monitor enrich version + String[] privileges = randomBoolean() ? new String[] { "monitor_enrich" } : new String[] { "monitor_enrich", "foo", "bar" }; + String[] before = new RemoteClusterPermissions().addGroup(new RemoteClusterPermissionGroup(privileges, new String[] { "*" })) + .privilegeNames("*", TransportVersionUtils.getPreviousVersion(TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS)); + // empty set since monitor_enrich is not allowed in the before version + assertThat(Set.of(before), equalTo(Collections.emptySet())); + String[] on = new RemoteClusterPermissions().addGroup(new RemoteClusterPermissionGroup(privileges, new String[] { "*" })) + .privilegeNames("*", TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS); + // only monitor_enrich since the other values are not allowed + assertThat(Set.of(on), equalTo(Set.of("monitor_enrich"))); + String[] after = new RemoteClusterPermissions().addGroup(new RemoteClusterPermissionGroup(privileges, new String[] { "*" })) + .privilegeNames("*", TransportVersion.current()); + // only monitor_enrich since the other values are not allowed + assertThat(Set.of(after), equalTo(Set.of("monitor_enrich"))); + } + + public void testValidate() { + generateRandomGroups(randomBoolean()); + 
// random values not allowed + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> remoteClusterPermission.validate()); + assertTrue(error.getMessage().contains("Invalid remote_cluster permissions found. Please remove the following:")); + assertTrue(error.getMessage().contains("Only [monitor_enrich] are allowed")); + + new RemoteClusterPermissions().addGroup(new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "*" })) + .validate(); // no error + } + + private List generateRandomGroups(boolean fuzzyCluster) { + clean(); + List groups = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(1, 5); i++) { + String[] privileges = generateRandomStringArray(5, 5, false, false); + groupPrivileges.add(privileges); + String[] clusters = generateRandomStringArray(5, 5, false, false); + if (fuzzyCluster) { + for (int j = 0; j < clusters.length; j++) { + if (randomBoolean()) { + clusters[j] = clusters[j].substring(0, clusters[j].length() - 1) + "*"; + } + } + } + groupClusters.add(clusters); + RemoteClusterPermissionGroup group = new RemoteClusterPermissionGroup(privileges, clusters); + groups.add(group); + remoteClusterPermission.addGroup(group); + } + return groups; + } + + @Override + protected Writeable.Reader instanceReader() { + return RemoteClusterPermissions::new; + } + + @Override + protected RemoteClusterPermissions createTestInstance() { + return new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "*" }) + ); + } + + @Override + protected RemoteClusterPermissions mutateInstance(RemoteClusterPermissions instance) throws IOException { + return new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "*" }) + ).addGroup(new RemoteClusterPermissionGroup(new String[] { "foobar" }, new String[] { "*" })); + } + + @Override + protected RemoteClusterPermissions 
doParseInstance(XContentParser parser) throws IOException { + // fromXContent/parsing isn't supported since we still do old school manual parsing of the role descriptor + return createTestInstance(); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + List.of( + new NamedWriteableRegistry.Entry( + RemoteClusterPermissionGroup.class, + RemoteClusterPermissionGroup.NAME, + RemoteClusterPermissionGroup::new + ) + ) + ); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/SimpleRoleTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/SimpleRoleTests.java index ad11cab19133e..0c15256d1951e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/SimpleRoleTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/SimpleRoleTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.security.authz.permission; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine; @@ -150,7 +151,7 @@ public void testBuildFromRoleDescriptorWithApplicationPrivileges() { public void testGetRoleDescriptorsIntersectionForRemoteCluster() { SimpleRole role = Role.builder(RESTRICTED_INDICES, randomAlphaOfLength(6)) - .addRemoteGroup( + .addRemoteIndicesGroup( Set.of("remote-cluster-a"), FieldPermissions.DEFAULT, null, @@ -159,9 +160,9 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() { "remote-index-a-1", "remote-index-a-2" ) - .addRemoteGroup(Set.of("remote-*-a"), FieldPermissions.DEFAULT, null, IndexPrivilege.READ, false, "remote-index-a-3") - // This privilege should be ignored - .addRemoteGroup( + .addRemoteIndicesGroup(Set.of("remote-*-a"), 
FieldPermissions.DEFAULT, null, IndexPrivilege.READ, false, "remote-index-a-3") + // This privilege should be ignored (wrong alias) + .addRemoteIndicesGroup( Set.of("remote-cluster-b"), FieldPermissions.DEFAULT, null, @@ -170,8 +171,8 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() { "remote-index-b-1", "remote-index-b-2" ) - // This privilege should be ignored - .addRemoteGroup( + // This privilege should be ignored (wrong alias) + .addRemoteIndicesGroup( Set.of(randomAlphaOfLength(8)), new FieldPermissions(new FieldPermissionsDefinition(new String[] { randomAlphaOfLength(5) }, null)), null, @@ -179,9 +180,27 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() { randomBoolean(), randomAlphaOfLength(9) ) + .addRemoteClusterPermissions( + new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new String[] { "remote-cluster-a" } + ) + ) + // this group should be ignored (wrong alias) + .addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new String[] { randomAlphaOfLength(3) } + ) + ) + ) .build(); - RoleDescriptorsIntersection intersection = role.getRoleDescriptorsIntersectionForRemoteCluster("remote-cluster-a"); + RoleDescriptorsIntersection intersection = role.getRoleDescriptorsIntersectionForRemoteCluster( + "remote-cluster-a", + TransportVersion.current() + ); assertThat(intersection.roleDescriptorsList().isEmpty(), equalTo(false)); assertThat( @@ -190,7 +209,7 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() { new RoleDescriptorsIntersection( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .privileges(IndexPrivilege.READ.name()) @@ 
-215,7 +234,7 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() { // Requesting role descriptors intersection for a cluster alias // that has no cross cluster access defined should result in an empty intersection. assertThat( - role.getRoleDescriptorsIntersectionForRemoteCluster("non-existing-cluster-alias"), + role.getRoleDescriptorsIntersectionForRemoteCluster("non-existing-cluster-alias", TransportVersion.current()), equalTo(RoleDescriptorsIntersection.EMPTY) ); } @@ -238,7 +257,10 @@ public void testGetRoleDescriptorsIntersectionForRemoteClusterWithoutRemoteIndic RESTRICTED_INDICES ); - assertThat(role.getRoleDescriptorsIntersectionForRemoteCluster(randomAlphaOfLength(8)), equalTo(RoleDescriptorsIntersection.EMPTY)); + assertThat( + role.getRoleDescriptorsIntersectionForRemoteCluster(randomAlphaOfLength(8), TransportVersion.current()), + equalTo(RoleDescriptorsIntersection.EMPTY) + ); } public void testForWorkflowWithRestriction() { @@ -253,6 +275,7 @@ public void testForWorkflowWithRestriction() { null, null, null, + null, new RoleDescriptor.Restriction(new String[] { WorkflowResolver.SEARCH_APPLICATION_QUERY_WORKFLOW.name() }) ), new FieldPermissionsCache(Settings.EMPTY), @@ -267,7 +290,7 @@ public void testForWorkflowWithRestriction() { public void testForWorkflowWithoutRestriction() { final SimpleRole role = Role.buildFromRoleDescriptor( - new RoleDescriptor("r1", null, null, null, null, null, null, null, null, null), + new RoleDescriptor("r1", null, null, null, null, null, null, null, null, null, null), new FieldPermissionsCache(Settings.EMPTY), RESTRICTED_INDICES, List.of() diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 584f0a2d95fca..7d376fb8bf0be 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.core.security.authz.store; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction; import org.elasticsearch.action.admin.cluster.remote.TransportRemoteInfoAction; @@ -180,6 +181,7 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.permission.RemoteIndicesPermission; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; @@ -2700,6 +2702,10 @@ public void testSuperuserRole() { .test(mockIndexAbstraction(internalSecurityIndex)), is(false) ); + assertThat( + superuserRole.remoteCluster().privilegeNames("*", TransportVersion.current()), + equalTo(RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0])) + ); } public void testLogstashSystemRole() { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java index 4c413ad54f2f6..7fcb412de5e2b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java @@ -22,6 +22,7 @@ import org.elasticsearch.compute.data.BytesRefVector; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.OrdinalBytesRefBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.mvdedupe.MultivalueDedupe; import org.elasticsearch.compute.operator.mvdedupe.MultivalueDedupeBytesRef; @@ -87,6 +88,10 @@ IntVector add(BytesRefVector vector) { } IntBlock add(BytesRefBlock block) { + var ordinals = block.asOrdinals(); + if (ordinals != null) { + return addOrdinalsBlock(ordinals); + } MultivalueDedupe.HashResult result = new MultivalueDedupeBytesRef(block).hashAdd(blockFactory, hash); seenNull |= result.sawNull(); return result.ords(); @@ -108,6 +113,38 @@ public ReleasableIterator lookup(Page page, ByteSizeValue targetBlockS return ReleasableIterator.single(lookup(vector)); } + private IntBlock addOrdinalsBlock(OrdinalBytesRefBlock inputBlock) { + var inputOrds = inputBlock.getOrdinalsBlock(); + try ( + var builder = blockFactory.newIntBlockBuilder(inputOrds.getPositionCount()); + var hashOrds = add(inputBlock.getDictionaryVector()) + ) { + for (int i = 0; i < inputOrds.getPositionCount(); i++) { + int valueCount = inputOrds.getValueCount(i); + int firstIndex = inputOrds.getFirstValueIndex(i); + switch (valueCount) { + case 0 -> { + builder.appendInt(0); + seenNull = true; + } + case 1 -> { + int ord = hashOrds.getInt(inputOrds.getInt(firstIndex)); + builder.appendInt(ord); + } + default -> { + builder.beginPositionEntry(); + for (int v = 0; v < valueCount; v++) { + int ord = hashOrds.getInt(inputOrds.getInt(firstIndex + i)); + builder.appendInt(ord); + } + builder.endPositionEntry(); + } + } + } + return builder.build(); + } + } + private IntBlock lookup(BytesRefVector vector) { BytesRef scratch = new BytesRef(); 
int positions = vector.getPositionCount(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java index c33bd12b74bbd..1372f7b94b78c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java @@ -88,6 +88,11 @@ public BytesRefVector asVector() { return null; } + @Override + public OrdinalBytesRefBlock asOrdinals() { + return null; + } + @Override public BytesRef getBytesRef(int valueIndex, BytesRef dest) { return vector.getBytesRef(valueIndex, dest); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java index d0b600d0f0be2..81f507a4fa55a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java @@ -58,6 +58,11 @@ public BytesRefBlock asBlock() { return new BytesRefVectorBlock(this); } + @Override + public OrdinalBytesRefVector asOrdinals() { + return null; + } + @Override public BytesRef getBytesRef(int position, BytesRef dest) { return values.get(position, dest); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java index d3afcfd6dde4d..d7c28a24482e0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java @@ -41,6 +41,12 @@ public sealed interface BytesRefBlock extends Block permits BytesRefArrayBlock, @Override BytesRefVector asVector(); + /** + * Returns an ordinal bytesref block if this block is backed by a dictionary and ordinals; otherwise, + * returns null. Callers must not release the returned block as no extra reference is retained by this method. + */ + OrdinalBytesRefBlock asOrdinals(); + @Override BytesRefBlock filter(int... positions); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java index 11daa4a4f768d..4f07ca2d61049 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java @@ -25,6 +25,12 @@ public sealed interface BytesRefVector extends Vector permits ConstantBytesRefVe @Override BytesRefBlock asBlock(); + /** + * Returns an ordinal BytesRef vector if this vector is backed by a dictionary and ordinals; otherwise, + * returns null. Callers must not release the returned vector as no extra reference is retained by this method. + */ + OrdinalBytesRefVector asOrdinals(); + @Override BytesRefVector filter(int... 
positions); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java index 9838fde8a0ffe..39bd37ea9bc34 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java @@ -32,6 +32,16 @@ public BytesRefVector asVector() { return vector; } + @Override + public OrdinalBytesRefBlock asOrdinals() { + var ordinals = vector.asOrdinals(); + if (ordinals != null) { + return ordinals.asBlock(); + } else { + return null; + } + } + @Override public BytesRef getBytesRef(int valueIndex, BytesRef dest) { return vector.getBytesRef(valueIndex, dest); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java index 57ec1c945ade5..eed780a42f7ba 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java @@ -35,6 +35,11 @@ public BytesRefBlock asBlock() { return new BytesRefVectorBlock(this); } + @Override + public OrdinalBytesRefVector asOrdinals() { + return null; + } + @Override public BytesRefVector filter(int... 
positions) { return blockFactory().newConstantBytesRefVector(value, positions.length); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java index 431d8fe3bcd5d..abd11f98e7376 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java @@ -35,7 +35,7 @@ * @see BytesRefHash */ public abstract sealed class BlockHash implements Releasable, SeenGroupIds // - permits BooleanBlockHash, BytesRefBlockHash, DoubleBlockHash, IntBlockHash, LongBlockHash, // + permits BooleanBlockHash, BytesRefBlockHash, DoubleBlockHash, IntBlockHash, LongBlockHash, BytesRef3BlockHash, // NullBlockHash, PackedValuesBlockHash, BytesRefLongBlockHash, LongLongBlockHash, TimeSeriesBlockHash { protected final BlockFactory blockFactory; @@ -95,6 +95,9 @@ public static BlockHash build(List groups, BlockFactory blockFactory, if (groups.size() == 1) { return newForElementType(groups.get(0).channel(), groups.get(0).elementType(), blockFactory); } + if (groups.size() == 3 && groups.stream().allMatch(g -> g.elementType == ElementType.BYTES_REF)) { + return new BytesRef3BlockHash(blockFactory, groups.get(0).channel, groups.get(1).channel, groups.get(2).channel, emitBatchSize); + } if (allowBrokenOptimizations && groups.size() == 2) { var g1 = groups.get(0); var g2 = groups.get(1); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRef3BlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRef3BlockHash.java new file mode 100644 index 0000000000000..ce11d1bb64146 --- /dev/null +++ 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRef3BlockHash.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation.blockhash; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.Int3Hash; +import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; + +import java.util.Locale; + +/** + * Maps three {@link BytesRefBlock}s to group ids. 
+ */ +final class BytesRef3BlockHash extends BlockHash { + private final int emitBatchSize; + private final int channel1; + private final int channel2; + private final int channel3; + private final BytesRefBlockHash hash1; + private final BytesRefBlockHash hash2; + private final BytesRefBlockHash hash3; + private final Int3Hash finalHash; + + BytesRef3BlockHash(BlockFactory blockFactory, int channel1, int channel2, int channel3, int emitBatchSize) { + super(blockFactory); + this.emitBatchSize = emitBatchSize; + this.channel1 = channel1; + this.channel2 = channel2; + this.channel3 = channel3; + boolean success = false; + try { + this.hash1 = new BytesRefBlockHash(channel1, blockFactory); + this.hash2 = new BytesRefBlockHash(channel2, blockFactory); + this.hash3 = new BytesRefBlockHash(channel3, blockFactory); + this.finalHash = new Int3Hash(1, blockFactory.bigArrays()); + success = true; + } finally { + if (success == false) { + close(); + } + } + } + + @Override + public void close() { + Releasables.close(hash1, hash2, hash3, finalHash); + } + + @Override + public void add(Page page, GroupingAggregatorFunction.AddInput addInput) { + BytesRefBlock b1 = page.getBlock(channel1); + BytesRefBlock b2 = page.getBlock(channel2); + BytesRefBlock b3 = page.getBlock(channel3); + BytesRefVector v1 = b1.asVector(); + BytesRefVector v2 = b2.asVector(); + BytesRefVector v3 = b3.asVector(); + if (v1 != null && v2 != null && v3 != null) { + addVectors(v1, v2, v3, addInput); + } else { + try (IntBlock k1 = hash1.add(b1); IntBlock k2 = hash2.add(b2); IntBlock k3 = hash3.add(b3)) { + try (AddWork work = new AddWork(k1, k2, k3, addInput)) { + work.add(); + } + } + } + } + + private void addVectors(BytesRefVector v1, BytesRefVector v2, BytesRefVector v3, GroupingAggregatorFunction.AddInput addInput) { + final int positionCount = v1.getPositionCount(); + try (IntVector.Builder ordsBuilder = blockFactory.newIntVectorFixedBuilder(positionCount)) { + // TODO: enable ordinal vectors in 
BytesRefBlockHash + try (IntVector k1 = hash1.add(v1); IntVector k2 = hash2.add(v2); IntVector k3 = hash3.add(v3)) { + for (int p = 0; p < positionCount; p++) { + long ord = hashOrdToGroup(finalHash.add(k1.getInt(p), k2.getInt(p), k3.getInt(p))); + ordsBuilder.appendInt(Math.toIntExact(ord)); + } + } + try (IntVector ords = ordsBuilder.build()) { + addInput.add(0, ords); + } + } + } + + private class AddWork extends AbstractAddBlock { + final IntBlock b1; + final IntBlock b2; + final IntBlock b3; + + AddWork(IntBlock b1, IntBlock b2, IntBlock b3, GroupingAggregatorFunction.AddInput addInput) { + super(blockFactory, emitBatchSize, addInput); + this.b1 = b1; + this.b2 = b2; + this.b3 = b3; + } + + void add() { + int positionCount = b1.getPositionCount(); + for (int i = 0; i < positionCount; i++) { + int v1 = b1.getValueCount(i); + int v2 = b2.getValueCount(i); + int v3 = b3.getValueCount(i); + int first1 = b1.getFirstValueIndex(i); + int first2 = b2.getFirstValueIndex(i); + int first3 = b3.getFirstValueIndex(i); + if (v1 == 1 && v2 == 1 && v3 == 1) { + long ord = hashOrdToGroup(finalHash.add(b1.getInt(first1), b2.getInt(first2), b3.getInt(first3))); + ords.appendInt(Math.toIntExact(ord)); + addedValue(i); + continue; + } + ords.beginPositionEntry(); + for (int i1 = 0; i1 < v1; i1++) { + int k1 = b1.getInt(first1 + i1); + for (int i2 = 0; i2 < v2; i2++) { + int k2 = b2.getInt(first2 + i2); + for (int i3 = 0; i3 < v3; i3++) { + int k3 = b3.getInt(first3 + i3); + long ord = hashOrdToGroup(finalHash.add(k1, k2, k3)); + ords.appendInt(Math.toIntExact(ord)); + addedValueInMultivaluePosition(i); + } + } + } + ords.endPositionEntry(); + } + emitOrds(); + } + } + + @Override + public ReleasableIterator lookup(Page page, ByteSizeValue targetBlockSize) { + throw new UnsupportedOperationException("TODO"); + } + + @Override + public Block[] getKeys() { + final int positions = (int) finalHash.size(); + final BytesRef scratch = new BytesRef(); + final BytesRefBlock[] outputBlocks = 
new BytesRefBlock[3]; + try { + try (BytesRefBlock.Builder b1 = blockFactory.newBytesRefBlockBuilder(positions)) { + for (int i = 0; i < positions; i++) { + int k1 = finalHash.getKey1(i); + if (k1 == 0) { + b1.appendNull(); + } else { + b1.appendBytesRef(hash1.hash.get(k1 - 1, scratch)); + } + } + outputBlocks[0] = b1.build(); + } + try (BytesRefBlock.Builder b2 = blockFactory.newBytesRefBlockBuilder(positions)) { + for (int i = 0; i < positions; i++) { + int k2 = finalHash.getKey2(i); + if (k2 == 0) { + b2.appendNull(); + } else { + b2.appendBytesRef(hash2.hash.get(k2 - 1, scratch)); + } + } + outputBlocks[1] = b2.build(); + } + try (BytesRefBlock.Builder b3 = blockFactory.newBytesRefBlockBuilder(positions)) { + for (int i = 0; i < positions; i++) { + int k3 = finalHash.getKey3(i); + if (k3 == 0) { + b3.appendNull(); + } else { + b3.appendBytesRef(hash3.hash.get(k3 - 1, scratch)); + } + } + outputBlocks[2] = b3.build(); + } + return outputBlocks; + } finally { + if (outputBlocks[outputBlocks.length - 1] == null) { + Releasables.close(outputBlocks); + } + } + } + + @Override + public BitArray seenGroupIds(BigArrays bigArrays) { + return new Range(0, Math.toIntExact(finalHash.size())).seenGroupIds(bigArrays); + } + + @Override + public IntVector nonEmpty() { + return IntVector.range(0, Math.toIntExact(finalHash.size()), blockFactory); + } + + @Override + public String toString() { + return String.format( + Locale.ROOT, + "BytesRef3BlockHash{keys=[channel1=%d, channel2=%d, channel3=%d], entries=%d}", + channel1, + channel2, + channel3, + finalHash.size() + ); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/X-BlockHash.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/X-BlockHash.java.st index 1e4c5af134aa3..b4f700980558e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/X-BlockHash.java.st +++ 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/X-BlockHash.java.st @@ -27,6 +27,7 @@ import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.OrdinalBytesRefBlock; $elseif(double)$ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -125,6 +126,12 @@ $endif$ } IntBlock add($Type$Block block) { +$if(BytesRef)$ + var ordinals = block.asOrdinals(); + if (ordinals != null) { + return addOrdinalsBlock(ordinals); + } +$endif$ MultivalueDedupe.HashResult result = new MultivalueDedupe$Type$(block).hashAdd(blockFactory, hash); seenNull |= result.sawNull(); return result.ords(); @@ -146,6 +153,40 @@ $endif$ return ReleasableIterator.single(lookup(vector)); } +$if(BytesRef)$ + private IntBlock addOrdinalsBlock(OrdinalBytesRefBlock inputBlock) { + var inputOrds = inputBlock.getOrdinalsBlock(); + try ( + var builder = blockFactory.newIntBlockBuilder(inputOrds.getPositionCount()); + var hashOrds = add(inputBlock.getDictionaryVector()) + ) { + for (int i = 0; i < inputOrds.getPositionCount(); i++) { + int valueCount = inputOrds.getValueCount(i); + int firstIndex = inputOrds.getFirstValueIndex(i); + switch (valueCount) { + case 0 -> { + builder.appendInt(0); + seenNull = true; + } + case 1 -> { + int ord = hashOrds.getInt(inputOrds.getInt(firstIndex)); + builder.appendInt(ord); + } + default -> { + builder.beginPositionEntry(); + for (int v = 0; v < valueCount; v++) { + int ord = hashOrds.getInt(inputOrds.getInt(firstIndex + v)); + builder.appendInt(ord); + } + builder.endPositionEntry(); + } + } + } + return builder.build(); + } + } +$endif$ + private IntBlock lookup($Type$Vector vector) { $if(BytesRef)$ BytesRef scratch = new BytesRef(); diff --git
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java index 1baa4d2283b25..3b08f46e6e7fc 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java @@ -42,6 +42,11 @@ public ConstantNullVector asVector() { return null; } + @Override + public OrdinalBytesRefBlock asOrdinals() { + return null; + } + @Override public boolean isNull(int position) { return true; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullVector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullVector.java index ebe1aeda24412..e262259424fa2 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullVector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullVector.java @@ -33,6 +33,12 @@ public ConstantNullBlock asBlock() { throw new UnsupportedOperationException("null vector"); } + @Override + public OrdinalBytesRefVector asOrdinals() { + assert false : "null vector"; + throw new UnsupportedOperationException("null vector"); + } + @Override public ConstantNullVector filter(int... 
positions) { assert false : "null vector"; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java index 41ab5256e9109..947d0daded40d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java @@ -80,6 +80,11 @@ public BytesRefVector asVector() { } } + @Override + public OrdinalBytesRefBlock asOrdinals() { + return this; + } + @Override public BytesRefBlock filter(int... positions) { if (positions.length * ordinals.getTotalValueCount() >= bytes.getPositionCount() * ordinals.getPositionCount()) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefVector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefVector.java index f353961454b02..a67db54b68ec9 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefVector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefVector.java @@ -78,8 +78,21 @@ public BytesRef getBytesRef(int position, BytesRef dest) { } @Override - public BytesRefBlock asBlock() { - return new BytesRefVectorBlock(this); + public OrdinalBytesRefBlock asBlock() { + return new OrdinalBytesRefBlock(ordinals.asBlock(), bytes); + } + + @Override + public OrdinalBytesRefVector asOrdinals() { + return this; + } + + public IntVector getOrdinalsVector() { + return ordinals; + } + + public BytesRefVector getDictionaryVector() { + return bytes; } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st index 
1de2fa239e61e..1f4285686f03a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st @@ -96,6 +96,13 @@ final class $Type$ArrayBlock extends AbstractArrayBlock implements $Type$Block { return null; } +$if(BytesRef)$ + @Override + public OrdinalBytesRefBlock asOrdinals() { + return null; + } +$endif$ + @Override $if(BytesRef)$ public BytesRef getBytesRef(int valueIndex, BytesRef dest) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st index 9615ce83215e8..dc95512b6439b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st @@ -107,6 +107,13 @@ $endif$ return new $Type$VectorBlock(this); } +$if(BytesRef)$ + @Override + public OrdinalBytesRefVector asOrdinals() { + return null; + } +$endif$ + $if(BytesRef)$ @Override public BytesRef getBytesRef(int position, BytesRef dest) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st index b9d3dfc1f16ff..6d62d44f99e66 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st @@ -57,6 +57,14 @@ $endif$ @Override $Type$Vector asVector(); +$if(BytesRef)$ + /** + * Returns an ordinal bytesref block if this block is backed by a dictionary and ordinals; otherwise, + * returns null. Callers must not release the returned block as no extra reference is retained by this method. 
+ */ + OrdinalBytesRefBlock asOrdinals(); +$endif$ + @Override $Type$Block filter(int... positions); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st index 625f014a20ffc..b33f91f8c648f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st @@ -46,6 +46,13 @@ $endif$ return new $Type$VectorBlock(this); } +$if(BytesRef)$ + @Override + public OrdinalBytesRefVector asOrdinals() { + return null; + } +$endif$ + @Override public $Type$Vector filter(int... positions) { return blockFactory().newConstant$Type$Vector(value, positions.length); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st index 6979883534323..a7f805ea02570 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st @@ -43,6 +43,14 @@ $endif$ @Override $Type$Block asBlock(); +$if(BytesRef)$ + /** + * Returns an ordinal BytesRef vector if this vector is backed by a dictionary and ordinals; otherwise, + * returns null. Callers must not release the returned vector as no extra reference is retained by this method. + */ + OrdinalBytesRefVector asOrdinals(); +$endif$ + @Override $Type$Vector filter(int... 
positions); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st index 274457a4d5bd8..f011d6f2a4b48 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st @@ -34,6 +34,18 @@ public final class $Type$VectorBlock extends AbstractVectorBlock implements $Typ return vector; } +$if(BytesRef)$ + @Override + public OrdinalBytesRefBlock asOrdinals() { + var ordinals = vector.asOrdinals(); + if (ordinals != null) { + return ordinals.asBlock(); + } else { + return null; + } + } +$endif$ + @Override $if(BytesRef)$ public BytesRef getBytesRef(int valueIndex, BytesRef dest) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java index f821f2a37d1cf..4c2f2410addd1 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java @@ -122,8 +122,13 @@ public void add(int positionOffset, IntVector groupIds) { @Override protected ReleasableIterator receive(Page page) { Page mapped = page.projectBlocks(blockMapping); - page.releaseBlocks(); - return appendBlocks(mapped, hash.lookup(mapped, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE)); + // TODO maybe this should take an array of Blocks instead? 
+ try { + // hash.lookup increments any references we need to keep for the iterator + return appendBlocks(page, hash.lookup(mapped, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE)); + } finally { + mapped.releaseBlocks(); + } } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java index da014ada387d6..dcdba70652910 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java @@ -101,7 +101,6 @@ public void registerTransportHandler(TransportService transportService) { OpenExchangeRequest::new, new OpenExchangeRequestHandler() ); - } /** diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashRandomizedTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashRandomizedTests.java index 73863bec7bf8a..0b296fcb5c18d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashRandomizedTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashRandomizedTests.java @@ -207,7 +207,9 @@ private void test(MockBlockFactory blockFactory) { assertMap(keyList, keyMatcher); } - if (blockHash instanceof LongLongBlockHash == false && blockHash instanceof BytesRefLongBlockHash == false) { + if (blockHash instanceof LongLongBlockHash == false + && blockHash instanceof BytesRefLongBlockHash == false + && blockHash instanceof BytesRef3BlockHash == false) { assertLookup(blockFactory, expectedOrds, types, blockHash, oracle); } } finally { diff --git 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java index cf43df98e2629..499f552cca816 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java @@ -21,12 +21,14 @@ import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.MockBlockFactory; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.data.TestBlockFactory; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -43,6 +45,7 @@ import java.util.stream.LongStream; import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -1105,6 +1108,97 @@ public void testLongNull() { }, blockFactory.newLongArrayVector(values, values.length).asBlock(), blockFactory.newConstantNullBlock(values.length)); } + public void test3BytesRefs() { + final Page page; + final int positions = randomIntBetween(1, 1000); + final boolean generateVector = randomBoolean(); + try ( + BytesRefBlock.Builder builder1 = blockFactory.newBytesRefBlockBuilder(positions); + BytesRefBlock.Builder builder2 = 
blockFactory.newBytesRefBlockBuilder(positions); + BytesRefBlock.Builder builder3 = blockFactory.newBytesRefBlockBuilder(positions) + ) { + List builders = List.of(builder1, builder2, builder3); + for (int p = 0; p < positions; p++) { + for (BytesRefBlock.Builder builder : builders) { + int valueCount = generateVector ? 1 : between(0, 3); + switch (valueCount) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendBytesRef(new BytesRef(Integer.toString(between(1, 100)))); + default -> { + builder.beginPositionEntry(); + for (int v = 0; v < valueCount; v++) { + builder.appendBytesRef(new BytesRef(Integer.toString(between(1, 100)))); + } + builder.endPositionEntry(); + } + } + } + } + page = new Page(builder1.build(), builder2.build(), builder3.build()); + } + final int emitBatchSize = between(positions, 10 * 1024); + var groupSpecs = List.of( + new BlockHash.GroupSpec(0, ElementType.BYTES_REF), + new BlockHash.GroupSpec(1, ElementType.BYTES_REF), + new BlockHash.GroupSpec(2, ElementType.BYTES_REF) + ); + record Output(int offset, IntBlock block, IntVector vector) implements Releasable { + @Override + public void close() { + Releasables.close(block, vector); + } + } + List output1 = new ArrayList<>(); + List output2 = new ArrayList<>(); + try ( + BlockHash hash1 = new BytesRef3BlockHash(blockFactory, 0, 1, 2, emitBatchSize); + BlockHash hash2 = new PackedValuesBlockHash(groupSpecs, blockFactory, emitBatchSize) + ) { + hash1.add(page, new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + groupIds.incRef(); + output1.add(new Output(positionOffset, groupIds, null)); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + groupIds.incRef(); + output1.add(new Output(positionOffset, null, groupIds)); + } + }); + hash2.add(page, new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + groupIds.incRef(); + output2.add(new 
Output(positionOffset, groupIds, null)); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + groupIds.incRef(); + output2.add(new Output(positionOffset, null, groupIds)); + } + }); + assertThat(output1.size(), equalTo(output2.size())); + for (int i = 0; i < output1.size(); i++) { + Output o1 = output1.get(i); + Output o2 = output2.get(i); + assertThat(o1.offset, equalTo(o2.offset)); + if (o1.vector != null) { + assertThat(o1.vector, either(equalTo(o2.vector)).or(equalTo(o2.block.asVector()))); + } else { + assertNull(o2.vector); + assertThat(o1.block, equalTo(o2.block)); + } + } + } finally { + Releasables.close(output1); + Releasables.close(output2); + page.releaseBlocks(); + } + } + record OrdsAndKeys(String description, int positionOffset, IntBlock ords, Block[] keys, IntVector nonEmpty) {} /** @@ -1128,7 +1222,9 @@ private void hash(Consumer callback, Block... values) { } called[0] = true; callback.accept(ordsAndKeys); - if (hash instanceof LongLongBlockHash == false && hash instanceof BytesRefLongBlockHash == false) { + if (hash instanceof LongLongBlockHash == false + && hash instanceof BytesRefLongBlockHash == false + && hash instanceof BytesRef3BlockHash == false) { try (ReleasableIterator lookup = hash.lookup(new Page(values), ByteSizeValue.ofKb(between(1, 100)))) { assertThat(lookup.hasNext(), equalTo(true)); try (IntBlock ords = lookup.next()) { @@ -1202,7 +1298,9 @@ public void add(int positionOffset, IntVector groupIds) { add(positionOffset, groupIds.asBlock()); } }); - if (blockHash instanceof LongLongBlockHash == false && blockHash instanceof BytesRefLongBlockHash == false) { + if (blockHash instanceof LongLongBlockHash == false + && blockHash instanceof BytesRefLongBlockHash == false + && blockHash instanceof BytesRef3BlockHash == false) { Block[] keys = blockHash.getKeys(); try (ReleasableIterator lookup = blockHash.lookup(new Page(keys), ByteSizeValue.ofKb(between(1, 100)))) { while (lookup.hasNext()) { diff --git
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java index 7198e11e0464a..66314f1a95e05 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java @@ -8,10 +8,12 @@ package org.elasticsearch.compute.operator; import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockTestUtils; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.data.TestBlockFactory; +import org.elasticsearch.core.Tuple; import org.hamcrest.Matcher; import java.util.List; @@ -28,6 +30,10 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { @Override protected void assertSimpleOutput(List input, List results) { + assertSimpleOutput(input, results, 0, 1); + } + + private void assertSimpleOutput(List input, List results, int keyChannel, int outputChannel) { int count = input.stream().mapToInt(Page::getPositionCount).sum(); assertThat(results.stream().mapToInt(Page::getPositionCount).sum(), equalTo(count)); int keysIdx = 0; @@ -39,10 +45,10 @@ protected void assertSimpleOutput(List input, List results) { int p = 0; while (p < count) { if (keys == null) { - keys = input.get(keysIdx++).getBlock(0); + keys = input.get(keysIdx++).getBlock(keyChannel); } if (ords == null) { - ords = results.get(ordsIdx++).getBlock(1); + ords = results.get(ordsIdx++).getBlock(outputChannel); } int valueCount = keys.getValueCount(p - keysOffset); assertThat(ords.getValueCount(p - ordsOffset), equalTo(valueCount)); @@ -92,4 +98,28 @@ protected Matcher expectedToStringOfSimple() { "HashLookup\\[keys=\\[foo], 
hash=PackedValuesBlockHash\\{groups=\\[0:LONG], entries=4, size=\\d+b}, mapping=\\[0]]" ); } + + public void testSelectBlocks() { + DriverContext context = driverContext(); + List input = CannedSourceOperator.collectPages( + new TupleBlockSourceOperator( + context.blockFactory(), + LongStream.range(0, 1000).mapToObj(l -> Tuple.tuple(randomLong(), randomFrom(1L, 7L, 14L, 20L))) + ) + ); + List clonedInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); + List results = drive( + new HashLookupOperator.Factory( + new HashLookupOperator.Key[] { + new HashLookupOperator.Key( + "foo", + TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 1, 7, 14, 20 }, 4).asBlock() + ) }, + new int[] { 1 } + ).get(context), + input.iterator(), + context + ); + assertSimpleOutput(clonedInput, results, 1, 2); + } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java index 5113346baf0ac..3d5468cd2bfc9 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java @@ -9,9 +9,15 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.Build; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; 
import org.elasticsearch.xpack.esql.analysis.Verifier; @@ -30,6 +36,7 @@ import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; import org.elasticsearch.xpack.ql.type.DateUtils; import org.elasticsearch.xpack.ql.type.EsField; import org.elasticsearch.xpack.ql.type.TypesTests; @@ -40,6 +47,7 @@ import java.io.InputStream; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Locale; @@ -48,6 +56,7 @@ import java.util.regex.Pattern; import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ListMatcher.matchesList; import static org.elasticsearch.test.MapMatcher.assertMap; @@ -109,6 +118,8 @@ public boolean isIndexed(String field) { public static final TestSearchStats TEST_SEARCH_STATS = new TestSearchStats(); + private static final Map> TABLES = tables(); + public static final EsqlConfiguration TEST_CFG = configuration(new QueryPragmas(Settings.EMPTY)); public static final Verifier TEST_VERIFIER = new Verifier(new Metrics()); @@ -125,7 +136,8 @@ public static EsqlConfiguration configuration(QueryPragmas pragmas, String query EsqlPlugin.QUERY_RESULT_TRUNCATION_MAX_SIZE.getDefault(Settings.EMPTY), EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getDefault(Settings.EMPTY), query, - false + false, + TABLES ); } @@ -263,4 +275,45 @@ public static void assertWarnings(List warnings, List allowedWar } } } + + static Map> tables() { + BlockFactory factory = new BlockFactory(new NoopCircuitBreaker(CircuitBreaker.REQUEST), BigArrays.NON_RECYCLING_INSTANCE); + Map> tables = new HashMap<>(); + try ( + IntBlock.Builder ints = factory.newIntBlockBuilder(10); + LongBlock.Builder longs 
= factory.newLongBlockBuilder(10); + BytesRefBlock.Builder names = factory.newBytesRefBlockBuilder(10); + ) { + for (int i = 0; i < 10; i++) { + ints.appendInt(i); + longs.appendLong(i); + names.appendBytesRef(new BytesRef(switch (i) { + case 0 -> "zero"; + case 1 -> "one"; + case 2 -> "two"; + case 3 -> "three"; + case 4 -> "four"; + case 5 -> "five"; + case 6 -> "six"; + case 7 -> "seven"; + case 8 -> "eight"; + case 9 -> "nine"; + default -> throw new IllegalArgumentException(); + })); + } + + IntBlock intsBlock = ints.build(); + LongBlock longsBlock = longs.build(); + BytesRefBlock namesBlock = names.build(); + tables.put( + "int_number_names", + Map.of("int", new Column(DataTypes.INTEGER, intsBlock), "name", new Column(DataTypes.KEYWORD, namesBlock)) + ); + tables.put( + "long_number_names", + Map.of("long", new Column(DataTypes.LONG, longsBlock), "name", new Column(DataTypes.KEYWORD, namesBlock)) + ); + } + return unmodifiableMap(tables); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/Column.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/Column.java new file mode 100644 index 0000000000000..b3d22da643ef4 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/Column.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockStreamInput; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.type.DataType; + +import java.io.IOException; + +/** + * A column of data provided in the request. + */ +public record Column(DataType type, Block values) implements Releasable, Writeable { + public Column(BlockStreamInput in) throws IOException { + this(EsqlDataTypes.fromTypeName(in.readString()), in.readNamedWriteable(Block.class)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(type.typeName()); + out.writeNamedWriteable(values); + } + + @Override + public void close() { + Releasables.closeExpectNoException(values); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java index 32ff0cf7bc6aa..08d10e5ca7763 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java @@ -11,21 +11,26 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.tasks.CancellableTask; import 
org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.xpack.esql.Column; import org.elasticsearch.xpack.esql.parser.TypedParamValue; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.elasticsearch.xpack.esql.version.EsqlVersion; import java.io.IOException; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.TreeMap; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -49,6 +54,11 @@ public class EsqlQueryRequest extends org.elasticsearch.xpack.core.esql.action.E private boolean keepOnCompletion; private boolean onSnapshotBuild = Build.current().isSnapshot(); + /** + * "Tables" provided in the request for use with things like {@code LOOKUP}. + */ + private final Map> tables = new TreeMap<>(); + static EsqlQueryRequest syncEsqlQueryRequest() { return new EsqlQueryRequest(false); } @@ -84,11 +94,19 @@ public ActionRequestValidationException validate() { if (Strings.hasText(query) == false) { validationException = addValidationError("[" + RequestXContent.QUERY_FIELD + "] is required", validationException); } - if (onSnapshotBuild == false && pragmas.isEmpty() == false) { - validationException = addValidationError( - "[" + RequestXContent.PRAGMA_FIELD + "] only allowed in snapshot builds", - validationException - ); + if (onSnapshotBuild == false) { + if (pragmas.isEmpty() == false) { + validationException = addValidationError( + "[" + RequestXContent.PRAGMA_FIELD + "] only allowed in snapshot builds", + validationException + ); + } + if (tables.isEmpty() == false) { + validationException = addValidationError( + "[" + RequestXContent.TABLES_FIELD + "] only allowed in snapshot builds", + validationException + ); + } } return validationException; } @@ -207,6 +225,36 @@ public void keepOnCompletion(boolean keepOnCompletion) { this.keepOnCompletion = keepOnCompletion; } + /** + * Add a "table" to the request for use with things 
like {@code LOOKUP}. + */ + public void addTable(String name, Map columns) { + for (Column c : columns.values()) { + if (false == c.values().blockFactory().breaker() instanceof NoopCircuitBreaker) { + throw new AssertionError("block tracking not supported on tables parameter"); + } + } + Iterator itr = columns.values().iterator(); + if (itr.hasNext()) { + int firstSize = itr.next().values().getPositionCount(); + while (itr.hasNext()) { + int size = itr.next().values().getPositionCount(); + if (size != firstSize) { + throw new IllegalArgumentException("mismatched column lengths: was [" + size + "] but expected [" + firstSize + "]"); + } + } + } + var prev = tables.put(name, columns); + if (prev != null) { + Releasables.close(prev.values()); + throw new IllegalArgumentException("duplicate table for [" + name + "]"); + } + } + + public Map> tables() { + return tables; + } + @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { // Pass the query as the description diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ParseTables.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ParseTables.java new file mode 100644 index 0000000000000..771adb6f9ce5c --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ParseTables.java @@ -0,0 +1,249 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xcontent.XContentParseException; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.esql.Column; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.io.IOException; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +/** + * Parses the {@code tables} request body parameter. + */ +public class ParseTables { + public static final Set SUPPORTED_TYPES = Set.of(DataTypes.INTEGER, DataTypes.KEYWORD, DataTypes.LONG); + private static final int MAX_LENGTH = (int) ByteSizeValue.ofMb(1).getBytes(); + + private final BlockFactory blockFactory; + private final EsqlQueryRequest request; + private final XContentParser p; + private int length; + + ParseTables(EsqlQueryRequest request, XContentParser p) { + // TODO use a real block factory + this.blockFactory = new BlockFactory(new NoopCircuitBreaker(CircuitBreaker.REQUEST), BigArrays.NON_RECYCLING_INSTANCE); + this.request = request; + this.p = p; + } + + void parseTables() throws IOException { + if (p.currentToken() != XContentParser.Token.START_OBJECT) { + throw new XContentParseException(p.getTokenLocation(), "expected " + XContentParser.Token.START_OBJECT); + } + while (true) { + switch (p.nextToken()) { + case END_OBJECT -> { + return; + } + case FIELD_NAME -> { + String name = p.currentName(); + p.nextToken(); + 
request.addTable(name, parseTable()); + } + } + } + } + + /** + * Parse a table from the request. Object keys are in the format {@code name:type} + * so we can be sure we'll always have a type. + */ + private Map parseTable() throws IOException { + Map columns = new TreeMap<>(); + boolean success = false; + try { + if (p.currentToken() != XContentParser.Token.START_OBJECT) { + throw new XContentParseException(p.getTokenLocation(), "expected " + XContentParser.Token.START_OBJECT); + } + while (true) { + switch (p.nextToken()) { + case END_OBJECT -> { + success = true; + return columns; + } + case FIELD_NAME -> { + String[] fname = p.currentName().split(":"); + if (fname.length != 2) { + throw new XContentParseException( + p.getTokenLocation(), + "expected columns named name:type but was [" + p.currentName() + "]" + ); + } + if (columns.containsKey(fname[0])) { + throw new XContentParseException(p.getTokenLocation(), "duplicate column name [" + fname[0] + "]"); + } + columns.put(fname[0], parseColumn(fname[1])); + } + default -> throw new XContentParseException( + p.getTokenLocation(), + "expected " + XContentParser.Token.END_OBJECT + " or " + XContentParser.Token.FIELD_NAME + ); + } + } + } finally { + if (success == false) { + Releasables.close(columns.values()); + } + } + } + + private Column parseColumn(String type) throws IOException { + return switch (type) { + case "integer" -> parseIntColumn(); + case "keyword" -> parseKeywordColumn(); + case "long" -> parseLongColumn(); + default -> throw new XContentParseException(p.getTokenLocation(), "unsupported type [" + type + "]"); + }; + } + + private Column parseKeywordColumn() throws IOException { + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(100)) { // TODO 100?! 
+ XContentParser.Token token = p.nextToken(); + if (token != XContentParser.Token.START_ARRAY) { + throw new XContentParseException(p.getTokenLocation(), "expected " + XContentParser.Token.START_ARRAY); + } + BytesRefBuilder scratch = new BytesRefBuilder(); + while (true) { + switch (p.nextToken()) { + case END_ARRAY -> { + return new Column(DataTypes.KEYWORD, builder.build()); + } + case START_ARRAY -> parseTextArray(builder, scratch); + case VALUE_NULL -> builder.appendNull(); + case VALUE_STRING, VALUE_NUMBER, VALUE_BOOLEAN -> appendText(builder, scratch); + default -> throw new XContentParseException(p.getTokenLocation(), "expected string, array of strings, or null"); + } + } + } + } + + private void parseTextArray(BytesRefBlock.Builder builder, BytesRefBuilder scratch) throws IOException { + builder.beginPositionEntry(); + while (true) { + switch (p.nextToken()) { + case END_ARRAY -> { + builder.endPositionEntry(); + return; + } + case VALUE_STRING -> appendText(builder, scratch); + default -> throw new XContentParseException(p.getTokenLocation(), "expected string"); + } + } + } + + private void appendText(BytesRefBlock.Builder builder, BytesRefBuilder scratch) throws IOException { + scratch.clear(); + String v = p.text(); + scratch.copyChars(v, 0, v.length()); + length += scratch.length(); + if (length > MAX_LENGTH) { + throw new XContentParseException(p.getTokenLocation(), "tables too big"); + } + builder.appendBytesRef(scratch.get()); + } + + private Column parseIntColumn() throws IOException { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(100)) { // TODO 100?! 
+ XContentParser.Token token = p.nextToken(); + if (token != XContentParser.Token.START_ARRAY) { + throw new XContentParseException(p.getTokenLocation(), "expected " + XContentParser.Token.START_ARRAY); + } + while (true) { + switch (p.nextToken()) { + case END_ARRAY -> { + return new Column(DataTypes.INTEGER, builder.build()); + } + case START_ARRAY -> parseIntArray(builder); + case VALUE_NULL -> builder.appendNull(); + case VALUE_NUMBER, VALUE_STRING -> appendInt(builder); + default -> throw new XContentParseException(p.getTokenLocation(), "expected number, array of numbers, or null"); + } + } + } + } + + private void parseIntArray(IntBlock.Builder builder) throws IOException { + builder.beginPositionEntry(); + while (true) { + switch (p.nextToken()) { + case END_ARRAY -> { + builder.endPositionEntry(); + return; + } + case VALUE_NUMBER, VALUE_STRING -> appendInt(builder); + default -> throw new XContentParseException(p.getTokenLocation(), "expected number"); + } + } + } + + private void appendInt(IntBlock.Builder builder) throws IOException { + length += Integer.BYTES; + if (length > MAX_LENGTH) { + throw new XContentParseException(p.getTokenLocation(), "tables too big"); + } + builder.appendInt(p.intValue()); + } + + private Column parseLongColumn() throws IOException { + try (LongBlock.Builder builder = blockFactory.newLongBlockBuilder(100)) { // TODO 100?! 
+ XContentParser.Token token = p.nextToken(); + if (token != XContentParser.Token.START_ARRAY) { + throw new XContentParseException(p.getTokenLocation(), "expected " + XContentParser.Token.START_ARRAY); + } + while (true) { + switch (p.nextToken()) { + case END_ARRAY -> { + return new Column(DataTypes.LONG, builder.build()); + } + case START_ARRAY -> parseLongArray(builder); + case VALUE_NULL -> builder.appendNull(); + case VALUE_NUMBER, VALUE_STRING -> appendLong(builder); + default -> throw new XContentParseException(p.getTokenLocation(), "expected number, array of numbers, or null"); + } + } + } + } + + private void parseLongArray(LongBlock.Builder builder) throws IOException { + builder.beginPositionEntry(); + while (true) { + switch (p.nextToken()) { + case END_ARRAY -> { + builder.endPositionEntry(); + return; + } + case VALUE_NUMBER, VALUE_STRING -> appendLong(builder); + default -> throw new XContentParseException(p.getTokenLocation(), "expected number"); + } + } + } + + private void appendLong(LongBlock.Builder builder) throws IOException { + length += Long.BYTES; + if (length > MAX_LENGTH) { + throw new XContentParseException(p.getTokenLocation(), "tables too big"); + } + builder.appendLong(p.longValue()); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java index ef82f666ce904..014d445f79564 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java @@ -54,6 +54,7 @@ final class RequestXContent { private static final ParseField PARAMS_FIELD = new ParseField("params"); private static final ParseField LOCALE_FIELD = new ParseField("locale"); private static final ParseField PROFILE_FIELD = new ParseField("profile"); + static final ParseField TABLES_FIELD = new 
ParseField("tables"); static final ParseField WAIT_FOR_COMPLETION_TIMEOUT = new ParseField("wait_for_completion_timeout"); static final ParseField KEEP_ALIVE = new ParseField("keep_alive"); @@ -85,6 +86,7 @@ private static void objectParserCommon(ObjectParser parser) parser.declareField(EsqlQueryRequest::params, RequestXContent::parseParams, PARAMS_FIELD, VALUE_ARRAY); parser.declareString((request, localeTag) -> request.locale(Locale.forLanguageTag(localeTag)), LOCALE_FIELD); parser.declareBoolean(EsqlQueryRequest::profile, PROFILE_FIELD); + parser.declareField((p, r, c) -> new ParseTables(r, p).parseTables(), TABLES_FIELD, ObjectParser.ValueType.OBJECT); } private static ObjectParser objectParserSync(Supplier supplier) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 86637e543b43c..ed48138e5c30b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -377,11 +377,11 @@ private LogicalPlan resolveAggregate(Aggregate a, List childrenOutput } } - if (a.expressionsResolved() == false && Resolvables.resolved(groupings)) { + if (a.expressionsResolved() == false) { AttributeMap resolved = new AttributeMap<>(); for (Expression e : groupings) { Attribute attr = Expressions.attribute(e); - if (attr != null) { + if (attr != null && attr.resolved()) { resolved.put(attr, attr); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index 17d189626d4e7..e9a2fb88e1991 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -29,6 +29,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockStreamInput; +import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; @@ -250,9 +251,10 @@ private void doLookup( ) { Block inputBlock = inputPage.getBlock(0); final IntBlock selectedPositions; - if (inputBlock instanceof OrdinalBytesRefBlock ordinalBytesRefBlock) { - inputBlock = ordinalBytesRefBlock.getDictionaryVector().asBlock(); - selectedPositions = ordinalBytesRefBlock.getOrdinalsBlock(); + final OrdinalBytesRefBlock ordinalsBytesRefBlock; + if (inputBlock instanceof BytesRefBlock bytesRefBlock && (ordinalsBytesRefBlock = bytesRefBlock.asOrdinals()) != null) { + inputBlock = ordinalsBytesRefBlock.getDictionaryVector().asBlock(); + selectedPositions = ordinalsBytesRefBlock.getOrdinalsBlock(); selectedPositions.mustIncRef(); } else { selectedPositions = IntVector.range(0, inputBlock.getPositionCount(), blockFactory).asBlock(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java index c7342325764d6..7ab3dfefc3250 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java @@ -87,6 +87,13 @@ public abstract class ExpressionBuilder extends IdentifierBuilder { + private int expressionDepth = 0; + + /** + * Maximum depth for nested expressions + */ + public static final int MAX_EXPRESSION_DEPTH = 500; + private final Map params; ExpressionBuilder(Map params) { @@ -94,7 +101,19 @@ public 
abstract class ExpressionBuilder extends IdentifierBuilder { } protected Expression expression(ParseTree ctx) { - return typedParsing(this, ctx, Expression.class); + expressionDepth++; + if (expressionDepth > MAX_EXPRESSION_DEPTH) { + throw new ParsingException( + "ESQL statement exceeded the maximum expression depth allowed ({}): [{}]", + MAX_EXPRESSION_DEPTH, + ctx.getParent().getText() + ); + } + try { + return typedParsing(this, ctx, Expression.class); + } finally { + expressionDepth--; + } } protected List expressions(List contexts) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index 8906014adeecd..aea835c11ad3d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -76,6 +76,13 @@ public class LogicalPlanBuilder extends ExpressionBuilder { + private int queryDepth = 0; + + /** + * Maximum number of commands allowed per query + */ + public static final int MAX_QUERY_DEPTH = 500; + public LogicalPlanBuilder(Map params) { super(params); } @@ -95,9 +102,21 @@ public LogicalPlan visitSingleStatement(EsqlBaseParser.SingleStatementContext ct @Override public LogicalPlan visitCompositeQuery(EsqlBaseParser.CompositeQueryContext ctx) { - LogicalPlan input = plan(ctx.query()); - PlanFactory makePlan = typedParsing(this, ctx.processingCommand(), PlanFactory.class); - return makePlan.apply(input); + queryDepth++; + if (queryDepth > MAX_QUERY_DEPTH) { + throw new ParsingException( + "ESQL statement exceeded the maximum query depth allowed ({}): [{}]", + MAX_QUERY_DEPTH, + ctx.getText() + ); + } + try { + LogicalPlan input = plan(ctx.query()); + PlanFactory makePlan = typedParsing(this, ctx.processingCommand(), PlanFactory.class); + return makePlan.apply(input); + } 
finally { + queryDepth--; + } } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ClusterComputeRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ClusterComputeRequest.java index e25136f4d9532..2f5920a4e32c9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ClusterComputeRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ClusterComputeRequest.java @@ -9,8 +9,13 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockStreamInput; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -73,7 +78,10 @@ final class ClusterComputeRequest extends TransportRequest implements IndicesReq super(in); this.clusterAlias = in.readString(); this.sessionId = in.readString(); - this.configuration = new EsqlConfiguration(in); + this.configuration = new EsqlConfiguration( + // TODO make EsqlConfiguration Releasable + new BlockStreamInput(in, new BlockFactory(new NoopCircuitBreaker(CircuitBreaker.REQUEST), BigArrays.NON_RECYCLING_INSTANCE)) + ); this.plan = new PlanStreamInput(in, planNameRegistry, in.namedWriteableRegistry(), configuration).readPhysicalPlanNode(); this.indices = in.readStringArray(); this.originalIndices = in.readStringArray(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java index 5067e62fa6970..6c87b226aa590 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java @@ -10,8 +10,13 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockStreamInput; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.internal.AliasFilter; @@ -61,7 +66,10 @@ final class DataNodeRequest extends TransportRequest implements IndicesRequest { DataNodeRequest(StreamInput in) throws IOException { super(in); this.sessionId = in.readString(); - this.configuration = new EsqlConfiguration(in); + this.configuration = new EsqlConfiguration( + // TODO make EsqlConfiguration Releasable + new BlockStreamInput(in, new BlockFactory(new NoopCircuitBreaker(CircuitBreaker.REQUEST), BigArrays.NON_RECYCLING_INSTANCE)) + ); if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_CLUSTER_ALIAS)) { this.clusterAlias = in.readString(); } else { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java index f24619ff80d9a..70bc5760adcc0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java @@ -156,4 +156,9 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(settings); } + + @Override + public String toString() { + return settings.toString(); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index bce189754b485..34422a3e5c197 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -154,7 +154,8 @@ private void innerExecute(Task task, EsqlQueryRequest request, ActionListener> tables; + public EsqlConfiguration( ZoneId zi, Locale locale, @@ -49,7 +54,8 @@ public EsqlConfiguration( int resultTruncationMaxSize, int resultTruncationDefaultSize, String query, - boolean profile + boolean profile, + Map> tables ) { super(zi, username, clusterName); this.locale = locale; @@ -58,9 +64,11 @@ public EsqlConfiguration( this.resultTruncationDefaultSize = resultTruncationDefaultSize; this.query = query; this.profile = profile; + this.tables = tables; + assert tables != null; } - public EsqlConfiguration(StreamInput in) throws IOException { + public EsqlConfiguration(BlockStreamInput in) throws IOException { super(in.readZoneId(), Instant.ofEpochSecond(in.readVLong(), in.readVInt()), in.readOptionalString(), in.readOptionalString()); locale = Locale.forLanguageTag(in.readString()); this.pragmas = new QueryPragmas(in); @@ -72,6 +80,11 @@ public EsqlConfiguration(StreamInput in) throws IOException { } else { this.profile = false; } + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_REQUEST_TABLES)) { + this.tables = in.readImmutableMap(i1 -> i1.readImmutableMap(i2 -> new Column((BlockStreamInput) i2))); + } else { 
+ this.tables = Map.of(); + } } @Override @@ -90,6 +103,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeBoolean(profile); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_REQUEST_TABLES)) { + out.writeMap(tables, (o1, columns) -> o1.writeMap(columns, (o2, column) -> column.writeTo(o2))); + } } public QueryPragmas pragmas() { @@ -121,6 +137,13 @@ public long absoluteStartedTimeInMillis() { return System.currentTimeMillis(); } + /** + * Tables specified in the request. + */ + public Map> tables() { + return tables; + } + /** * Enable profiling, sacrificing performance to return information about * what operations are taking the most time. @@ -161,13 +184,44 @@ public boolean equals(Object o) { && Objects.equals(pragmas, that.pragmas) && Objects.equals(locale, that.locale) && Objects.equals(that.query, query) - && profile == that.profile; + && profile == that.profile + && tables.equals(that.tables); } return false; } @Override public int hashCode() { - return Objects.hash(super.hashCode(), pragmas, resultTruncationMaxSize, resultTruncationDefaultSize, locale, query, profile); + return Objects.hash( + super.hashCode(), + pragmas, + resultTruncationMaxSize, + resultTruncationDefaultSize, + locale, + query, + profile, + tables + ); + } + + @Override + public String toString() { + return "EsqlConfiguration{" + + "pragmas=" + + pragmas + + ", resultTruncationMaxSize=" + + resultTruncationMaxSize + + ", resultTruncationDefaultSize=" + + resultTruncationDefaultSize + + ", locale=" + + locale + + ", query='" + + query + + '\'' + + ", profile=" + + profile + + ", tables=" + + tables + + '}'; } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java index d7445b4ef7c74..7e87552881d09 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java @@ -7,9 +7,17 @@ package org.elasticsearch.xpack.esql.action; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.Build; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.QueryBuilder; @@ -24,9 +32,11 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.esql.Column; import org.elasticsearch.xpack.esql.parser.TypedParamValue; import org.elasticsearch.xpack.esql.version.EsqlVersion; import org.elasticsearch.xpack.esql.version.EsqlVersionTests; +import org.elasticsearch.xpack.ql.type.DataTypes; import java.io.IOException; import java.util.ArrayList; @@ -39,6 +49,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; public class EsqlQueryRequestTests extends ESTestCase { @@ -286,6 +297,142 @@ public void testPragmasOnlyValidOnSnapshot() throws IOException { assertThat(request.validate().getMessage(), containsString("[pragma] only allowed in snapshot builds")); } + public void testTablesKeyword() throws IOException { + String json = """ + { + "version": "2024.04.01", + "query": "ROW x = 
1", + "tables": {"a": {"c:keyword": ["a", "b", null, 1, 2.0, ["c", "d"], false]}} + } + """; + EsqlQueryRequest request = parseEsqlQueryRequest(json, randomBoolean()); + Column c = request.tables().get("a").get("c"); + assertThat(c.type(), equalTo(DataTypes.KEYWORD)); + try ( + BytesRefBlock.Builder builder = new BlockFactory( + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + BigArrays.NON_RECYCLING_INSTANCE + ).newBytesRefBlockBuilder(10) + ) { + builder.appendBytesRef(new BytesRef("a")); + builder.appendBytesRef(new BytesRef("b")); + builder.appendNull(); + builder.appendBytesRef(new BytesRef("1")); + builder.appendBytesRef(new BytesRef("2.0")); + builder.beginPositionEntry(); + builder.appendBytesRef(new BytesRef("c")); + builder.appendBytesRef(new BytesRef("d")); + builder.endPositionEntry(); + builder.appendBytesRef(new BytesRef("false")); + assertThat(c.values(), equalTo(builder.build())); + } + assertTablesOnlyValidOnSnapshot(request); + } + + public void testTablesInteger() throws IOException { + String json = """ + { + "version": "2024.04.01", + "query": "ROW x = 1", + "tables": {"a": {"c:integer": [1, 2, "3", null, [5, 6]]}} + } + """; + + EsqlQueryRequest request = parseEsqlQueryRequest(json, randomBoolean()); + Column c = request.tables().get("a").get("c"); + assertThat(c.type(), equalTo(DataTypes.INTEGER)); + try ( + IntBlock.Builder builder = new BlockFactory(new NoopCircuitBreaker(CircuitBreaker.REQUEST), BigArrays.NON_RECYCLING_INSTANCE) + .newIntBlockBuilder(10) + ) { + builder.appendInt(1); + builder.appendInt(2); + builder.appendInt(3); + builder.appendNull(); + builder.beginPositionEntry(); + builder.appendInt(5); + builder.appendInt(6); + builder.endPositionEntry(); + assertThat(c.values(), equalTo(builder.build())); + } + assertTablesOnlyValidOnSnapshot(request); + } + + public void testTablesLong() throws IOException { + String json = """ + { + "version": "2024.04.01", + "query": "ROW x = 1", + "tables": {"a": {"c:long": [1, 2, "3", null, 
[5, 6]]}} + } + """; + + EsqlQueryRequest request = parseEsqlQueryRequest(json, randomBoolean()); + Column c = request.tables().get("a").get("c"); + assertThat(c.type(), equalTo(DataTypes.LONG)); + try ( + LongBlock.Builder builder = new BlockFactory(new NoopCircuitBreaker(CircuitBreaker.REQUEST), BigArrays.NON_RECYCLING_INSTANCE) + .newLongBlockBuilder(10) + ) { + builder.appendLong(1); + builder.appendLong(2); + builder.appendLong(3); + builder.appendNull(); + builder.beginPositionEntry(); + builder.appendLong(5); + builder.appendLong(6); + builder.endPositionEntry(); + assertThat(c.values(), equalTo(builder.build())); + } + assertTablesOnlyValidOnSnapshot(request); + } + + public void testManyTables() throws IOException { + String json = """ + { + "version": "2024.04.01", + "query": "ROW x = 1", + "tables": { + "t1": { + "a:long": [1], + "b:long": [1], + "c:keyword": [1], + "d:long": [1] + }, + "t2": { + "a:long": [1], + "b:integer": [1], + "c:long": [1], + "d:long": [1] + } + } + } + """; + + EsqlQueryRequest request = parseEsqlQueryRequest(json, randomBoolean()); + assertThat(request.tables().keySet(), hasSize(2)); + Map t1 = request.tables().get("t1"); + assertThat(t1.get("a").type(), equalTo(DataTypes.LONG)); + assertThat(t1.get("b").type(), equalTo(DataTypes.LONG)); + assertThat(t1.get("c").type(), equalTo(DataTypes.KEYWORD)); + assertThat(t1.get("d").type(), equalTo(DataTypes.LONG)); + Map t2 = request.tables().get("t2"); + assertThat(t2.get("a").type(), equalTo(DataTypes.LONG)); + assertThat(t2.get("b").type(), equalTo(DataTypes.INTEGER)); + assertThat(t2.get("c").type(), equalTo(DataTypes.LONG)); + assertThat(t2.get("d").type(), equalTo(DataTypes.LONG)); + assertTablesOnlyValidOnSnapshot(request); + } + + private void assertTablesOnlyValidOnSnapshot(EsqlQueryRequest request) { + request.onSnapshotBuild(true); + assertNull(request.validate()); + + request.onSnapshotBuild(false); + assertNotNull(request.validate()); + 
assertThat(request.validate().getMessage(), containsString("[tables] only allowed in snapshot builds")); + } + public void testTask() throws IOException { String query = randomAlphaOfLength(10); int id = randomInt(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index f563e1a6cb25c..8969c1b48934e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -22,6 +22,7 @@ import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.matchesRegex; //@TestLogging(value = "org.elasticsearch.xpack.esql:TRACE,org.elasticsearch.compute:TRACE", reason = "debug") public class VerifierTests extends ESTestCase { @@ -517,6 +518,33 @@ public void testGroupByCounter() { ); } + public void testAggsResolutionWithUnresolvedGroupings() { + String agg_func = randomFrom( + new String[] { "avg", "count", "count_distinct", "min", "max", "median", "median_absolute_deviation", "sum", "values" } + ); + + assertThat(error("FROM tests | STATS " + agg_func + "(emp_no) by foobar"), matchesRegex("1:\\d+: Unknown column \\[foobar]")); + assertThat( + error("FROM tests | STATS " + agg_func + "(x) by foobar, x = emp_no"), + matchesRegex("1:\\d+: Unknown column \\[foobar]") + ); + assertThat(error("FROM tests | STATS " + agg_func + "(foobar) by foobar"), matchesRegex("1:\\d+: Unknown column \\[foobar]")); + assertThat( + error("FROM tests | STATS " + agg_func + "(foobar) by BUCKET(languages, 10)"), + matchesRegex( + "1:\\d+: function expects exactly four arguments when the first one is of type \\[INTEGER]" + + " and the second of type \\[INTEGER]\n" + + "line 1:\\d+: 
Unknown column \\[foobar]" + ) + ); + assertThat(error("FROM tests | STATS " + agg_func + "(foobar) by emp_no"), matchesRegex("1:\\d+: Unknown column \\[foobar]")); + // TODO: Ideally, we'd detect that count_distinct(x) doesn't require an error message. + assertThat( + error("FROM tests | STATS " + agg_func + "(x) by x = foobar"), + matchesRegex("1:\\d+: Unknown column \\[foobar]\n" + "line 1:\\d+: Unknown column \\[x]") + ); + } + private String error(String query) { return error(query, defaultAnalyzer); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java index 56c56870ccbb2..47df642a96942 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.ql.util.StringUtils; import java.util.List; +import java.util.Map; import static org.elasticsearch.xpack.esql.SerializationTestUtils.assertSerialization; @@ -40,7 +41,8 @@ static EsqlConfiguration randomConfiguration() { EsqlPlugin.QUERY_RESULT_TRUNCATION_MAX_SIZE.getDefault(Settings.EMPTY), EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getDefault(Settings.EMPTY), StringUtils.EMPTY, - randomBoolean() + randomBoolean(), + Map.of() ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java index 2e0494723a518..4b1109892b00c 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java @@ -28,6 +28,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; @@ -67,7 +68,8 @@ private EsqlConfiguration randomLocaleConfig() { EsqlPlugin.QUERY_RESULT_TRUNCATION_MAX_SIZE.getDefault(Settings.EMPTY), EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getDefault(Settings.EMPTY), "", - false + false, + Map.of() ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java index f5d0283d0691b..1d7c479cca0e2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java @@ -28,6 +28,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; @@ -67,7 +68,8 @@ private EsqlConfiguration randomLocaleConfig() { EsqlPlugin.QUERY_RESULT_TRUNCATION_MAX_SIZE.getDefault(Settings.EMPTY), EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getDefault(Settings.EMPTY), "", - false + false, + Map.of() ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 82334afbffd03..b2a074ab4723b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -48,6 +48,7 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.parser.EsqlParser; +import org.elasticsearch.xpack.esql.parser.ParsingException; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; @@ -122,6 +123,8 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.statsForMissingField; import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; import static org.elasticsearch.xpack.esql.SerializationTestUtils.assertSerialization; +import static org.elasticsearch.xpack.esql.parser.ExpressionBuilder.MAX_EXPRESSION_DEPTH; +import static org.elasticsearch.xpack.esql.parser.LogicalPlanBuilder.MAX_QUERY_DEPTH; import static org.elasticsearch.xpack.esql.plan.physical.AggregateExec.Mode.FINAL; import static org.elasticsearch.xpack.esql.plan.physical.AggregateExec.Mode.PARTIAL; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.CARTESIAN_POINT; @@ -4023,6 +4026,118 @@ public void testRejectRemoteEnrichAfterCoordinatorEnrich() { ); } + public void testMaxExpressionDepth_cast() { + StringBuilder queryBuilder = new StringBuilder(randomBoolean() ? 
"row a = 1" : "row a = 1 | eval b = a"); + queryBuilder.append("::long::int".repeat(MAX_EXPRESSION_DEPTH / 2 - 1)); + var query = queryBuilder.toString(); + + physicalPlan(query); + + var e = expectThrows(ParsingException.class, () -> physicalPlan(query + "::long")); + assertThat( + e.getMessage(), + containsString("ESQL statement exceeded the maximum expression depth allowed (" + MAX_EXPRESSION_DEPTH + ")") + ); + } + + public void testMaxExpressionDepth_math() { + StringBuilder queryBuilder = new StringBuilder(randomBoolean() ? "row a = 1" : "row a = 1 | eval b = a"); + String expression = " " + randomFrom("+", "-", "*", "/") + " 1"; + queryBuilder.append(expression.repeat(MAX_EXPRESSION_DEPTH - 2)); + var query = queryBuilder.toString(); + + physicalPlan(query); + + var e = expectThrows(ParsingException.class, () -> physicalPlan(query + expression)); + assertThat( + e.getMessage(), + containsString("ESQL statement exceeded the maximum expression depth allowed (" + MAX_EXPRESSION_DEPTH + ")") + ); + } + + public void testMaxExpressionDepth_boolean() { + StringBuilder queryBuilder = new StringBuilder(randomBoolean() ? 
"row a = true " : "row a = true | eval b = a"); + String expression = " " + randomFrom("and", "or") + " true"; + queryBuilder.append(expression.repeat(MAX_EXPRESSION_DEPTH - 2)); + var query = queryBuilder.toString(); + + physicalPlan(query); + + var e = expectThrows(ParsingException.class, () -> physicalPlan(query + expression)); + assertThat( + e.getMessage(), + containsString("ESQL statement exceeded the maximum expression depth allowed (" + MAX_EXPRESSION_DEPTH + ")") + ); + } + + public void testMaxExpressionDepth_parentheses() { + String query = "row a = true | eval b = "; + StringBuilder expression = new StringBuilder("(".repeat(MAX_EXPRESSION_DEPTH / 2 - 1)); + expression.append("a"); + expression.append(")".repeat(MAX_EXPRESSION_DEPTH / 2 - 1)); + + physicalPlan(query + expression); + + var e = expectThrows(ParsingException.class, () -> physicalPlan(query + "(" + expression + ")")); + assertThat( + e.getMessage(), + containsString("ESQL statement exceeded the maximum expression depth allowed (" + MAX_EXPRESSION_DEPTH + ")") + ); + } + + public void testMaxExpressionDepth_mixed() { + String prefix = "abs("; + String suffix = " + 12)"; + + String from = "row a = 1 | eval b = "; + + StringBuilder queryBuilder = new StringBuilder(); + queryBuilder.append(prefix.repeat(MAX_EXPRESSION_DEPTH / 2 - 1)); + queryBuilder.append("a"); + queryBuilder.append(suffix.repeat(MAX_EXPRESSION_DEPTH / 2 - 1)); + var expression = queryBuilder.toString(); + + physicalPlan(from + expression); + + var e = expectThrows(ParsingException.class, () -> physicalPlan(from + prefix + expression + suffix)); + assertThat( + e.getMessage(), + containsString("ESQL statement exceeded the maximum expression depth allowed (" + MAX_EXPRESSION_DEPTH + ")") + ); + } + + public void testMaxQueryDepth() { + StringBuilder from = new StringBuilder("row a = 1 "); + for (int i = 0; i < MAX_QUERY_DEPTH; i++) { + from.append(randomBoolean() ? 
"| where a > 0 " : " | eval b" + i + " = a + " + i); + } + physicalPlan(from.toString()); + var e = expectThrows(ParsingException.class, () -> physicalPlan(from + (randomBoolean() ? "| sort a" : " | eval c = 10"))); + assertThat(e.getMessage(), containsString("ESQL statement exceeded the maximum query depth allowed (" + MAX_QUERY_DEPTH + ")")); + } + + public void testMaxQueryDepthPlusExpressionDepth() { + StringBuilder mainQuery = new StringBuilder("row a = 1 "); + for (int i = 0; i < MAX_QUERY_DEPTH; i++) { + mainQuery.append(" | eval b" + i + " = a + " + i); + } + + physicalPlan(mainQuery.toString()); + + var cast = "::long::int".repeat(MAX_EXPRESSION_DEPTH / 2 - 2) + "::long"; + + physicalPlan(mainQuery + cast); + + var e = expectThrows(ParsingException.class, () -> physicalPlan(mainQuery + cast + "::int")); + assertThat( + e.getMessage(), + containsString("ESQL statement exceeded the maximum expression depth allowed (" + MAX_EXPRESSION_DEPTH + ")") + ); + + e = expectThrows(ParsingException.class, () -> physicalPlan(mainQuery + cast + " | eval x = 10")); + assertThat(e.getMessage(), containsString("ESQL statement exceeded the maximum query depth allowed (" + MAX_QUERY_DEPTH + ")")); + } + @SuppressWarnings("SameParameterValue") private static void assertFilterCondition( Filter filter, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java index 05fdff8a75f80..880caf2cc67cf 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java @@ -59,6 +59,7 @@ import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Map; public class EvalMapperTests extends ESTestCase { private static final FieldAttribute DOUBLE1 = field("foo", DataTypes.DOUBLE); @@ -75,7 
+76,8 @@ public class EvalMapperTests extends ESTestCase { 10000000, 10000, StringUtils.EMPTY, - false + false, + Map.of() ); @ParametersFactory(argumentFormatting = "%1$s") diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java index c1ef69a0bf7ca..89c7e2f81b132 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java @@ -143,7 +143,8 @@ private EsqlConfiguration config() { EsqlPlugin.QUERY_RESULT_TRUNCATION_MAX_SIZE.getDefault(null), EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getDefault(null), StringUtils.EMPTY, - false + false, + Map.of() ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java index 4e6c3a545da06..45d57b2fa411e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.compute.data.Block; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.shard.ShardId; @@ -35,6 +36,7 @@ import org.elasticsearch.xpack.ql.type.EsField; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Set; @@ -54,8 +56,10 @@ protected Writeable.Reader instanceReader() { @Override protected 
NamedWriteableRegistry getNamedWriteableRegistry() { - SearchModule searchModule = new SearchModule(Settings.EMPTY, List.of()); - return new NamedWriteableRegistry(searchModule.getNamedWriteables()); + List writeables = new ArrayList<>(); + writeables.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables()); + writeables.addAll(Block.getNamedWriteables()); + return new NamedWriteableRegistry(writeables); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlConfigurationSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlConfigurationSerializationTests.java index 9879f7c9ed23d..3e91321651928 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlConfigurationSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlConfigurationSerializationTests.java @@ -7,13 +7,32 @@ package org.elasticsearch.xpack.esql.session; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockStreamInput; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.lucene.DataPartitioning; +import org.elasticsearch.core.Releasables; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.esql.Column; +import org.elasticsearch.xpack.esql.action.ParseTables; +import 
org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; +import org.elasticsearch.xpack.ql.type.DataType; -import java.io.IOException; +import java.time.ZoneId; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; import static org.elasticsearch.xpack.esql.session.EsqlConfiguration.QUERY_COMPRESS_THRESHOLD_CHARS; @@ -21,7 +40,9 @@ public class EsqlConfigurationSerializationTests extends AbstractWireSerializing @Override protected Writeable.Reader instanceReader() { - return EsqlConfiguration::new; + return in -> new EsqlConfiguration( + new BlockStreamInput(in, new BlockFactory(new NoopCircuitBreaker(CircuitBreaker.REQUEST), BigArrays.NON_RECYCLING_INSTANCE)) + ); } private static QueryPragmas randomQueryPragmas() { @@ -53,30 +74,133 @@ public static EsqlConfiguration randomConfiguration(String query) { truncation, defaultTruncation, query, - profile + profile, + randomTables() ); } + static Map> randomTables() { + if (randomBoolean()) { + return Map.of(); + } + int count = between(1, 10); + Map> tables = new HashMap<>(count); + try { + for (int i = 0; i < 10; i++) { + tables.put(randomAlphaOfLength(i + 1), randomColumns()); + } + return tables; + } finally { + if (tables.size() != count) { + Releasables.close( + Releasables.wrap( + Iterators.flatMap(tables.values().iterator(), columns -> Iterators.map(columns.values().iterator(), Column::values)) + ) + ); + } + } + } + + static Map randomColumns() { + int count = between(1, 10); + Map columns = new HashMap<>(count); + int positions = between(1, 10_000); + try { + for (int i = 0; i < count; i++) { + String name = randomAlphaOfLength(i + 1); + DataType dataType = randomFrom(ParseTables.SUPPORTED_TYPES); + ElementType type = PlannerUtils.toElementType(dataType); + try ( + Block.Builder builder = type.newBlockBuilder( + positions, + new BlockFactory(new 
NoopCircuitBreaker(CircuitBreaker.REQUEST), BigArrays.NON_RECYCLING_INSTANCE) + ) + ) { + for (int p = 0; p < positions; p++) { + BlockUtils.appendValue(builder, AbstractFunctionTestCase.randomLiteral(dataType).value(), type); + } + columns.put(name, new Column(dataType, builder.build())); + } + } + return columns; + } finally { + if (columns.size() != count) { + Releasables.close(Releasables.wrap(Iterators.map(columns.values().iterator(), Column::values))); + } + } + } + @Override protected EsqlConfiguration createTestInstance() { return randomConfiguration(); } @Override - protected EsqlConfiguration mutateInstance(EsqlConfiguration in) throws IOException { - int ordinal = between(0, 8); + protected EsqlConfiguration mutateInstance(EsqlConfiguration in) { + ZoneId zoneId = in.zoneId(); + Locale locale = in.locale(); + String username = in.username(); + String clusterName = in.clusterName(); + QueryPragmas pragmas = in.pragmas(); + int resultTruncationMaxSize = in.resultTruncationMaxSize(); + int resultTruncationDefaultSize = in.resultTruncationDefaultSize(); + String query = in.query(); + boolean profile = in.profile(); + Map> tables = in.tables(); + switch (between(0, 9)) { + case 0 -> zoneId = randomValueOtherThan(zoneId, () -> randomZone().normalized()); + case 1 -> locale = randomValueOtherThan(in.locale(), () -> randomLocale(random())); + case 2 -> username = randomAlphaOfLength(15); + case 3 -> clusterName = randomAlphaOfLength(15); + case 4 -> pragmas = new QueryPragmas( + Settings.builder().put(QueryPragmas.EXCHANGE_BUFFER_SIZE.getKey(), between(1, 10)).build() + ); + case 5 -> resultTruncationMaxSize += randomIntBetween(3, 10); + case 6 -> resultTruncationDefaultSize += randomIntBetween(3, 10); + case 7 -> query += randomAlphaOfLength(2); + case 8 -> profile = false == profile; + case 9 -> { + while (true) { + Map> newTables = null; + try { + newTables = randomTables(); + if (false == tables.equals(newTables)) { + tables = newTables; + newTables = null; 
+ break; + } + } finally { + if (newTables != null) { + Releasables.close( + Releasables.wrap( + Iterators.flatMap( + newTables.values().iterator(), + columns -> Iterators.map(columns.values().iterator(), Column::values) + ) + ) + ); + } + } + } + } + } return new EsqlConfiguration( - ordinal == 0 ? randomValueOtherThan(in.zoneId(), () -> randomZone().normalized()) : in.zoneId(), - ordinal == 1 ? randomValueOtherThan(in.locale(), () -> randomLocale(random())) : in.locale(), - ordinal == 2 ? randomAlphaOfLength(15) : in.username(), - ordinal == 3 ? randomAlphaOfLength(15) : in.clusterName(), - ordinal == 4 - ? new QueryPragmas(Settings.builder().put(QueryPragmas.EXCHANGE_BUFFER_SIZE.getKey(), between(1, 10)).build()) - : in.pragmas(), - ordinal == 5 ? in.resultTruncationMaxSize() + randomIntBetween(3, 10) : in.resultTruncationMaxSize(), - ordinal == 6 ? in.resultTruncationDefaultSize() + randomIntBetween(3, 10) : in.resultTruncationDefaultSize(), - ordinal == 7 ? randomAlphaOfLength(100) : in.query(), - ordinal == 8 ? 
in.profile() == false : in.profile() + zoneId, + locale, + username, + clusterName, + pragmas, + resultTruncationMaxSize, + resultTruncationDefaultSize, + query, + profile, + tables ); + + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Block.getNamedWriteables()); } } diff --git a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/privileges/UserPrivilegeResolverTests.java b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/privileges/UserPrivilegeResolverTests.java index 7b569e405732f..eaad561b7515b 100644 --- a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/privileges/UserPrivilegeResolverTests.java +++ b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/privileges/UserPrivilegeResolverTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.permission.ResourcePrivileges; import org.elasticsearch.xpack.core.security.user.User; import org.junit.Before; @@ -95,7 +96,8 @@ public void setupTest() { Set.of(), Set.of(appPriv1, appPriv2, discardedAppPriv), Set.of(), - Set.of() + Set.of(), + RemoteClusterPermissions.NONE ); listener.onResponse(response); return null; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTaskTests.java index 464ee4e7a8e37..536de2d2b5aec 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTaskTests.java +++ 
b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTaskTests.java @@ -8,15 +8,12 @@ package org.elasticsearch.xpack.ilm; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.LifecycleExecutionState; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; @@ -120,24 +117,18 @@ public void testOnFailure() throws IllegalAccessException { SetStepInfoUpdateTask task = new SetStepInfoUpdateTask(index, policy, currentStepKey, stepInfo); final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "warning", - SetStepInfoUpdateTask.class.getCanonicalName(), - Level.WARN, - "*policy [" + policy + "] for index [" + index + "] failed trying to set step info for step [" + currentStepKey + "]." - ) - ); + try (var ignored = mockAppender.capturing(SetStepInfoUpdateTask.class)) { + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "warning", + SetStepInfoUpdateTask.class.getCanonicalName(), + Level.WARN, + "*policy [" + policy + "] for index [" + index + "] failed trying to set step info for step [" + currentStepKey + "]." 
+ ) + ); - final Logger taskLogger = LogManager.getLogger(SetStepInfoUpdateTask.class); - Loggers.addAppender(taskLogger, mockAppender); - try { task.onFailure(new RuntimeException("test exception")); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(taskLogger, mockAppender); - mockAppender.stop(); } } diff --git a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java index f595153e4d6dd..7a724ee202c37 100644 --- a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java +++ b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java @@ -8,8 +8,6 @@ package org.elasticsearch.xpack.logstash.action; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -24,7 +22,6 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; @@ -57,16 +54,6 @@ public class TransportGetPipelineActionTests extends ESTestCase { public void testGetPipelineMultipleIDsPartialFailure() throws Exception { // Set up a log appender for detecting log messages final MockLogAppender mockLogAppender = new MockLogAppender(); - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "message", - 
"org.elasticsearch.xpack.logstash.action.TransportGetPipelineAction", - Level.INFO, - "Could not retrieve logstash pipelines with ids: [2]" - ) - ); - mockLogAppender.start(); - final Logger logger = LogManager.getLogger(TransportGetPipelineAction.class); // Set up a MultiGetResponse GetResponse mockResponse = mock(GetResponse.class); @@ -79,35 +66,40 @@ public void testGetPipelineMultipleIDsPartialFailure() throws Exception { new MultiGetItemResponse[] { new MultiGetItemResponse(mockResponse, null), new MultiGetItemResponse(null, failure) } ); - GetPipelineRequest request = new GetPipelineRequest(List.of("1", "2")); + try (var threadPool = createThreadPool(); var ignored = mockLogAppender.capturing(TransportGetPipelineAction.class)) { + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "message", + "org.elasticsearch.xpack.logstash.action.TransportGetPipelineAction", + Level.INFO, + "Could not retrieve logstash pipelines with ids: [2]" + ) + ); - // Set up an ActionListener for the actual test conditions - ActionListener testActionListener = new ActionListener<>() { - @Override - public void onResponse(GetPipelineResponse getPipelineResponse) { - // check successful pipeline get - assertThat(getPipelineResponse, is(notNullValue())); - assertThat(getPipelineResponse.pipelines().size(), equalTo(1)); + final var client = getMockClient(threadPool, multiGetResponse); + TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor(); + GetPipelineRequest request = new GetPipelineRequest(List.of("1", "2")); - // check that failed pipeline get is logged - mockLogAppender.assertAllExpectationsMatched(); - } + // Set up an ActionListener for the actual test conditions + ActionListener testActionListener = new ActionListener<>() { + @Override + public void onResponse(GetPipelineResponse getPipelineResponse) { + // check successful pipeline get + assertThat(getPipelineResponse, is(notNullValue())); + 
assertThat(getPipelineResponse.pipelines().size(), equalTo(1)); - @Override - public void onFailure(Exception e) { - // do nothing - } - }; + // check that failed pipeline get is logged + mockLogAppender.assertAllExpectationsMatched(); + } + + @Override + public void onFailure(Exception e) { + // do nothing + } + }; - try (var threadPool = createThreadPool()) { - final var client = getMockClient(threadPool, multiGetResponse); - Loggers.addAppender(logger, mockLogAppender); - TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor(); TransportGetPipelineAction action = new TransportGetPipelineAction(transportService, mock(ActionFilters.class), client); action.doExecute(null, request, testActionListener); - } finally { - Loggers.removeAppender(logger, mockLogAppender); - mockLogAppender.stop(); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java index 2ce5bf74cd9be..88d4e7e563cf2 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java @@ -105,7 +105,6 @@ public void testThrottlingSummary() throws IllegalAccessException, TimeoutExcept ); MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1", @@ -156,7 +155,6 @@ public void testThrottlingSummaryOneRepeat() throws IllegalAccessException, Time ); MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1", @@ -213,7 +211,6 @@ public void testThrottlingSummaryLevelChanges() throws IllegalAccessException, T ); MockLogAppender mockAppender = new 
MockLogAppender(); - mockAppender.start(); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1", @@ -281,7 +278,6 @@ public void testThrottlingLastMessageRepeast() throws IllegalAccessException, Ti ); MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1", @@ -318,7 +314,6 @@ public void testThrottlingDebug() throws IllegalAccessException, TimeoutExceptio ); MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1", @@ -381,18 +376,16 @@ public void testParseFatalError() throws IOException, IllegalAccessException { private static void executeLoggingTest(InputStream is, MockLogAppender mockAppender, Level level, String jobId) throws IOException { Logger cppMessageLogger = LogManager.getLogger(CppLogMessageHandler.class); - Loggers.addAppender(cppMessageLogger, mockAppender); - Level oldLevel = cppMessageLogger.getLevel(); Loggers.setLevel(cppMessageLogger, level); - try (CppLogMessageHandler handler = new CppLogMessageHandler(jobId, is)) { + try ( + var ignored = mockAppender.capturing(CppLogMessageHandler.class); + CppLogMessageHandler handler = new CppLogMessageHandler(jobId, is) + ) { handler.tailStream(); + mockAppender.assertAllExpectationsMatched(); } finally { - Loggers.removeAppender(cppMessageLogger, mockAppender); Loggers.setLevel(cppMessageLogger, oldLevel); - mockAppender.stop(); } - - mockAppender.assertAllExpectationsMatched(); } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRecoverFromSnapshotIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRecoverFromSnapshotIntegTests.java index 894d3af8d75b8..152f7c0ea9a73 100644 --- 
a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRecoverFromSnapshotIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRecoverFromSnapshotIntegTests.java @@ -8,9 +8,7 @@ package org.elasticsearch.xpack.searchablesnapshots; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.TotalHits; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.recovery.plan.ShardSnapshotsService; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; @@ -67,28 +65,24 @@ public void testSearchableSnapshotRelocationDoNotUseSnapshotBasedRecoveries() th final var newNode = internalCluster().startDataOnlyNode(); final var mockAppender = new MockLogAppender(); - mockAppender.start(); - mockAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( - "Error fetching segments file", - ShardSnapshotsService.class.getCanonicalName(), - Level.WARN, - "Unable to fetch shard snapshot files for*" - ) - ); - - final var logger = LogManager.getLogger(ShardSnapshotsService.class); - Loggers.addAppender(logger, mockAppender); + try (var ignored = mockAppender.capturing(ShardSnapshotsService.class)) { + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "Error fetching segments file", + ShardSnapshotsService.class.getCanonicalName(), + Level.WARN, + "Unable to fetch shard snapshot files for*" + ) + ); - // Relocate the searchable snapshot shard to the new node - updateIndexSettings(Settings.builder().put("index.routing.allocation.require._name", newNode), restoredIndexName); + // Relocate the searchable snapshot shard to the new node + updateIndexSettings(Settings.builder().put("index.routing.allocation.require._name", newNode), 
restoredIndexName); - ensureGreen(restoredIndexName); + ensureGreen(restoredIndexName); - assertHitCount(prepareSearch(restoredIndexName).setTrackTotalHits(true), totalHits.value); + assertHitCount(prepareSearch(restoredIndexName).setTrackTotalHits(true), totalHits.value); - mockAppender.assertAllExpectationsMatched(); - Loggers.removeAppender(logger, mockAppender); - mockAppender.stop(); + mockAppender.assertAllExpectationsMatched(); + } } } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java index 931d3b94669fb..0299eca2db7dd 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java @@ -287,13 +287,19 @@ public void populateData() throws Exception { "privileges": ["read"] } ], - "cluster": [ "monitor_enrich" ], + "cluster": [ "monitor_enrich", "manage_own_api_key" ], "remote_indices": [ { "names": ["employees"], "privileges": ["read"], "clusters": ["my_remote_cluster"] } + ], + "remote_cluster": [ + { + "privileges": ["monitor_enrich"], + "clusters": ["my_remote_cluster"] + } ] }"""); assertOK(adminClient().performRequest(putRoleRequest)); @@ -324,12 +330,16 @@ public void testCrossClusterQuery() throws Exception { populateData(); // query remote cluster only - Response response = performRequestWithRemoteSearchUser(esqlRequest(""" + Request request = esqlRequest(""" FROM my_remote_cluster:employees | SORT emp_id ASC | LIMIT 2 - | KEEP emp_id, department""")); - assertOK(response); + | KEEP emp_id, department"""); + Response response = performRequestWithRemoteSearchUser(request); + assertRemoteOnlyResults(response); 
+ + // same as above but authenticate with API key + response = performRequestWithRemoteSearchUserViaAPIKey(request); assertRemoteOnlyResults(response); // query remote and local cluster @@ -337,7 +347,6 @@ public void testCrossClusterQuery() throws Exception { FROM my_remote_cluster:employees,employees | SORT emp_id ASC | LIMIT 10""")); - assertOK(response); assertRemoteAndLocalResults(response); // query remote cluster only - but also include employees2 which the user does not have access to @@ -346,7 +355,6 @@ public void testCrossClusterQuery() throws Exception { | SORT emp_id ASC | LIMIT 2 | KEEP emp_id, department""")); - assertOK(response); assertRemoteOnlyResults(response); // same as above since the user only has access to employees // query remote and local cluster - but also include employees2 which the user does not have access to @@ -354,7 +362,6 @@ public void testCrossClusterQuery() throws Exception { FROM my_remote_cluster:employees,my_remote_cluster:employees2,employees,employees2 | SORT emp_id ASC | LIMIT 10""")); - assertOK(response); assertRemoteAndLocalResults(response); // same as above since the user only has access to employees // update role to include both employees and employees2 for the remote cluster @@ -379,7 +386,6 @@ public void testCrossClusterQuery() throws Exception { | SORT emp_id ASC | LIMIT 2 | KEEP emp_id, department""")); - assertOK(response); assertRemoteOnlyAgainst2IndexResults(response); } @@ -518,7 +524,6 @@ public void testCrossClusterQueryAgainstInvalidRemote() throws Exception { // invalid remote with local index should return local results var q = "FROM invalid_remote:employees,employees | SORT emp_id DESC | LIMIT 10"; Response response = performRequestWithRemoteSearchUser(esqlRequest(q)); - assertOK(response); assertLocalOnlyResults(response); // only calling an invalid remote should error @@ -562,7 +567,6 @@ public void testCrossClusterQueryWithOnlyRemotePrivs() throws Exception { | SORT emp_id ASC | LIMIT 2 | KEEP 
emp_id, department""")); - assertOK(response); assertRemoteOnlyResults(response); // without the remote index priv @@ -618,25 +622,32 @@ public void testCrossClusterQueryWithOnlyRemotePrivs() throws Exception { ); } - @AwaitsFix(bugUrl = "cross-clusters enrich doesn't work with RCS 2.0") + @SuppressWarnings("unchecked") public void testCrossClusterEnrich() throws Exception { configureRemoteCluster(); populateData(); // Query cluster { // ESQL with enrich is okay when user has access to enrich polices - Response response = performRequestWithRemoteSearchUser(esqlRequest(""" + Request request = esqlRequest(""" FROM my_remote_cluster:employees,employees | ENRICH countries | STATS size=count(*) by country | SORT size DESC - | LIMIT 2""")); - assertOK(response); - Map values = entityAsMap(response); + | LIMIT 2"""); - // ESQL with enrich is denied when user has no access to enrich policies - final var putLocalSearchRoleRequest = new Request("PUT", "/_security/role/local_search"); - putLocalSearchRoleRequest.setJsonEntity(""" + Response response = performRequestWithRemoteSearchUser(request); + assertWithEnrich(response); + + // same as above but authenticate with API key + response = performRequestWithRemoteSearchUserViaAPIKey(request); + assertWithEnrich(response); + + // Query cluster + final var putRoleRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); + + // no remote_cluster privs should fail the request + putRoleRequest.setJsonEntity(""" { "indices": [ { @@ -644,7 +655,7 @@ public void testCrossClusterEnrich() throws Exception { "privileges": ["read"] } ], - "cluster": [ ], + "cluster": [ "monitor_enrich" ], "remote_indices": [ { "names": ["employees"], @@ -653,31 +664,84 @@ public void testCrossClusterEnrich() throws Exception { } ] }"""); - assertOK(adminClient().performRequest(putLocalSearchRoleRequest)); - final var putlocalSearchUserRequest = new Request("PUT", "/_security/user/local_search_user"); - 
putlocalSearchUserRequest.setJsonEntity(""" - { - "password": "x-pack-test-password", - "roles" : ["local_search"] - }"""); - assertOK(adminClient().performRequest(putlocalSearchUserRequest)); - for (String indices : List.of("my_remote_cluster:employees,employees", "my_remote_cluster:employees")) { - ResponseException error = expectThrows(ResponseException.class, () -> { - var q = "FROM " + indices + "| ENRICH countries | STATS size=count(*) by country | SORT size | LIMIT 2"; - performRequestWithLocalSearchUser(esqlRequest(q)); - }); - assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(403)); - assertThat( - error.getMessage(), - containsString( - "action [cluster:monitor/xpack/enrich/esql/resolve_policy] towards remote cluster [my_remote_cluster]" - + " is unauthorized for user [local_search_user] with effective roles [local_search]" - ) - ); - } + assertOK(adminClient().performRequest(putRoleRequest)); + + ResponseException error = expectThrows(ResponseException.class, () -> { performRequestWithRemoteSearchUser(esqlRequest(""" + FROM my_remote_cluster:employees,employees + | ENRICH countries + | STATS size=count(*) by country + | SORT size DESC + | LIMIT 2""")); }); + + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat( + error.getMessage(), + containsString( + "action [cluster:monitor/xpack/enrich/esql/resolve_policy] towards remote cluster is unauthorized for user " + + "[remote_search_user] with assigned roles [remote_search] authenticated by API key id [" + ) + ); + assertThat( + error.getMessage(), + containsString( + "this action is granted by the cluster privileges " + + "[cross_cluster_search,monitor_enrich,manage_enrich,monitor,manage,all]" + ) + ); } } + @SuppressWarnings("unchecked") + public void testCrossClusterEnrichWithOnlyRemotePrivs() throws Exception { + configureRemoteCluster(); + populateData(); + + // Query cluster + final var putRoleRequest = new Request("PUT", "/_security/role/" 
+ REMOTE_SEARCH_ROLE); + + // local cross_cluster_search cluster priv is required for enrich + // ideally, remote only enrichment wouldn't need this local privilege, however remote only enrichment is not currently supported + putRoleRequest.setJsonEntity(""" + { + "indices": [{"names": [""], "privileges": ["read_cross_cluster"]}], + "cluster": ["cross_cluster_search"], + "remote_indices": [ + { + "names": ["employees"], + "privileges": ["read"], + "clusters": ["my_remote_cluster"] + } + ], + "remote_cluster": [ + { + "privileges": ["monitor_enrich"], + "clusters": ["my_remote_cluster"] + } + ] + }"""); + assertOK(adminClient().performRequest(putRoleRequest)); + + // Query cluster + // ESQL with enrich is okay when user has access to enrich polices + Response response = performRequestWithRemoteSearchUser(esqlRequest(""" + FROM my_remote_cluster:employees + | ENRICH countries + | STATS size=count(*) by country + | SORT size DESC + | LIMIT 2""")); + assertOK(response); + + Map responseAsMap = entityAsMap(response); + List columns = (List) responseAsMap.get("columns"); + List values = (List) responseAsMap.get("values"); + assertEquals(2, columns.size()); + assertEquals(2, values.size()); + List flatList = values.stream() + .flatMap(innerList -> innerList instanceof List ? 
((List) innerList).stream() : Stream.empty()) + .collect(Collectors.toList()); + assertThat(flatList, containsInAnyOrder(1, 3, "usa", "germany")); + } + protected Request esqlRequest(String command) throws IOException { XContentBuilder body = JsonXContent.contentBuilder(); body.startObject(); @@ -717,15 +781,27 @@ private Response performRequestWithRemoteSearchUser(final Request request) throw return client().performRequest(request); } - private Response performRequestWithLocalSearchUser(final Request request) throws IOException { - request.setOptions( - RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", headerFromRandomAuthMethod("local_search_user", PASS)) + private Response performRequestWithRemoteSearchUserViaAPIKey(final Request request) throws IOException { + final Request createApiKeyRequest = new Request("POST", "_security/api_key"); + createApiKeyRequest.setJsonEntity(""" + { + "name": "myapikey" + }"""); + // ensure that the API key is created with the correct user + createApiKeyRequest.setOptions( + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", headerFromRandomAuthMethod(REMOTE_SEARCH_USER, PASS)) ); + Response response = client().performRequest(createApiKeyRequest); + assertOK(response); + final Map responseAsMap = responseAsMap(response); + final String encoded = (String) responseAsMap.get("encoded"); + request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + encoded)); return client().performRequest(request); } @SuppressWarnings("unchecked") private void assertRemoteOnlyResults(Response response) throws IOException { + assertOK(response); Map responseAsMap = entityAsMap(response); List columns = (List) responseAsMap.get("columns"); List values = (List) responseAsMap.get("values"); @@ -739,6 +815,7 @@ private void assertRemoteOnlyResults(Response response) throws IOException { @SuppressWarnings("unchecked") private void assertRemoteOnlyAgainst2IndexResults(Response response) throws 
IOException { + assertOK(response); Map responseAsMap = entityAsMap(response); List columns = (List) responseAsMap.get("columns"); List values = (List) responseAsMap.get("values"); @@ -752,6 +829,7 @@ private void assertRemoteOnlyAgainst2IndexResults(Response response) throws IOEx @SuppressWarnings("unchecked") private void assertLocalOnlyResults(Response response) throws IOException { + assertOK(response); Map responseAsMap = entityAsMap(response); List columns = (List) responseAsMap.get("columns"); List values = (List) responseAsMap.get("values"); @@ -766,6 +844,7 @@ private void assertLocalOnlyResults(Response response) throws IOException { @SuppressWarnings("unchecked") private void assertRemoteAndLocalResults(Response response) throws IOException { + assertOK(response); Map responseAsMap = entityAsMap(response); List columns = (List) responseAsMap.get("columns"); List values = (List) responseAsMap.get("values"); @@ -798,4 +877,18 @@ private void assertRemoteAndLocalResults(Response response) throws IOException { ) ); } + + private void assertWithEnrich(Response response) throws IOException { + assertOK(response); + Map responseAsMap = entityAsMap(response); + List columns = (List) responseAsMap.get("columns"); + List values = (List) responseAsMap.get("values"); + assertEquals(2, columns.size()); + assertEquals(2, values.size()); + List flatList = values.stream() + .flatMap(innerList -> innerList instanceof List ? 
((List) innerList).stream() : Stream.empty()) + .collect(Collectors.toList()); + assertThat(flatList, containsInAnyOrder(2, 3, "usa", "canada")); + } + } diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java index d8e6bc21fb4ed..fc522b0213eeb 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -28,6 +29,8 @@ import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.security.SecurityOnTrialLicenseRestTestCase; import org.junit.After; import org.junit.Before; @@ -79,7 +82,7 @@ public class ApiKeyRestIT extends SecurityOnTrialLicenseRestTestCase { private static final String END_USER = "end_user"; private static final SecureString END_USER_PASSWORD = new SecureString("end-user-password".toCharArray()); private static final String MANAGE_OWN_API_KEY_USER = "manage_own_api_key_user"; - private static final String REMOTE_INDICES_USER = 
"remote_indices_user"; + private static final String REMOTE_PERMISSIONS_USER = "remote_permissions_user"; private static final String MANAGE_API_KEY_USER = "manage_api_key_user"; private static final String MANAGE_SECURITY_USER = "manage_security_user"; @@ -653,7 +656,7 @@ public void testGetPrivilegesForApiKeyThrows400IfItHasAssignedPrivileges() throw } public void testRemoteIndicesSupportForApiKeys() throws IOException { - createUser(REMOTE_INDICES_USER, END_USER_PASSWORD, List.of("remote_indices_role")); + createUser(REMOTE_PERMISSIONS_USER, END_USER_PASSWORD, List.of("remote_indices_role")); createRole("remote_indices_role", Set.of("grant_api_key", "manage_own_api_key"), "remote"); final String remoteIndicesSection = """ "remote_indices": [ @@ -674,20 +677,26 @@ public void testRemoteIndicesSupportForApiKeys() throws IOException { assertOK(response); final Request grantApiKeyRequest = new Request("POST", "_security/api_key/grant"); - grantApiKeyRequest.setJsonEntity(Strings.format(""" - { - "grant_type":"password", - "username":"%s", - "password":"end-user-password", - "api_key":{ - "name":"k1", - "role_descriptors":{ - "r1":{ - %s - } - } - } - }""", includeRemoteIndices ? MANAGE_OWN_API_KEY_USER : REMOTE_INDICES_USER, includeRemoteIndices ? remoteIndicesSection : "")); + grantApiKeyRequest.setJsonEntity( + Strings.format( + """ + { + "grant_type":"password", + "username":"%s", + "password":"end-user-password", + "api_key":{ + "name":"k1", + "role_descriptors":{ + "r1":{ + %s + } + } + } + }""", + includeRemoteIndices ? MANAGE_OWN_API_KEY_USER : REMOTE_PERMISSIONS_USER, + includeRemoteIndices ? 
remoteIndicesSection : "" + ) + ); response = sendRequestWithRemoteIndices(grantApiKeyRequest, false == includeRemoteIndices); final String updatedRemoteIndicesSection = """ @@ -735,11 +744,150 @@ public void testRemoteIndicesSupportForApiKeys() throws IOException { assertThat(ObjectPath.createFromResponse(response).evaluate("noops"), contains(apiKeyId)); } - deleteUser(REMOTE_INDICES_USER); + deleteUser(REMOTE_PERMISSIONS_USER); deleteRole("remote_indices_role"); } + public void testRemoteClusterSupportForApiKeys() throws IOException { + createUser(REMOTE_PERMISSIONS_USER, END_USER_PASSWORD, List.of("remote_cluster_role")); + createRole("remote_cluster_role", Set.of("grant_api_key", "manage_own_api_key"), "remote"); + final String remoteClusterSectionTemplate = """ + "remote_cluster": [ + { + "privileges": ["monitor_enrich"], + "clusters": [%s] + } + ]"""; + String remoteClusterSection = Strings.format(remoteClusterSectionTemplate, "\"remote-a\", \"*\""); + final Request createApiKeyRequest = new Request("POST", "_security/api_key"); + final boolean includeRemoteCluster = randomBoolean(); + createApiKeyRequest.setJsonEntity(Strings.format(""" + {"name": "k1", "role_descriptors": {"r1": {%s}}}""", includeRemoteCluster ? 
remoteClusterSection : "")); + + // create API key as the admin user which does not have any remote_cluster limited_by permissions + Response response = sendRequestAsAdminUser(createApiKeyRequest); + String apiKeyId = ObjectPath.createFromResponse(response).evaluate("id"); + assertThat(apiKeyId, notNullValue()); + assertOK(response); + assertAPIKeyWithRemoteClusterPermissions(apiKeyId, includeRemoteCluster, false, null, null); + + // update that API key (as the admin user) + Request updateApiKeyRequest = new Request("PUT", "_security/api_key/" + apiKeyId); + remoteClusterSection = Strings.format(remoteClusterSectionTemplate, "\"foo\", \"bar\""); + updateApiKeyRequest.setJsonEntity(Strings.format(""" + {"role_descriptors": {"r1": {%s}}}""", includeRemoteCluster ? remoteClusterSection : "")); + response = sendRequestAsAdminUser(updateApiKeyRequest); + assertThat(ObjectPath.createFromResponse(response).evaluate("updated"), equalTo(includeRemoteCluster)); + assertOK(response); + assertAPIKeyWithRemoteClusterPermissions(apiKeyId, includeRemoteCluster, false, null, new String[] { "foo", "bar" }); + + // create API key as the remote user which does remote_cluster limited_by permissions + response = sendRequestAsRemoteUser(createApiKeyRequest); + apiKeyId = ObjectPath.createFromResponse(response).evaluate("id"); + assertThat(apiKeyId, notNullValue()); + assertOK(response); + assertAPIKeyWithRemoteClusterPermissions(apiKeyId, includeRemoteCluster, true, null, null); + + // update that API key (as the remote user) + updateApiKeyRequest = new Request("PUT", "_security/api_key/" + apiKeyId); + remoteClusterSection = Strings.format(remoteClusterSectionTemplate, "\"foo\", \"bar\""); + updateApiKeyRequest.setJsonEntity(Strings.format(""" + {"role_descriptors": {"r1": {%s}}}""", includeRemoteCluster ? 
remoteClusterSection : "")); + response = sendRequestAsRemoteUser(updateApiKeyRequest); + assertThat(ObjectPath.createFromResponse(response).evaluate("updated"), equalTo(includeRemoteCluster)); + assertOK(response); + assertAPIKeyWithRemoteClusterPermissions(apiKeyId, includeRemoteCluster, true, null, new String[] { "foo", "bar" }); + + // reset the clusters to the original value and setup grant API key requests + remoteClusterSection = Strings.format(remoteClusterSectionTemplate, "\"remote-a\", \"*\""); + final Request grantApiKeyRequest = new Request("POST", "_security/api_key/grant"); + String getApiKeyRequestTemplate = """ + { + "grant_type":"password", + "username":"%s", + "password":"end-user-password", + "api_key":{ + "name":"k1", + "role_descriptors":{ + "r1":{ + %s + } + } + } + }"""; + + // grant API key as the remote user which does remote_cluster limited_by permissions + grantApiKeyRequest.setJsonEntity( + Strings.format(getApiKeyRequestTemplate, REMOTE_PERMISSIONS_USER, includeRemoteCluster ? remoteClusterSection : "") + ); + response = sendRequestAsRemoteUser(grantApiKeyRequest); + apiKeyId = ObjectPath.createFromResponse(response).evaluate("id"); + assertThat(apiKeyId, notNullValue()); + assertOK(response); + assertAPIKeyWithRemoteClusterPermissions(apiKeyId, includeRemoteCluster, true, null, null); + + // grant API key as a different user which does not have remote_cluster limited_by permissions + grantApiKeyRequest.setJsonEntity( + Strings.format(getApiKeyRequestTemplate, MANAGE_OWN_API_KEY_USER, includeRemoteCluster ? 
remoteClusterSection : "") + ); + response = sendRequestAsRemoteUser(grantApiKeyRequest); + apiKeyId = ObjectPath.createFromResponse(response).evaluate("id"); + assertThat(apiKeyId, notNullValue()); + assertOK(response); + assertAPIKeyWithRemoteClusterPermissions(apiKeyId, includeRemoteCluster, false, "manage_own_api_key_role", null); + + // clean up + deleteUser(REMOTE_PERMISSIONS_USER); + deleteRole("remote_cluster_role"); + } + + @SuppressWarnings("unchecked") + private void assertAPIKeyWithRemoteClusterPermissions( + String apiKeyId, + boolean hasRemoteClusterInBaseRole, + boolean hasRemoteClusterInLimitedByRole, + @Nullable String limitedByRoleName, + @Nullable String[] baseRoleClusters + ) throws IOException { + Request getAPIKeyRequest = new Request("GET", String.format("_security/api_key?id=%s&with_limited_by=true", apiKeyId)); + Response response = sendRequestAsAdminUser(getAPIKeyRequest); + Map> root = ObjectPath.createFromResponse(response).evaluate("api_keys.0"); + if (hasRemoteClusterInBaseRole) { + // explicit permissions + baseRoleClusters = baseRoleClusters == null ? new String[] { "remote-a", "*" } : baseRoleClusters; + Map> roleDescriptors = (Map>) root.get("role_descriptors"); + List>> remoteCluster = (List>>) roleDescriptors.get("r1") + .get("remote_cluster"); + assertThat(remoteCluster.get(0).get("privileges"), containsInAnyOrder("monitor_enrich")); + assertThat(remoteCluster.get(0).get("clusters"), containsInAnyOrder(baseRoleClusters)); + } else { + // no explicit permissions + Map> roleDescriptors = (Map>) root.get("role_descriptors"); + Map> baseRole = (Map>) roleDescriptors.get("r1"); + assertNotNull(baseRole); + assertNull(baseRole.get("remote_cluster")); + } + if (hasRemoteClusterInLimitedByRole) { + // has limited by permissions + limitedByRoleName = limitedByRoleName == null ? 
"remote_cluster_role" : limitedByRoleName; + List>> limitedBy = (List>>) root.get("limited_by"); + Map> limitedByRole = (Map>) limitedBy.get(0).get(limitedByRoleName); + assertNotNull(limitedByRole); + + List>> remoteCluster = (List>>) limitedByRole.get("remote_cluster"); + assertThat(remoteCluster.get(0).get("privileges"), containsInAnyOrder("monitor_enrich")); + assertThat(remoteCluster.get(0).get("clusters"), containsInAnyOrder("remote")); + } else { + // no limited by permissions + limitedByRoleName = limitedByRoleName == null ? "_es_test_root" : limitedByRoleName; + List>> limitedBy = (List>>) root.get("limited_by"); + Map> limitedByRole = (Map>) limitedBy.get(0).get(limitedByRoleName); + assertNotNull(limitedByRole); + assertNull(limitedByRole.get("remote_cluster")); + } + } + @SuppressWarnings("unchecked") public void testQueryCrossClusterApiKeysByType() throws IOException { final List apiKeyIds = new ArrayList<>(3); @@ -932,7 +1080,7 @@ public void testCreateCrossClusterApiKey() throws IOException { XContentTestUtils.convertToMap( new RoleDescriptor( "cross_cluster", - new String[] { "cross_cluster_search", "cross_cluster_replication" }, + new String[] { "cross_cluster_search", "monitor_enrich", "cross_cluster_replication" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("metrics") @@ -1284,7 +1432,7 @@ public void testUpdateCrossClusterApiKey() throws IOException { assertThat(updateResponse1.evaluate("updated"), is(true)); final RoleDescriptor updatedRoleDescriptor1 = new RoleDescriptor( "cross_cluster", - new String[] { "cross_cluster_search", "cross_cluster_replication" }, + new String[] { "cross_cluster_search", "monitor_enrich", "cross_cluster_replication" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("data") @@ -1357,7 +1505,7 @@ public void testUpdateCrossClusterApiKey() throws IOException { final ObjectPath fetchResponse3 = 
fetchCrossClusterApiKeyById(apiKeyId); final RoleDescriptor updatedRoleDescriptors2 = new RoleDescriptor( "cross_cluster", - new String[] { "cross_cluster_search" }, + new String[] { "cross_cluster_search", "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("blogs") @@ -1779,16 +1927,24 @@ private void assertBadCreateCrossClusterApiKeyRequest(String body, String expect private Response sendRequestWithRemoteIndices(final Request request, final boolean executeAsRemoteIndicesUser) throws IOException { if (executeAsRemoteIndicesUser) { - request.setOptions( - RequestOptions.DEFAULT.toBuilder() - .addHeader("Authorization", headerFromRandomAuthMethod(REMOTE_INDICES_USER, END_USER_PASSWORD)) - ); - return client().performRequest(request); + return sendRequestAsRemoteUser(request); } else { - return adminClient().performRequest(request); + return sendRequestAsAdminUser(request); } } + private Response sendRequestAsRemoteUser(final Request request) throws IOException { + request.setOptions( + RequestOptions.DEFAULT.toBuilder() + .addHeader("Authorization", headerFromRandomAuthMethod(REMOTE_PERMISSIONS_USER, END_USER_PASSWORD)) + ); + return client().performRequest(request); + } + + private Response sendRequestAsAdminUser(final Request request) throws IOException { + return adminClient().performRequest(request); + } + private void doTestAuthenticationWithApiKey(final String apiKeyName, final String apiKeyId, final String apiKeyEncoded) throws IOException { final var authenticateRequest = new Request("GET", "_security/_authenticate"); @@ -2003,10 +2159,11 @@ private void expectErrorFields(final String type, final String reason, final Map private record EncodedApiKey(String id, String encoded, String name) {} - private void createRole(String name, Collection clusterPrivileges, String... 
remoteIndicesClusterAliases) throws IOException { + private void createRole(String name, Collection localClusterPrivileges, String... remoteIndicesClusterAliases) + throws IOException { final RoleDescriptor role = new RoleDescriptor( name, - clusterPrivileges.toArray(String[]::new), + localClusterPrivileges.toArray(String[]::new), new RoleDescriptor.IndicesPrivileges[0], new RoleDescriptor.ApplicationResourcePrivileges[0], null, @@ -2015,6 +2172,12 @@ private void createRole(String name, Collection clusterPrivileges, Strin null, new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder(remoteIndicesClusterAliases).indices("*").privileges("read").build() }, + new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + remoteIndicesClusterAliases + ) + ), null ); getSecurityClient().putRole(role); diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java index 745ea34e8eb89..9402d627063c4 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java @@ -120,6 +120,16 @@ public void setup() throws IOException { "privileges": ["read", "read_cross_cluster"], "clusters": ["my_remote_cluster_b"] } + ], + "remote_cluster": [ + { + "privileges": ["monitor_enrich"], + "clusters": ["my_remote_cluster*"] + }, + { + "privileges": ["monitor_enrich"], + "clusters": ["my_remote_cluster_b"] + } ] }"""); 
assertOK(adminClient().performRequest(putRoleRequest)); @@ -181,7 +191,7 @@ public void testCrossClusterAccessHeadersSentSingleRemote() throws Exception { new RoleDescriptorsIntersection( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + new String[] { "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("index-a") @@ -193,6 +203,7 @@ public void testCrossClusterAccessHeadersSentSingleRemote() throws Exception { null, null, null, + null, null ) ) @@ -249,7 +260,7 @@ public void testCrossClusterAccessHeadersSentMultipleRemotes() throws Exception new RoleDescriptorsIntersection( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + new String[] { "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("index-a") @@ -261,6 +272,7 @@ public void testCrossClusterAccessHeadersSentMultipleRemotes() throws Exception null, null, null, + null, null ) ) @@ -276,7 +288,7 @@ public void testCrossClusterAccessHeadersSentMultipleRemotes() throws Exception Set.of( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + new String[] { "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("index-a") @@ -292,6 +304,7 @@ public void testCrossClusterAccessHeadersSentMultipleRemotes() throws Exception null, null, null, + null, null ) ) @@ -334,6 +347,12 @@ public void testApiKeyCrossClusterAccessHeadersSentMultipleRemotes() throws Exce "privileges": ["all"], "clusters": ["my_remote_cluster_b"] } + ], + "remote_cluster": [ + { + "privileges": ["monitor_enrich"], + "clusters": ["my_remote_cluster_b"] + } ] } } @@ -389,7 +408,7 @@ public void testApiKeyCrossClusterAccessHeadersSentMultipleRemotes() throws Exce Set.of( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + null, // intentionally null to test that cluster A does not have remote_cluster privs new RoleDescriptor.IndicesPrivileges[] { 
RoleDescriptor.IndicesPrivileges.builder().indices("index-a*").privileges("all").build() }, null, @@ -398,6 +417,7 @@ public void testApiKeyCrossClusterAccessHeadersSentMultipleRemotes() throws Exce null, null, null, + null, null ) ), @@ -405,7 +425,7 @@ public void testApiKeyCrossClusterAccessHeadersSentMultipleRemotes() throws Exce Set.of( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + new String[] { "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("index-a") @@ -417,6 +437,7 @@ public void testApiKeyCrossClusterAccessHeadersSentMultipleRemotes() throws Exce null, null, null, + null, null ) ) @@ -434,7 +455,7 @@ public void testApiKeyCrossClusterAccessHeadersSentMultipleRemotes() throws Exce Set.of( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + new String[] { "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices("index-a*").privileges("all").build(), RoleDescriptor.IndicesPrivileges.builder().indices("index-b*").privileges("all").build() }, @@ -444,13 +465,14 @@ public void testApiKeyCrossClusterAccessHeadersSentMultipleRemotes() throws Exce null, null, null, + null, null ) ), Set.of( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + new String[] { "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("index-a") @@ -466,6 +488,7 @@ public void testApiKeyCrossClusterAccessHeadersSentMultipleRemotes() throws Exce null, null, null, + null, null ) ) @@ -496,6 +519,12 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti "privileges": ["all"], "clusters": ["my_remote_cluster*"] } + ], + "remote_cluster": [ + { + "privileges": ["monitor_enrich"], + "clusters": ["my_remote_cluster*"] + } ] }, "role-b": { @@ -511,6 +540,12 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti "privileges": ["all"], 
"clusters": ["my_remote_cluster_b"] } + ], + "remote_cluster": [ + { + "privileges": ["monitor_enrich"], + "clusters": ["my_remote_cluster_b"] + } ] } } @@ -536,7 +571,7 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti Set.of( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + new String[] { "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices("index-a*").privileges("all").build() }, null, @@ -545,6 +580,7 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti null, null, null, + null, null ) ), @@ -552,7 +588,7 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti Set.of( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + new String[] { "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("index-a") @@ -564,6 +600,7 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti null, null, null, + null, null ) ) @@ -575,7 +612,7 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti Set.of( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + new String[] { "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("index-a") @@ -587,6 +624,7 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti null, null, null, + null, null ) ) @@ -619,6 +657,12 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti "privileges": ["all"], "clusters": ["my_remote_cluster*"] } + ], + "remote_cluster": [ + { + "privileges": ["monitor_enrich"], + "clusters": ["my_remote_cluster*"] + } ] }, "role-b": { @@ -634,6 +678,12 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti "privileges": ["all"], "clusters": ["my_remote_cluster_b"] } + ], + "remote_cluster": [ + { + 
"privileges": ["monitor_enrich"], + "clusters": ["my_remote_cluster_b"] + } ] } } @@ -653,7 +703,7 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti Set.of( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + new String[] { "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices("index-a*").privileges("all").build() }, null, @@ -662,6 +712,7 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti null, null, null, + null, null ) ), @@ -669,7 +720,7 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti Set.of( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + new String[] { "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("index-a") @@ -681,6 +732,7 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti null, null, null, + null, null ) ) @@ -692,7 +744,7 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti Set.of( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + new String[] { "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("index-a") @@ -704,6 +756,7 @@ public void testApiKeyCrossClusterAccessHeadersSentSingleRemote() throws Excepti null, null, null, + null, null ) ) diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java index d76902efc35b5..28da12b226a66 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java +++ 
b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java @@ -88,6 +88,7 @@ public void testRemoteIndexPrivileges() throws IOException { .privileges("read") .grantedFields("field") .build() }, + null, null ) ); @@ -161,6 +162,7 @@ public void testRemoteIndexPrivileges() throws IOException { .query("{\"match\":{\"field\":\"a\"}}") .grantedFields("field") .build() }, + null, null ) ); @@ -180,6 +182,12 @@ public void testGetUserPrivileges() throws IOException { "grant": ["field"] } } + ], + "remote_cluster": [ + { + "privileges": ["monitor_enrich"], + "clusters": ["remote-a", "*"] + } ] }"""); final Response putRoleResponse1 = adminClient().performRequest(putRoleRequest); @@ -203,6 +211,12 @@ public void testGetUserPrivileges() throws IOException { "query": ["{\\"match\\":{\\"field\\":\\"a\\"}}"], "field_security": [{"grant": ["field"]}] } + ], + "remote_cluster": [ + { + "privileges": ["monitor_enrich"], + "clusters": ["remote-a", "*"] + } ] }"""))); @@ -222,6 +236,12 @@ public void testGetUserPrivileges() throws IOException { "privileges": ["read"], "clusters": ["remote-a", "*"] } + ], + "remote_cluster": [ + { + "privileges": ["monitor_enrich"], + "clusters": ["remote-c"] + } ] }"""); final Response putRoleResponse2 = adminClient().performRequest(putRoleRequest2); @@ -249,6 +269,12 @@ public void testGetUserPrivileges() throws IOException { "allow_restricted_indices": false, "clusters": ["remote-a", "*"] } + ], + "remote_cluster": [ + { + "privileges": ["monitor_enrich"], + "clusters": ["remote-c"] + } ] }"""))); } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index dc2d4ecc1dd74..351cf05b2096d 100644 --- 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -2769,7 +2769,7 @@ private List randomRoleDescriptors() { new RoleDescriptor(randomAlphaOfLength(10), new String[] { "all" }, null, null), randomValueOtherThanMany( rd -> RoleDescriptorRequestValidator.validate(rd) != null, - () -> RoleDescriptorTests.randomRoleDescriptor(false, true, false) + () -> RoleDescriptorTests.randomRoleDescriptor(false, true, false, true) ) ); case 2 -> null; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java index f4a314c55acfc..433f6aac1840e 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java @@ -595,7 +595,7 @@ public void testCreateCrossClusterApiKey() throws IOException { final RoleDescriptor expectedRoleDescriptor = new RoleDescriptor( "cross_cluster", - new String[] { "cross_cluster_search" }, + new String[] { "cross_cluster_search", "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("logs") @@ -647,7 +647,7 @@ public void testCreateCrossClusterApiKey() throws IOException { public void testUpdateCrossClusterApiKey() throws IOException { final RoleDescriptor originalRoleDescriptor = new RoleDescriptor( "cross_cluster", - new String[] { "cross_cluster_search" }, + new String[] { "cross_cluster_search", "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() 
.indices("logs") diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index fa9b53c5af935..6d76fac71e900 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -126,7 +126,6 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.TreeSet; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -136,6 +135,7 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.elasticsearch.TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; import static org.elasticsearch.transport.RemoteClusterPortSettings.TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY; @@ -328,6 +328,17 @@ && hasRemoteIndices(request.getRoleDescriptors())) { ); return; } + if (transportVersion.before(ROLE_REMOTE_CLUSTER_PRIVS) && hasRemoteCluster(request.getRoleDescriptors())) { + // Creating API keys with roles which define remote cluster privileges is not allowed in a mixed cluster. 
+ listener.onFailure( + new IllegalArgumentException( + "all nodes must have version [" + + ROLE_REMOTE_CLUSTER_PRIVS + + "] or higher to support remote cluster privileges for API keys" + ) + ); + return; + } if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) && request.getType() == ApiKey.Type.CROSS_CLUSTER) { listener.onFailure( @@ -349,7 +360,7 @@ && hasRemoteIndices(request.getRoleDescriptors())) { return; } - final Set filteredUserRoleDescriptors = maybeRemoveRemoteIndicesPrivileges( + final Set filteredUserRoleDescriptors = maybeRemoveRemotePrivileges( userRoleDescriptors, transportVersion, request.getId() @@ -367,6 +378,10 @@ private static boolean hasRemoteIndices(Collection roleDescripto return roleDescriptors != null && roleDescriptors.stream().anyMatch(RoleDescriptor::hasRemoteIndicesPrivileges); } + private static boolean hasRemoteCluster(Collection roleDescriptors) { + return roleDescriptors != null && roleDescriptors.stream().anyMatch(RoleDescriptor::hasRemoteClusterPermissions); + } + private static IllegalArgumentException validateWorkflowsRestrictionConstraints( TransportVersion transportVersion, List requestRoleDescriptors, @@ -497,6 +512,17 @@ public void updateApiKeys( ); return; } + if (transportVersion.before(ROLE_REMOTE_CLUSTER_PRIVS) && hasRemoteCluster(request.getRoleDescriptors())) { + // Updating API keys with roles which define remote cluster privileges is not allowed in a mixed cluster. 
+ listener.onFailure( + new IllegalArgumentException( + "all nodes must have version [" + + ROLE_REMOTE_CLUSTER_PRIVS + + "] or higher to support remote cluster privileges for API keys" + ) + ); + return; + } final Exception workflowsValidationException = validateWorkflowsRestrictionConstraints( transportVersion, request.getRoleDescriptors(), @@ -508,7 +534,7 @@ public void updateApiKeys( } final String[] apiKeyIds = request.getIds().toArray(String[]::new); - final Set filteredUserRoleDescriptors = maybeRemoveRemoteIndicesPrivileges( + final Set filteredUserRoleDescriptors = maybeRemoveRemotePrivileges( userRoleDescriptors, transportVersion, apiKeyIds @@ -517,6 +543,7 @@ public void updateApiKeys( if (logger.isDebugEnabled()) { logger.debug("Updating [{}] API keys", buildDelimitedStringWithLimit(10, apiKeyIds)); } + findVersionedApiKeyDocsForSubject( authentication, apiKeyIds, @@ -613,22 +640,23 @@ void validateForUpdate( } /** - * This method removes remote indices privileges from the given role descriptors - * when we are in a mixed cluster in which some of the nodes do not support remote indices. + * This method removes remote indices and cluster privileges from the given role descriptors + * when we are in a mixed cluster in which some of the nodes do not support remote indices/clusters. * Storing these roles would cause parsing issues on old nodes * (i.e. nodes running with transport version before * {@link org.elasticsearch.transport.RemoteClusterPortSettings#TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY}). */ - static Set maybeRemoveRemoteIndicesPrivileges( + static Set maybeRemoveRemotePrivileges( final Set userRoleDescriptors, final TransportVersion transportVersion, final String... 
apiKeyIds ) { - if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY)) { - final Set affectedRoles = new TreeSet<>(); + if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) + || transportVersion.before(ROLE_REMOTE_CLUSTER_PRIVS)) { + final Set affectedRoles = new HashSet<>(); final Set result = userRoleDescriptors.stream().map(roleDescriptor -> { - if (roleDescriptor.hasRemoteIndicesPrivileges()) { - affectedRoles.add(roleDescriptor.getName()); + if (roleDescriptor.hasRemoteIndicesPrivileges() || roleDescriptor.hasRemoteClusterPermissions()) { + affectedRoles.add(roleDescriptor); return new RoleDescriptor( roleDescriptor.getName(), roleDescriptor.getClusterPrivileges(), @@ -638,7 +666,13 @@ static Set maybeRemoveRemoteIndicesPrivileges( roleDescriptor.getRunAs(), roleDescriptor.getMetadata(), roleDescriptor.getTransientMetadata(), - null, + roleDescriptor.hasRemoteIndicesPrivileges() + && transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) + ? null + : roleDescriptor.getRemoteIndicesPrivileges(), + roleDescriptor.hasRemoteClusterPermissions() && transportVersion.before(ROLE_REMOTE_CLUSTER_PRIVS) + ? null + : roleDescriptor.getRemoteClusterPermissions(), roleDescriptor.getRestriction() ); } @@ -646,17 +680,33 @@ static Set maybeRemoveRemoteIndicesPrivileges( }).collect(Collectors.toSet()); if (false == affectedRoles.isEmpty()) { - logger.info( - "removed remote indices privileges from role(s) {} for API key(s) [{}]", - affectedRoles, - buildDelimitedStringWithLimit(10, apiKeyIds) - ); - HeaderWarning.addWarning( - "Removed API key's remote indices privileges from role(s) " - + affectedRoles - + ". Remote indices are not supported by all nodes in the cluster. " - + "Use the update API Key API to re-assign remote indices to the API key(s), after the cluster upgrade is complete." 
- ); + List affectedRolesNames = affectedRoles.stream().map(RoleDescriptor::getName).sorted().collect(Collectors.toList()); + if (affectedRoles.stream().anyMatch(RoleDescriptor::hasRemoteIndicesPrivileges) + && transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY)) { + logger.info( + "removed remote indices privileges from role(s) {} for API key(s) [{}]", + affectedRolesNames, + buildDelimitedStringWithLimit(10, apiKeyIds) + ); + HeaderWarning.addWarning( + "Removed API key's remote indices privileges from role(s) " + + affectedRolesNames + + ". Remote indices are not supported by all nodes in the cluster. " + ); + } + if (affectedRoles.stream().anyMatch(RoleDescriptor::hasRemoteClusterPermissions) + && transportVersion.before(ROLE_REMOTE_CLUSTER_PRIVS)) { + logger.info( + "removed remote cluster privileges from role(s) {} for API key(s) [{}]", + affectedRolesNames, + buildDelimitedStringWithLimit(10, apiKeyIds) + ); + HeaderWarning.addWarning( + "Removed API key's remote cluster privileges from role(s) " + + affectedRolesNames + + ". Remote cluster privileges are not supported by all nodes in the cluster." 
+ ); + } } return result; } @@ -767,6 +817,7 @@ static XContentBuilder maybeBuildUpdatedDocument( addApiKeyHash(builder, currentApiKeyDoc.hash.toCharArray()); final List keyRoles = request.getRoleDescriptors(); + if (keyRoles != null) { logger.trace(() -> format("Building API key doc with updated role descriptors [%s]", keyRoles)); addRoleDescriptors(builder, keyRoles); @@ -862,12 +913,24 @@ private static boolean isNoop( } final List newRoleDescriptors = request.getRoleDescriptors(); + if (newRoleDescriptors != null) { final List currentRoleDescriptors = parseRoleDescriptorsBytes(apiKeyId, apiKeyDoc.roleDescriptorsBytes, false); if (false == (newRoleDescriptors.size() == currentRoleDescriptors.size() && Set.copyOf(newRoleDescriptors).containsAll(currentRoleDescriptors))) { return false; } + + if (newRoleDescriptors.size() == currentRoleDescriptors.size()) { + for (int i = 0; i < currentRoleDescriptors.size(); i++) { + // if remote cluster permissions are not equal, then it is not a noop + if (currentRoleDescriptors.get(i) + .getRemoteClusterPermissions() + .equals(newRoleDescriptors.get(i).getRemoteClusterPermissions()) == false) { + return false; + } + } + } } assert userRoleDescriptors != null; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationDenialMessages.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationDenialMessages.java index ae3a09af4751d..d553c0794ca9c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationDenialMessages.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationDenialMessages.java @@ -116,11 +116,12 @@ public String remoteActionDenied( String action, String clusterAlias ) { - assert isIndexAction(action); String userText = successfulAuthenticationDescription(authentication, authorizationInfo); String remoteClusterText = remoteClusterText(clusterAlias); 
- return actionIsUnauthorizedMessage(action, remoteClusterText, userText) - + " because no remote indices privileges apply for the target cluster"; + String message = isIndexAction(action) + ? " because no remote indices privileges apply for the target cluster" + : " because no remote cluster privileges apply for the target cluster"; + return actionIsUnauthorizedMessage(action, remoteClusterText, userText) + message; } protected Collection findClusterPrivilegesThatGrant( diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java index 0c28ea1e37354..718602b758072 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java @@ -12,6 +12,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchRoleRestrictionException; import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DelegatingActionListener; import org.elasticsearch.action.DocWriteRequest; @@ -218,6 +219,7 @@ public void retrieveUserPrivileges( public void getRoleDescriptorsIntersectionForRemoteCluster( final String remoteClusterAlias, + final TransportVersion remoteClusterVersion, final Subject subject, final ActionListener listener ) { @@ -242,6 +244,7 @@ public void getRoleDescriptorsIntersectionForRemoteCluster( listener.delegateFailure( (delegatedLister, resolvedAuthzInfo) -> authorizationEngine.getRoleDescriptorsIntersectionForRemoteCluster( remoteClusterAlias, + remoteClusterVersion, resolvedAuthzInfo, wrapPreservingContext(delegatedLister, threadContext) ) diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java index 320dd4c6f8e09..1c773a6e3963f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java @@ -12,6 +12,7 @@ import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.ElasticsearchRoleRestrictionException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.AliasesRequest; @@ -726,12 +727,13 @@ public void getUserPrivileges(AuthorizationInfo authorizationInfo, ActionListene @Override public void getRoleDescriptorsIntersectionForRemoteCluster( final String remoteClusterAlias, + final TransportVersion remoteClusterVersion, final AuthorizationInfo authorizationInfo, final ActionListener listener ) { if (authorizationInfo instanceof RBACAuthorizationInfo rbacAuthzInfo) { final Role role = rbacAuthzInfo.getRole(); - listener.onResponse(role.getRoleDescriptorsIntersectionForRemoteCluster(remoteClusterAlias)); + listener.onResponse(role.getRoleDescriptorsIntersectionForRemoteCluster(remoteClusterAlias, remoteClusterVersion)); } else { listener.onFailure( new IllegalArgumentException("unsupported authorization info: " + authorizationInfo.getClass().getSimpleName()) @@ -798,7 +800,15 @@ static GetUserPrivilegesResponse buildUserPrivilegesResponseObject(Role userRole runAs = runAsPrivilege.name(); } - return new GetUserPrivilegesResponse(cluster, conditionalCluster, indices, application, runAs, remoteIndices); + return new GetUserPrivilegesResponse( + cluster, + conditionalCluster, + indices, + application, + runAs, + remoteIndices, + userRole.remoteCluster() 
+ ); } private static GetUserPrivilegesResponse.Indices toIndices(final IndicesPermission.Group group) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java index 8a10981e51ebb..4007a1e5b2ec8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java @@ -33,6 +33,8 @@ import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition.FieldGrantExcludeGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; @@ -429,6 +431,7 @@ public static void buildRoleFromDescriptors( final Map>, Set> applicationPrivilegesMap = new HashMap<>(); final Set workflows = new HashSet<>(); final List roleNames = new ArrayList<>(roleDescriptors.size()); + final RemoteClusterPermissions remoteClusterPermissions = new RemoteClusterPermissions(); for (RoleDescriptor descriptor : roleDescriptors) { roleNames.add(descriptor.getName()); if (descriptor.getClusterPrivileges() != null) { @@ -448,6 +451,12 @@ public static void buildRoleFromDescriptors( groupIndexPrivilegesByCluster(descriptor.getRemoteIndicesPrivileges(), remoteIndicesPrivilegesByCluster); } + if 
(descriptor.hasRemoteClusterPermissions()) { + for (RemoteClusterPermissionGroup groups : descriptor.getRemoteClusterPermissions().groups()) { + remoteClusterPermissions.addGroup(groups); + } + } + for (RoleDescriptor.ApplicationResourcePrivileges appPrivilege : descriptor.getApplicationPrivileges()) { Tuple> key = new Tuple<>(appPrivilege.getApplication(), newHashSet(appPrivilege.getResources())); applicationPrivilegesMap.compute(key, (k, v) -> { @@ -490,7 +499,7 @@ public static void buildRoleFromDescriptors( remoteIndicesPrivilegesByCluster.forEach((clusterAliasKey, remoteIndicesPrivilegesForCluster) -> { remoteIndicesPrivilegesForCluster.forEach( - (privilege) -> builder.addRemoteGroup( + (privilege) -> builder.addRemoteIndicesGroup( clusterAliasKey, fieldPermissionsCache.getFieldPermissions( new FieldPermissionsDefinition(privilege.getGrantedFields(), privilege.getDeniedFields()) @@ -502,6 +511,13 @@ public static void buildRoleFromDescriptors( ) ); }); + + if (remoteClusterPermissions.hasPrivileges()) { + builder.addRemoteClusterPermissions(remoteClusterPermissions); + } else { + builder.addRemoteClusterPermissions(RemoteClusterPermissions.NONE); + } + if (false == workflows.isEmpty()) { builder.workflows(workflows); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java index 368ec3825c0c2..71a78c1627946 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java @@ -154,6 +154,7 @@ public Map usageStats() { usageStats.put("fls", fls); usageStats.put("dls", dls); usageStats.put("remote_indices", localPermissions.values().stream().filter(RoleDescriptor::hasRemoteIndicesPrivileges).count()); + usageStats.put("remote_cluster", 
localPermissions.values().stream().filter(RoleDescriptor::hasRemoteClusterPermissions).count()); return usageStats; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index 41269ea049d66..69ec275946327 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -61,6 +61,7 @@ import java.util.function.BiConsumer; import java.util.function.Supplier; +import static org.elasticsearch.TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS; import static org.elasticsearch.index.query.QueryBuilders.existsQuery; import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; import static org.elasticsearch.transport.RemoteClusterPortSettings.TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY; @@ -264,9 +265,16 @@ public void putRole(final PutRoleRequest request, final RoleDescriptor role, fin + "] or higher to support remote indices privileges" ) ); - } else { - innerPutRole(request, role, listener); - } + } else if (role.hasRemoteClusterPermissions() + && clusterService.state().getMinTransportVersion().before(ROLE_REMOTE_CLUSTER_PRIVS)) { + listener.onFailure( + new IllegalStateException( + "all nodes must have version [" + ROLE_REMOTE_CLUSTER_PRIVS + "] or higher to support remote cluster privileges" + ) + ); + } else { + innerPutRole(request, role, listener); + } } // pkg-private for testing @@ -369,6 +377,16 @@ public void usageStats(ActionListener> listener) { .setTrackTotalHits(true) .setSize(0) ) + .add( + client.prepareSearch(SECURITY_MAIN_ALIAS) + .setQuery( + QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) + .filter(existsQuery("remote_cluster")) + ) + 
.setTrackTotalHits(true) + .setSize(0) + ) .request(), new DelegatingActionListener>(listener) { @Override @@ -395,6 +413,11 @@ public void onResponse(MultiSearchResponse items) { } else { usageStats.put("remote_indices", responses[3].getResponse().getHits().getTotalHits().value); } + if (responses[4].isFailure()) { + usageStats.put("remote_cluster", 0); + } else { + usageStats.put("remote_cluster", responses[4].getResponse().getHits().getTotalHits().value); + } delegate.onResponse(usageStats); } }, @@ -511,6 +534,7 @@ static RoleDescriptor transformRole(String id, BytesReference sourceBytes, Logge roleDescriptor.getMetadata(), transientMap, roleDescriptor.getRemoteIndicesPrivileges(), + roleDescriptor.getRemoteClusterPermissions(), roleDescriptor.getRestriction() ); } else { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java index ad4d0d4434622..ae7df99a55302 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java @@ -144,8 +144,8 @@ public void resolveCrossClusterAccessRoleReference( ) { final Set roleDescriptors = crossClusterAccessRoleReference.getRoleDescriptorsBytes().toRoleDescriptors(); for (RoleDescriptor roleDescriptor : roleDescriptors) { - if (roleDescriptor.hasPrivilegesOtherThanIndex()) { - final String message = "Role descriptor for cross cluster access can only contain index privileges " + if (roleDescriptor.hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster()) { + final String message = "Role descriptor for cross cluster access can only contain index and cluster privileges " + "but other privileges found for subject [" + crossClusterAccessRoleReference.getUserPrincipal() + "]"; diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesAction.java index d89e3e2279034..96e8ffd74a314 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesAction.java @@ -96,7 +96,9 @@ public RestResponse buildResponse(GetUserPrivilegesResponse response, XContentBu if (response.hasRemoteIndicesPrivileges()) { builder.field(RoleDescriptor.Fields.REMOTE_INDICES.getPreferredName(), response.getRemoteIndexPrivileges()); } - + if (response.hasRemoteClusterPrivileges()) { + builder.array(RoleDescriptor.Fields.REMOTE_CLUSTER.getPreferredName(), response.getRemoteClusterPermissions()); + } builder.endObject(); return new RestResponse(RestStatus.OK, builder); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java index 3e46a370c6e92..ed198834d24f1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java @@ -304,6 +304,23 @@ private XContentBuilder getMainIndexMappings() { } builder.endObject(); + builder.startObject("remote_cluster"); + { + builder.field("type", "object"); + builder.startObject("properties"); + { + builder.startObject("clusters"); + builder.field("type", "keyword"); + builder.endObject(); + + builder.startObject("privileges"); + builder.field("type", "keyword"); + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + 
builder.startObject("applications"); { builder.field("type", "object"); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java index 1a68887646731..d8914d4646fd3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java @@ -378,6 +378,7 @@ private void sendWithCrossClusterAccessHeaders( assert false == action.startsWith("internal:") : "internal action must be sent with system user"; authzService.getRoleDescriptorsIntersectionForRemoteCluster( remoteClusterAlias, + connection.getTransportVersion(), authentication.getEffectiveSubject(), ActionListener.wrap(roleDescriptorsIntersection -> { logger.trace( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index f575bb6adc50e..25e03c6d87e34 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -783,14 +783,12 @@ public void testSecurityRestHandlerInterceptorCanBeInstalled() throws IllegalAcc final Logger amLogger = LogManager.getLogger(ActionModule.class); Loggers.setLevel(amLogger, Level.DEBUG); final MockLogAppender appender = new MockLogAppender(); - Loggers.addAppender(amLogger, appender); - appender.start(); Settings settings = Settings.builder().put("xpack.security.enabled", false).put("path.home", createTempDir()).build(); SettingsModule settingsModule = new SettingsModule(Settings.EMPTY); ThreadPool threadPool = new 
TestThreadPool(getTestName()); - try { + try (var ignored = appender.capturing(ActionModule.class)) { UsageService usageService = new UsageService(); Security security = new Security(settings); @@ -829,8 +827,6 @@ public void testSecurityRestHandlerInterceptorCanBeInstalled() throws IllegalAcc appender.assertAllExpectationsMatched(); } finally { threadPool.shutdown(); - appender.stop(); - Loggers.removeAppender(amLogger, appender); } } @@ -839,8 +835,6 @@ public void testSecurityStatusMessageInLog() throws Exception { boolean securityEnabled = true; Loggers.setLevel(mockLogger, Level.INFO); final MockLogAppender appender = new MockLogAppender(); - Loggers.addAppender(mockLogger, appender); - appender.start(); Settings.Builder settings = Settings.builder().put("path.home", createTempDir()); if (randomBoolean()) { @@ -849,7 +843,7 @@ public void testSecurityStatusMessageInLog() throws Exception { settings.put("xpack.security.enabled", securityEnabled); } - try { + try (var ignored = appender.capturing(Security.class)) { appender.addExpectation( new MockLogAppender.SeenEventExpectation( "message", @@ -860,9 +854,6 @@ public void testSecurityStatusMessageInLog() throws Exception { ); createComponents(settings.build()); appender.assertAllExpectationsMatched(); - } finally { - appender.stop(); - Loggers.removeAppender(mockLogger, appender); } } @@ -1164,16 +1155,10 @@ private MockLogAppender.SeenEventExpectation logEventForNonCompliantStoredHash(S private void expectLogs(Class clazz, List expected, Runnable runnable) throws IllegalAccessException { final MockLogAppender mockAppender = new MockLogAppender(); - final Logger logger = LogManager.getLogger(clazz); - mockAppender.start(); - try { - Loggers.addAppender(logger, mockAppender); + try (var ignored = mockAppender.capturing(clazz)) { expected.forEach(mockAppender::addExpectation); runnable.run(); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(logger, mockAppender); - 
mockAppender.stop(); } } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java index 9c434ada7bb6c..1a6ed48efe1d0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java @@ -7,9 +7,6 @@ package org.elasticsearch.xpack.security.audit; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.license.License; import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.rest.RestRequest; @@ -63,59 +60,56 @@ public void init() throws Exception { public void testLogWhenLicenseProhibitsAuditing() throws Exception { MockLogAppender mockLogAppender = new MockLogAppender(); - mockLogAppender.start(); - Logger auditTrailServiceLogger = LogManager.getLogger(AuditTrailService.class); - Loggers.addAppender(auditTrailServiceLogger, mockLogAppender); - when(licenseState.getOperationMode()).thenReturn(randomFrom(License.OperationMode.values())); - if (isAuditingAllowed) { + try (var ignored = mockLogAppender.capturing(AuditTrailService.class)) { + when(licenseState.getOperationMode()).thenReturn(randomFrom(License.OperationMode.values())); + if (isAuditingAllowed) { + mockLogAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "audit disabled because of license", + AuditTrailService.class.getName(), + Level.WARN, + "Auditing logging is DISABLED because the currently active license [" + + licenseState.getOperationMode() + + "] does not permit it" + ) + ); + } else { + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "audit disabled because of 
license", + AuditTrailService.class.getName(), + Level.WARN, + "Auditing logging is DISABLED because the currently active license [" + + licenseState.getOperationMode() + + "] does not permit it" + ) + ); + } + for (int i = 1; i <= randomIntBetween(2, 6); i++) { + service.get(); + } + + mockLogAppender.assertAllExpectationsMatched(); + } + } + + public void testNoLogRecentlyWhenLicenseProhibitsAuditing() throws Exception { + MockLogAppender mockLogAppender = new MockLogAppender(); + try (var ignored = mockLogAppender.capturing(AuditTrailService.class)) { + service.nextLogInstantAtomic.set(randomFrom(Instant.now().minus(Duration.ofMinutes(5)), Instant.now())); mockLogAppender.addExpectation( new MockLogAppender.UnseenEventExpectation( "audit disabled because of license", AuditTrailService.class.getName(), Level.WARN, - "Auditing logging is DISABLED because the currently active license [" - + licenseState.getOperationMode() - + "] does not permit it" + "Security auditing is DISABLED because the currently active license [*] does not permit it" ) ); - } else { - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "audit disabled because of license", - AuditTrailService.class.getName(), - Level.WARN, - "Auditing logging is DISABLED because the currently active license [" - + licenseState.getOperationMode() - + "] does not permit it" - ) - ); - } - for (int i = 1; i <= randomIntBetween(2, 6); i++) { - service.get(); - } - mockLogAppender.assertAllExpectationsMatched(); - Loggers.removeAppender(auditTrailServiceLogger, mockLogAppender); - } - - public void testNoLogRecentlyWhenLicenseProhibitsAuditing() throws Exception { - MockLogAppender mockLogAppender = new MockLogAppender(); - mockLogAppender.start(); - Logger auditTrailServiceLogger = LogManager.getLogger(AuditTrailService.class); - Loggers.addAppender(auditTrailServiceLogger, mockLogAppender); - service.nextLogInstantAtomic.set(randomFrom(Instant.now().minus(Duration.ofMinutes(5)), 
Instant.now())); - mockLogAppender.addExpectation( - new MockLogAppender.UnseenEventExpectation( - "audit disabled because of license", - AuditTrailService.class.getName(), - Level.WARN, - "Security auditing is DISABLED because the currently active license [*] does not permit it" - ) - ); - for (int i = 1; i <= randomIntBetween(2, 6); i++) { - service.get(); + for (int i = 1; i <= randomIntBetween(2, 6); i++) { + service.get(); + } + mockLogAppender.assertAllExpectationsMatched(); } - mockLogAppender.assertAllExpectationsMatched(); - Loggers.removeAppender(auditTrailServiceLogger, mockLogAppender); } public void testAuthenticationFailed() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java index 2438e625259d1..fb5a49428887f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java @@ -3247,7 +3247,7 @@ private CrossClusterApiKeyAccessWithSerialization randomCrossClusterApiKeyAccess } ] }""", - "[{\"cluster\":[\"cross_cluster_search\"]," + "[{\"cluster\":[\"cross_cluster_search\",\"monitor_enrich\"]," + "\"indices\":[{\"names\":[\"logs*\"]," + "\"privileges\":[\"read\",\"read_cross_cluster\",\"view_index_metadata\"]}]," + "\"applications\":[],\"run_as\":[]}]" @@ -3300,7 +3300,7 @@ private CrossClusterApiKeyAccessWithSerialization randomCrossClusterApiKeyAccess } ] }""", - "[{\"cluster\":[\"cross_cluster_search\",\"cross_cluster_replication\"]," + "[{\"cluster\":[\"cross_cluster_search\",\"monitor_enrich\",\"cross_cluster_replication\"]," + "\"indices\":[{\"names\":[\"logs*\"],\"privileges\":[\"read\",\"read_cross_cluster\",\"view_index_metadata\"]," + 
"\"field_security\":{\"grant\":[\"*\"],\"except\":[\"private\"]},\"query\":\"{\\\"term\\\":{\\\"tag\\\":42}}\"}," + "{\"names\":[\"archive\"],\"privileges\":[\"cross_cluster_replication\",\"cross_cluster_replication_internal\"]," diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index b3ec3ef117c3e..107f7c0632ea7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -105,6 +105,8 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests; import org.elasticsearch.xpack.core.security.authz.RoleRestrictionTests; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import org.elasticsearch.xpack.core.security.authz.store.RoleReference; @@ -150,6 +152,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.elasticsearch.TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; @@ -1038,7 +1041,7 @@ private SearchHit searchHitForCrossClusterApiKey(int crossClusterAccessLevel) { final String roleDescriptor = switch (crossClusterAccessLevel) { case 0 -> """ { - "cluster": ["cross_cluster_search"] + "cluster": 
["cross_cluster_search", "monitor_enrich"] }"""; case 1 -> """ { @@ -1046,7 +1049,7 @@ private SearchHit searchHitForCrossClusterApiKey(int crossClusterAccessLevel) { }"""; default -> """ { - "cluster": ["cross_cluster_search", "cross_cluster_replication"] + "cluster": ["cross_cluster_search", "monitor_enrich", "cross_cluster_replication"] }"""; }; final int docId = randomIntBetween(0, Integer.MAX_VALUE); @@ -1510,10 +1513,8 @@ public void testApiKeyAuthCacheWillTraceLogOnEvictionDueToCacheSize() throws Ill final Logger logger = LogManager.getLogger(ApiKeyService.class); Loggers.setLevel(logger, Level.TRACE); final MockLogAppender appender = new MockLogAppender(); - Loggers.addAppender(logger, appender); - appender.start(); - try { + try (var ignored = appender.capturing(ApiKeyService.class)) { appender.addExpectation( new MockLogAppender.PatternSeenEventExpectation( "evict", @@ -1556,9 +1557,7 @@ public void testApiKeyAuthCacheWillTraceLogOnEvictionDueToCacheSize() throws Ill apiKeyAuthCache.invalidateAll(); appender.assertAllExpectationsMatched(); } finally { - appender.stop(); Loggers.setLevel(logger, Level.INFO); - Loggers.removeAppender(logger, appender); } } @@ -1575,10 +1574,8 @@ public void testApiKeyCacheWillNotTraceLogOnEvictionDueToCacheTtl() throws Illeg final Logger logger = LogManager.getLogger(ApiKeyService.class); Loggers.setLevel(logger, Level.TRACE); final MockLogAppender appender = new MockLogAppender(); - Loggers.addAppender(logger, appender); - appender.start(); - try { + try (var ignored = appender.capturing(ApiKeyService.class)) { appender.addExpectation( new MockLogAppender.UnseenEventExpectation( "evict", @@ -1596,9 +1593,7 @@ public void testApiKeyCacheWillNotTraceLogOnEvictionDueToCacheTtl() throws Illeg assertEquals(1, apiKeyAuthCache.count()); appender.assertAllExpectationsMatched(); } finally { - appender.stop(); Loggers.setLevel(logger, Level.INFO); - Loggers.removeAppender(logger, appender); } } @@ -1612,10 +1607,8 @@ public void 
testApiKeyAuthCacheWillLogWarningOnPossibleThrashing() throws Except final Logger logger = LogManager.getLogger(ApiKeyService.class); Loggers.setLevel(logger, Level.TRACE); final MockLogAppender appender = new MockLogAppender(); - Loggers.addAppender(logger, appender); - appender.start(); - try { + try (var ignored = appender.capturing(ApiKeyService.class)) { // Prepare the warning logging to trigger service.getEvictionCounter().add(4500); final long thrashingCheckIntervalInSeconds = 300L; @@ -1670,9 +1663,7 @@ public void testApiKeyAuthCacheWillLogWarningOnPossibleThrashing() throws Except apiKeyAuthCache.put(randomAlphaOfLength(23), new ListenableFuture<>()); appender.assertAllExpectationsMatched(); } finally { - appender.stop(); Loggers.setLevel(logger, Level.INFO); - Loggers.removeAppender(logger, appender); } } @@ -2591,7 +2582,7 @@ public void testGetApiKeyMetadata() throws IOException { public void testMaybeRemoveRemoteIndicesPrivilegesWithUnsupportedVersion() { final String apiKeyId = randomAlphaOfLengthBetween(5, 8); final Set userRoleDescriptors = Set.copyOf( - randomList(2, 5, () -> RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), randomBoolean())) + randomList(2, 5, () -> RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), randomBoolean(), false)) ); // Selecting random unsupported version. 
@@ -2601,11 +2592,7 @@ public void testMaybeRemoveRemoteIndicesPrivilegesWithUnsupportedVersion() { TransportVersionUtils.getPreviousVersion(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) ); - final Set result = ApiKeyService.maybeRemoveRemoteIndicesPrivileges( - userRoleDescriptors, - minTransportVersion, - apiKeyId - ); + final Set result = ApiKeyService.maybeRemoveRemotePrivileges(userRoleDescriptors, minTransportVersion, apiKeyId); assertThat(result.stream().anyMatch(RoleDescriptor::hasRemoteIndicesPrivileges), equalTo(false)); assertThat(result.size(), equalTo(userRoleDescriptors.size())); @@ -2621,30 +2608,62 @@ public void testMaybeRemoveRemoteIndicesPrivilegesWithUnsupportedVersion() { "Removed API key's remote indices privileges from role(s) " + userRoleNamesWithRemoteIndicesPrivileges + ". Remote indices are not supported by all nodes in the cluster. " - + "Use the update API Key API to re-assign remote indices to the API key(s), after the cluster upgrade is complete." ); } } - public void testMaybeRemoveRemoteIndicesPrivilegesWithSupportedVersion() { + public void testMaybeRemoveRemoteClusterPrivilegesWithUnsupportedVersion() { final String apiKeyId = randomAlphaOfLengthBetween(5, 8); final Set userRoleDescriptors = Set.copyOf( - randomList(1, 3, ApiKeyServiceTests::randomRoleDescriptorWithRemoteIndexPrivileges) + randomList( + 2, + 5, + () -> RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()) + ) ); - // Selecting random supported version. + // Selecting random unsupported version. 
final TransportVersion minTransportVersion = TransportVersionUtils.randomVersionBetween( random(), TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY, - TransportVersion.current() + TransportVersionUtils.getPreviousVersion(ROLE_REMOTE_CLUSTER_PRIVS) + ); + + final Set result = ApiKeyService.maybeRemoveRemotePrivileges(userRoleDescriptors, minTransportVersion, apiKeyId); + assertThat(result.stream().anyMatch(RoleDescriptor::hasRemoteClusterPermissions), equalTo(false)); + assertThat(result.size(), equalTo(userRoleDescriptors.size())); + + // Roles for which warning headers are added. + final List userRoleNamesWithRemoteClusterPrivileges = userRoleDescriptors.stream() + .filter(RoleDescriptor::hasRemoteClusterPermissions) + .map(RoleDescriptor::getName) + .sorted() + .toList(); + + if (false == userRoleNamesWithRemoteClusterPrivileges.isEmpty()) { + assertWarnings( + "Removed API key's remote cluster privileges from role(s) " + + userRoleNamesWithRemoteClusterPrivileges + + ". Remote cluster privileges are not supported by all nodes in the cluster." + ); + } + } + + public void testMaybeRemoveRemotePrivilegesWithSupportedVersion() { + final String apiKeyId = randomAlphaOfLengthBetween(5, 8); + final Set userRoleDescriptors = Set.copyOf( + randomList(1, 3, ApiKeyServiceTests::randomRoleDescriptorWithRemotePrivileges) ); - final Set result = ApiKeyService.maybeRemoveRemoteIndicesPrivileges( - userRoleDescriptors, - minTransportVersion, - apiKeyId + // Selecting random supported version. + final TransportVersion minTransportVersion = TransportVersionUtils.randomVersionBetween( + random(), + ROLE_REMOTE_CLUSTER_PRIVS, + TransportVersion.current() ); + final Set result = ApiKeyService.maybeRemoveRemotePrivileges(userRoleDescriptors, minTransportVersion, apiKeyId); + // User roles should be unchanged. 
assertThat(result, equalTo(userRoleDescriptors)); } @@ -2912,7 +2931,7 @@ public void testValidateOwnerUserRoleDescriptorsWithWorkflowsRestriction() { final List requestRoleDescriptors = randomList( 0, 1, - () -> RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), false, randomBoolean()) + () -> RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), false, randomBoolean(), false) ); final AbstractCreateApiKeyRequest createRequest = mock(AbstractCreateApiKeyRequest.class); @@ -2936,7 +2955,7 @@ public void testValidateOwnerUserRoleDescriptorsWithWorkflowsRestriction() { assertThat(e2.getMessage(), containsString("owner user role descriptors must not include restriction")); } - private static RoleDescriptor randomRoleDescriptorWithRemoteIndexPrivileges() { + private static RoleDescriptor randomRoleDescriptorWithRemotePrivileges() { return new RoleDescriptor( randomAlphaOfLengthBetween(3, 90), randomSubsetOf(ClusterPrivilegeResolver.names()).toArray(String[]::new), @@ -2947,6 +2966,9 @@ private static RoleDescriptor randomRoleDescriptorWithRemoteIndexPrivileges() { RoleDescriptorTests.randomRoleDescriptorMetadata(randomBoolean()), Map.of(), RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 3), + new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "*" }) + ), RoleRestrictionTests.randomWorkflowsRestriction(1, 3) ); } @@ -2962,6 +2984,7 @@ private static RoleDescriptor randomRoleDescriptorWithWorkflowsRestriction() { RoleDescriptorTests.randomRoleDescriptorMetadata(randomBoolean()), Map.of(), null, + null, RoleRestrictionTests.randomWorkflowsRestriction(1, 3) ); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index 57b656dc0ddde..68e703fb26e26 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -7,8 +7,6 @@ package org.elasticsearch.xpack.security.authc; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; @@ -33,7 +31,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -420,11 +417,9 @@ public void testTokenFirstMissingSecondFound() throws Exception { } public void testTokenMissing() throws Exception { - final Logger unlicensedRealmsLogger = LogManager.getLogger(RealmsAuthenticator.class); final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - try { - Loggers.addAppender(unlicensedRealmsLogger, mockAppender); + + try (var ignored = mockAppender.capturing(RealmsAuthenticator.class)) { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "unlicensed realms", @@ -471,9 +466,6 @@ public void testTokenMissing() throws Exception { service.authenticate("_action", transportRequest, true, listener); } assertThat(completed.get(), is(true)); - } finally { - Loggers.removeAppender(unlicensedRealmsLogger, mockAppender); - mockAppender.stop(); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticatorChainTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticatorChainTests.java index 97672335dd3ac..82b0a06a6dc52 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticatorChainTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticatorChainTests.java @@ -481,10 +481,8 @@ public void testRunAsIsIgnoredForUnsupportedAuthenticationTypes() throws Illegal final Logger logger = LogManager.getLogger(AuthenticatorChain.class); Loggers.setLevel(logger, Level.INFO); final MockLogAppender appender = new MockLogAppender(); - Loggers.addAppender(logger, appender); - appender.start(); - try { + try (var ignored = appender.capturing(AuthenticatorChain.class)) { appender.addExpectation( new MockLogAppender.SeenEventExpectation( "run-as", @@ -498,9 +496,7 @@ public void testRunAsIsIgnoredForUnsupportedAuthenticationTypes() throws Illegal assertThat(future.actionGet(), equalTo(authentication)); appender.assertAllExpectationsMatched(); } finally { - appender.stop(); Loggers.setLevel(logger, Level.INFO); - Loggers.removeAppender(logger, appender); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java index 3bb776e0f726c..20555ced32bd7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java @@ -164,7 +164,7 @@ public void testExceptionProcessingRequestOnInvalidCrossClusterAccessSubjectInfo // Invalid internal user AuthenticationTestHelper.builder().internal(InternalUsers.XPACK_USER).build(), new RoleDescriptorsIntersection( - new 
RoleDescriptor("invalid_role", new String[] { "all" }, null, null, null, null, null, null, null, null) + new RoleDescriptor("invalid_role", new String[] { "all" }, null, null, null, null, null, null, null, null, null) ) ) ); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java index b62fc4ab6b04d..1c2c617a46bb7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java @@ -8,13 +8,10 @@ package org.elasticsearch.xpack.security.authc; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.cache.Cache; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Tuple; @@ -208,25 +205,21 @@ public void testNullUser() throws IllegalAccessException { final ElasticsearchSecurityException e = new ElasticsearchSecurityException("fail"); when(request.authenticationFailed(authenticationToken)).thenReturn(e); - final Logger unlicensedRealmsLogger = LogManager.getLogger(RealmsAuthenticator.class); final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - try { + try (var ignored = mockAppender.capturing(RealmsAuthenticator.class)) { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "unlicensed realms", RealmsAuthenticator.class.getName(), Level.WARN, - "Authentication failed using realms 
[realm1/realm1,realm2/reaml2]." + "Authentication failed using realms [realm1/realm1,realm2/realm2]." + " Realms [realm3/realm3] were skipped because they are not permitted on the current license" ) ); final PlainActionFuture> future = new PlainActionFuture<>(); realmsAuthenticator.authenticate(context, future); assertThat(expectThrows(ElasticsearchSecurityException.class, future::actionGet), is(e)); - } finally { - Loggers.removeAppender(unlicensedRealmsLogger, mockAppender); - mockAppender.stop(); + mockAppender.assertAllExpectationsMatched(); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java index 28b3a1ead9414..3fa0c8a52b65f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -937,11 +936,9 @@ public void testRealmsAreDisabledOnLicenseDowngrade() throws Exception { final Logger realmsLogger = LogManager.getLogger(Realms.class); final MockLogAppender appender = new MockLogAppender(); - Loggers.addAppender(realmsLogger, appender); - appender.start(); when(licenseState.statusDescription()).thenReturn("mock license"); - try { + try (var ignored = appender.capturing(Realms.class)) { for (String realmId : List.of("kerberos.kerberos_realm", "type_0.custom_realm_1", "type_1.custom_realm_2")) { appender.addExpectation( new MockLogAppender.SeenEventExpectation( @@ -954,9 +951,6 @@ public 
void testRealmsAreDisabledOnLicenseDowngrade() throws Exception { } allowOnlyStandardRealms(); appender.assertAllExpectationsMatched(); - } finally { - appender.stop(); - Loggers.removeAppender(realmsLogger, appender); } final List unlicensedRealmNames = realms.getUnlicensedRealms().stream().map(r -> r.name()).collect(Collectors.toList()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java index c4e4d58d27178..7fa3ee96de469 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java @@ -971,8 +971,6 @@ public void testHandleUserinfoResponseFailure() throws Exception { public void testLogIdTokenAndNonce() throws URISyntaxException, BadJOSEException, JOSEException, IllegalAccessException { final Logger logger = LogManager.getLogger(OpenIdConnectAuthenticator.class); final MockLogAppender appender = new MockLogAppender(); - appender.start(); - Loggers.addAppender(logger, appender); Loggers.setLevel(logger, Level.DEBUG); final RealmConfig config = buildConfig(getBasicRealmSettings().build(), threadContext); @@ -999,7 +997,7 @@ public void testLogIdTokenAndNonce() throws URISyntaxException, BadJOSEException final Nonce expectedNonce = new Nonce(randomAlphaOfLength(10)); - try { + try (var ignored = appender.capturing(OpenIdConnectAuthenticator.class)) { appender.addExpectation( new MockLogAppender.SeenEventExpectation("JWT header", logger.getName(), Level.DEBUG, "ID Token Header: " + headerString) ); @@ -1018,8 +1016,6 @@ public void testLogIdTokenAndNonce() throws URISyntaxException, BadJOSEException // The logging message assertion is the only thing we actually care in this test 
appender.assertAllExpectationsMatched(); } finally { - Loggers.removeAppender(logger, appender); - appender.stop(); Loggers.setLevel(logger, (Level) null); openIdConnectAuthenticator.close(); } @@ -1063,11 +1059,9 @@ public void testHttpClientConnectionTtlBehaviour() throws URISyntaxException, Il // In addition, capture logs to show that kept alive (TTL) is honored final Logger logger = LogManager.getLogger(PoolingNHttpClientConnectionManager.class); final MockLogAppender appender = new MockLogAppender(); - appender.start(); - Loggers.addAppender(logger, appender); // Note: Setting an org.apache.http logger to DEBUG requires es.insecure_network_trace_enabled=true Loggers.setLevel(logger, Level.DEBUG); - try { + try (var ignored = appender.capturing(PoolingNHttpClientConnectionManager.class)) { appender.addExpectation( new MockLogAppender.PatternSeenEventExpectation( "log", @@ -1101,8 +1095,6 @@ public void cancelled() { appender.assertAllExpectationsMatched(); assertThat(portTested.get(), is(true)); } finally { - Loggers.removeAppender(logger, appender); - appender.stop(); Loggers.setLevel(logger, (Level) null); authenticator.close(); httpServer.stop(1); @@ -1211,10 +1203,8 @@ public Object next() { final Logger logger = LogManager.getLogger(OpenIdConnectAuthenticator.class); final MockLogAppender appender = new MockLogAppender(); - appender.start(); - Loggers.addAppender(logger, appender); Loggers.setLevel(logger, Level.DEBUG); - try { + try (var ignored = appender.capturing(OpenIdConnectAuthenticator.class)) { appender.addExpectation( new MockLogAppender.SeenEventExpectation( "log", @@ -1227,8 +1217,6 @@ public Object next() { assertThat(keepAliveStrategy.getKeepAliveDuration(httpResponse, null), equalTo(effectiveTtlInMs)); appender.assertAllExpectationsMatched(); } finally { - Loggers.removeAppender(logger, appender); - appender.stop(); Loggers.setLevel(logger, (Level) null); authenticator.close(); } diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java index 60f6cc53902b9..ad836b8131934 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.NamedFormatter; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -216,11 +215,8 @@ private void testLoggingWarnOnSpecialAttributeName(String attributeName, String .add(getAttribute(attributeName, attributeFriendlyName, null, List.of("daredevil"))); SamlToken token = token(signResponse(response)); - final Logger samlLogger = LogManager.getLogger(authenticator.getClass()); final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - try { - Loggers.addAppender(samlLogger, mockAppender); + try (var ignored = mockAppender.capturing(authenticator.getClass())) { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "attribute name warning", @@ -232,9 +228,6 @@ private void testLoggingWarnOnSpecialAttributeName(String attributeName, String final SamlAttributes attributes = authenticator.authenticate(token); assertThat(attributes, notNullValue()); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(samlLogger, mockAppender); - mockAppender.stop(); } } @@ -247,17 +240,11 @@ public void testLoggingNoLogIfNotSpecialAttributeName() throws Exception { assertion.getAttributeStatements().get(0).getAttributes().add(getAttribute(UID_OID, "friendly", 
null, List.of("daredevil"))); SamlToken token = token(signResponse(response)); - final Logger samlLogger = LogManager.getLogger(authenticator.getClass()); final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - try { - Loggers.addAppender(samlLogger, mockAppender); + try (var ignored = mockAppender.capturing(authenticator.getClass())) { final SamlAttributes attributes = authenticator.authenticate(token); assertThat(attributes, notNullValue()); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(samlLogger, mockAppender); - mockAppender.stop(); } } @@ -275,9 +262,7 @@ public void testLoggingWarnOnSpecialAttributeNameInNameAndFriendlyName() throws final Logger samlLogger = LogManager.getLogger(authenticator.getClass()); final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - try { - Loggers.addAppender(samlLogger, mockAppender); + try (var ignored = mockAppender.capturing(authenticator.getClass())) { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "attribute name warning", @@ -297,9 +282,6 @@ public void testLoggingWarnOnSpecialAttributeNameInNameAndFriendlyName() throws final SamlAttributes attributes = authenticator.authenticate(token); assertThat(attributes, notNullValue()); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(samlLogger, mockAppender); - mockAppender.stop(); } } @@ -884,12 +866,8 @@ public void testLoggingWhenAudienceCheckFails() throws Exception { String xml = SamlUtils.getXmlContent(response, false); final SamlToken token = token(signResponse(xml)); - final Logger samlLogger = LogManager.getLogger(authenticator.getClass()); final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - try { - Loggers.addAppender(samlLogger, mockAppender); - + try (var ignored = mockAppender.capturing(authenticator.getClass())) { mockAppender.addExpectation( new 
MockLogAppender.SeenEventExpectation( "similar audience", @@ -915,9 +893,6 @@ public void testLoggingWhenAudienceCheckFails() throws Exception { final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); assertThat(exception.getMessage(), containsString("required audience")); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(samlLogger, mockAppender); - mockAppender.stop(); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountServiceTests.java index 89a6684108149..1529fda3d6578 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountServiceTests.java @@ -111,14 +111,15 @@ public void testTryParseToken() throws IOException, IllegalAccessException { final Logger sasLogger = LogManager.getLogger(ServiceAccountService.class); Loggers.setLevel(sasLogger, Level.TRACE); - final MockLogAppender appender = new MockLogAppender(); - Loggers.addAppender(satLogger, appender); - Loggers.addAppender(sasLogger, appender); - appender.start(); + final MockLogAppender satAppender = new MockLogAppender(); + final MockLogAppender sasAppender = new MockLogAppender(); - try { + try ( + var ignored1 = satAppender.capturing(ServiceAccountToken.class); + var ignored2 = sasAppender.capturing(ServiceAccountService.class) + ) { // Less than 4 bytes - appender.addExpectation( + satAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "less than 4 bytes", ServiceAccountToken.class.getName(), @@ -128,10 +129,10 @@ public void testTryParseToken() throws IOException, IllegalAccessException { ); final SecureString bearerString0 = 
createBearerString(List.of(Arrays.copyOfRange(magicBytes, 0, randomIntBetween(0, 3)))); assertNull(ServiceAccountService.tryParseToken(bearerString0)); - appender.assertAllExpectationsMatched(); + satAppender.assertAllExpectationsMatched(); // Prefix mismatch - appender.addExpectation( + satAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "prefix mismatch", ServiceAccountToken.class.getName(), @@ -146,10 +147,10 @@ public void testTryParseToken() throws IOException, IllegalAccessException { ) ); assertNull(ServiceAccountService.tryParseToken(bearerString1)); - appender.assertAllExpectationsMatched(); + satAppender.assertAllExpectationsMatched(); // No colon - appender.addExpectation( + satAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "no colon", ServiceAccountToken.class.getName(), @@ -161,10 +162,10 @@ public void testTryParseToken() throws IOException, IllegalAccessException { List.of(magicBytes, randomAlphaOfLengthBetween(30, 50).getBytes(StandardCharsets.UTF_8)) ); assertNull(ServiceAccountService.tryParseToken(bearerString2)); - appender.assertAllExpectationsMatched(); + satAppender.assertAllExpectationsMatched(); // Invalid delimiter for qualified name - appender.addExpectation( + satAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "invalid delimiter for qualified name", ServiceAccountToken.class.getName(), @@ -193,10 +194,10 @@ public void testTryParseToken() throws IOException, IllegalAccessException { ); assertNull(ServiceAccountService.tryParseToken(bearerString3)); } - appender.assertAllExpectationsMatched(); + satAppender.assertAllExpectationsMatched(); // Invalid token name - appender.addExpectation( + sasAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "invalid token name", ServiceAccountService.class.getName(), @@ -217,7 +218,7 @@ public void testTryParseToken() throws IOException, IllegalAccessException { ) ); assertNull(ServiceAccountService.tryParseToken(bearerString4)); - 
appender.assertAllExpectationsMatched(); + sasAppender.assertAllExpectationsMatched(); // Everything is good final String namespace = randomAlphaOfLengthBetween(3, 8); @@ -241,7 +242,7 @@ public void testTryParseToken() throws IOException, IllegalAccessException { assertThat(parsedToken, equalTo(serviceAccountToken2)); // Invalid magic byte - appender.addExpectation( + satAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "invalid magic byte again", ServiceAccountToken.class.getName(), @@ -252,10 +253,10 @@ public void testTryParseToken() throws IOException, IllegalAccessException { assertNull( ServiceAccountService.tryParseToken(new SecureString("AQEAAWVsYXN0aWMvZmxlZXQvdG9rZW4xOnN1cGVyc2VjcmV0".toCharArray())) ); - appender.assertAllExpectationsMatched(); + satAppender.assertAllExpectationsMatched(); // No colon - appender.addExpectation( + satAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "no colon again", ServiceAccountToken.class.getName(), @@ -266,10 +267,10 @@ public void testTryParseToken() throws IOException, IllegalAccessException { assertNull( ServiceAccountService.tryParseToken(new SecureString("AAEAAWVsYXN0aWMvZmxlZXQvdG9rZW4xX3N1cGVyc2VjcmV0".toCharArray())) ); - appender.assertAllExpectationsMatched(); + satAppender.assertAllExpectationsMatched(); // Invalid qualified name - appender.addExpectation( + satAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "invalid delimiter for qualified name again", ServiceAccountToken.class.getName(), @@ -280,10 +281,10 @@ public void testTryParseToken() throws IOException, IllegalAccessException { assertNull( ServiceAccountService.tryParseToken(new SecureString("AAEAAWVsYXN0aWMvZmxlZXRfdG9rZW4xOnN1cGVyc2VjcmV0".toCharArray())) ); - appender.assertAllExpectationsMatched(); + satAppender.assertAllExpectationsMatched(); // Invalid token name - appender.addExpectation( + sasAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "invalid token name again", 
ServiceAccountService.class.getName(), @@ -294,7 +295,7 @@ public void testTryParseToken() throws IOException, IllegalAccessException { assertNull( ServiceAccountService.tryParseToken(new SecureString("AAEAAWVsYXN0aWMvZmxlZXQvdG9rZW4hOnN1cGVyc2VjcmV0".toCharArray())) ); - appender.assertAllExpectationsMatched(); + sasAppender.assertAllExpectationsMatched(); // everything is fine assertThat( @@ -310,11 +311,8 @@ public void testTryParseToken() throws IOException, IllegalAccessException { ) ); } finally { - appender.stop(); Loggers.setLevel(satLogger, Level.INFO); Loggers.setLevel(sasLogger, Level.INFO); - Loggers.removeAppender(satLogger, appender); - Loggers.removeAppender(sasLogger, appender); } } @@ -369,10 +367,7 @@ public void testAuthenticateWithToken() throws ExecutionException, InterruptedEx Loggers.setLevel(sasLogger, Level.TRACE); final MockLogAppender appender = new MockLogAppender(); - Loggers.addAppender(sasLogger, appender); - appender.start(); - - try { + try (var ignored = appender.capturing(ServiceAccountService.class)) { // non-elastic service account final ServiceAccountId accountId1 = new ServiceAccountId( randomValueOtherThan(ElasticServiceAccounts.NAMESPACE, () -> randomAlphaOfLengthBetween(3, 8)), @@ -557,9 +552,7 @@ public void testAuthenticateWithToken() throws ExecutionException, InterruptedEx ); appender.assertAllExpectationsMatched(); } finally { - appender.stop(); Loggers.setLevel(sasLogger, Level.INFO); - Loggers.removeAppender(sasLogger, appender); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceIntegTests.java index 633d40922df6e..08628c1a5f5af 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceIntegTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceIntegTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.authz; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.PlainActionFuture; @@ -73,6 +74,7 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() throws IOExcept .build(), randomNonEmptySubsetOf(List.of(concreteClusterAlias, "*")).toArray(new String[0]) ) }, + null, // TODO: add tests here null ) ); @@ -129,7 +131,10 @@ public void testCrossClusterAccessWithInvalidRoleDescriptors() { final AuthorizationService authzService = internalCluster().getInstance(AuthorizationService.class, nodeName); final CrossClusterAccessSubjectInfo crossClusterAccessSubjectInfo = AuthenticationTestHelper.randomCrossClusterAccessSubjectInfo( new RoleDescriptorsIntersection( - randomValueOtherThanMany(rd -> false == rd.hasPrivilegesOtherThanIndex(), () -> RoleDescriptorTests.randomRoleDescriptor()) + randomValueOtherThanMany( + rd -> false == rd.hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster(), + () -> RoleDescriptorTests.randomRoleDescriptor() + ) ) ); final Authentication authentication = AuthenticationTestHelper.builder() @@ -147,7 +152,8 @@ public void testCrossClusterAccessWithInvalidRoleDescriptors() { assertThat( actual.getMessage(), equalTo( - "Role descriptor for cross cluster access can only contain index privileges but other privileges found for subject [" + "Role descriptor for cross cluster access can only contain index and " + + "cluster privileges but other privileges found for subject [" + expectedPrincipal + "]" ) @@ -181,6 +187,7 @@ private RoleDescriptorsIntersection authorizeThenGetRoleDescriptorsIntersectionF ActionTestUtils.assertNoFailureListener(nothing -> { authzService.getRoleDescriptorsIntersectionForRemoteCluster( 
concreteClusterAlias, + TransportVersion.current(), authentication.getEffectiveSubject(), new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(newValue -> { assertThat(threadContext.getTransient(AUTHORIZATION_INFO_KEY), not(nullValue())); @@ -192,6 +199,7 @@ private RoleDescriptorsIntersection authorizeThenGetRoleDescriptorsIntersectionF } else { authzService.getRoleDescriptorsIntersectionForRemoteCluster( concreteClusterAlias, + TransportVersion.current(), authentication.getEffectiveSubject(), new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(newValue -> { assertThat(threadContext.getTransient(AUTHORIZATION_INFO_KEY), nullValue()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index ae33c4e5e31e8..e2fe682943a84 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -3557,8 +3557,9 @@ public void testRemoteActionDenied() { when(authorizationInfo.asMap()).thenReturn( Map.of(PRINCIPAL_ROLES_FIELD_NAME, randomArray(0, 3, String[]::new, () -> randomAlphaOfLengthBetween(5, 8))) ); + String actionPrefix = randomFrom("indices", "cluster"); threadContext.putTransient(AUTHORIZATION_INFO_KEY, authorizationInfo); - final String action = "indices:/some/action/" + randomAlphaOfLengthBetween(0, 8); + final String action = actionPrefix + ":/some/action/" + randomAlphaOfLengthBetween(0, 8); final String clusterAlias = randomAlphaOfLengthBetween(5, 12); final ElasticsearchSecurityException e = authorizationService.remoteActionDenied(authentication, action, clusterAlias); assertThat(e.getCause(), nullValue()); @@ -3567,10 +3568,11 @@ public void testRemoteActionDenied() { equalTo( 
Strings.format( "action [%s] towards remote cluster [%s] is unauthorized for %s" - + " because no remote indices privileges apply for the target cluster", + + " because no remote %s privileges apply for the target cluster", action, clusterAlias, - new AuthorizationDenialMessages.Default().successfulAuthenticationDescription(authentication, authorizationInfo) + new AuthorizationDenialMessages.Default().successfulAuthenticationDescription(authentication, authorizationInfo), + actionPrefix ) ) ); @@ -3583,7 +3585,8 @@ public void testActionDeniedForCrossClusterAccessAuthentication() { Map.of(PRINCIPAL_ROLES_FIELD_NAME, randomArray(0, 3, String[]::new, () -> randomAlphaOfLengthBetween(5, 8))) ); threadContext.putTransient(AUTHORIZATION_INFO_KEY, authorizationInfo); - final String action = "indices:/some/action/" + randomAlphaOfLengthBetween(0, 8); + String actionPrefix = randomFrom("indices", "cluster"); + final String action = actionPrefix + ":/some/action/" + randomAlphaOfLengthBetween(0, 8); final ElasticsearchSecurityException e = authorizationService.actionDenied(authentication, authorizationInfo, action, mock()); assertThat(e.getCause(), nullValue()); assertThat( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/LoadAuthorizedIndicesTimeCheckerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/LoadAuthorizedIndicesTimeCheckerTests.java index ed9250cb82826..e06f6f212c687 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/LoadAuthorizedIndicesTimeCheckerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/LoadAuthorizedIndicesTimeCheckerTests.java @@ -194,15 +194,11 @@ private void testLogging( thresholds ); final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - try { + try (var ignored = mockAppender.capturing(timerLogger.getName())) { Loggers.addAppender(timerLogger, 
mockAppender); mockAppender.addExpectation(expectation); checker.accept(List.of()); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(timerLogger, mockAppender); - mockAppender.stop(); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java index ab5450f3ab4dd..1923d4d86dc71 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.security.authz; import org.elasticsearch.ElasticsearchRoleRestrictionException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; @@ -81,6 +82,8 @@ import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.permission.RemoteIndicesPermission; import org.elasticsearch.xpack.core.security.authz.permission.ResourcePrivileges; import org.elasticsearch.xpack.core.security.authz.permission.Role; @@ -196,7 +199,7 @@ public void testResolveAuthorizationInfoForEmptyRestrictedRolesWithAuthenticatio @SuppressWarnings("unchecked") final var listener = (ActionListener>) invocation.getArgument(1); final Supplier randomRoleSupplier = () -> 
Role.buildFromRoleDescriptor( - RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), false, randomBoolean()), + RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), false, randomBoolean(), false), new FieldPermissionsCache(Settings.EMPTY), RESTRICTED_INDICES, List.of() @@ -1291,8 +1294,8 @@ public void testBuildUserPrivilegeResponse() { ) .addApplicationPrivilege(ApplicationPrivilegeTests.createPrivilege("app01", "read", "data:read"), Collections.singleton("*")) .runAs(new Privilege(Sets.newHashSet("user01", "user02"), "user01", "user02")) - .addRemoteGroup(Set.of("remote-1"), FieldPermissions.DEFAULT, null, IndexPrivilege.READ, false, "remote-index-1") - .addRemoteGroup( + .addRemoteIndicesGroup(Set.of("remote-1"), FieldPermissions.DEFAULT, null, IndexPrivilege.READ, false, "remote-index-1") + .addRemoteIndicesGroup( Set.of("remote-2", "remote-3"), new FieldPermissions(new FieldPermissionsDefinition(new String[] { "public.*" }, new String[0])), Collections.singleton(query), @@ -1301,6 +1304,20 @@ public void testBuildUserPrivilegeResponse() { "remote-index-2", "remote-index-3" ) + .addRemoteClusterPermissions( + new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new String[] { "remote-1" } + ) + ) + .addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new String[] { "remote-2", "remote-3" } + ) + ) + ) .build(); final GetUserPrivilegesResponse response = RBACEngine.buildUserPrivilegesResponseObject(role); @@ -1357,6 +1374,30 @@ public void testBuildUserPrivilegeResponse() { containsInAnyOrder(new FieldPermissionsDefinition.FieldGrantExcludeGroup(new String[] { "public.*" }, new String[0])) ); assertThat(remoteIndex2.indices().getQueries(), containsInAnyOrder(query)); + + RemoteClusterPermissions remoteClusterPermissions = 
response.getRemoteClusterPermissions(); + String[] allRemoteClusterPermissions = RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]); + assert allRemoteClusterPermissions.length == 1 + : "if more remote cluster permissions are added this test needs to be updated to ensure the correct remotes receive the " + + "correct permissions. "; + // 2 groups with 3 aliases + assertThat(response.getRemoteClusterPermissions().groups(), iterableWithSize(2)); + assertEquals( + 3, + response.getRemoteClusterPermissions() + .groups() + .stream() + .map(RemoteClusterPermissionGroup::remoteClusterAliases) + .flatMap(Arrays::stream) + .distinct() + .count() + ); + + for (String permission : RemoteClusterPermissions.getSupportedRemoteClusterPermissions()) { + assertThat(Arrays.asList(remoteClusterPermissions.privilegeNames("remote-1", TransportVersion.current())), hasItem(permission)); + assertThat(Arrays.asList(remoteClusterPermissions.privilegeNames("remote-2", TransportVersion.current())), hasItem(permission)); + assertThat(Arrays.asList(remoteClusterPermissions.privilegeNames("remote-3", TransportVersion.current())), hasItem(permission)); + } } public void testBackingIndicesAreIncludedForAuthorizedDataStreams() { @@ -1530,7 +1571,7 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() throws Executio when(authorizationInfo.getRole()).thenReturn(role); final PlainActionFuture future = new PlainActionFuture<>(); - engine.getRoleDescriptorsIntersectionForRemoteCluster(concreteClusterAlias, authorizationInfo, future); + engine.getRoleDescriptorsIntersectionForRemoteCluster(concreteClusterAlias, TransportVersion.current(), authorizationInfo, future); final RoleDescriptorsIntersection actual = future.get(); assertThat( @@ -1585,7 +1626,12 @@ public void testGetRoleDescriptorsIntersectionForRemoteClusterHasDeterministicOr final RBACAuthorizationInfo authorizationInfo1 = mock(RBACAuthorizationInfo.class); 
when(authorizationInfo1.getRole()).thenReturn(role1); final PlainActionFuture future1 = new PlainActionFuture<>(); - engine.getRoleDescriptorsIntersectionForRemoteCluster(concreteClusterAlias, authorizationInfo1, future1); + engine.getRoleDescriptorsIntersectionForRemoteCluster( + concreteClusterAlias, + TransportVersion.current(), + authorizationInfo1, + future1 + ); final RoleDescriptorsIntersection actual1 = future1.get(); // Randomize the order of both remote indices groups and each of the indices permissions groups each group holds @@ -1605,7 +1651,12 @@ public void testGetRoleDescriptorsIntersectionForRemoteClusterHasDeterministicOr final RBACAuthorizationInfo authorizationInfo2 = mock(RBACAuthorizationInfo.class); when(authorizationInfo2.getRole()).thenReturn(role2); final PlainActionFuture future2 = new PlainActionFuture<>(); - engine.getRoleDescriptorsIntersectionForRemoteCluster(concreteClusterAlias, authorizationInfo2, future2); + engine.getRoleDescriptorsIntersectionForRemoteCluster( + concreteClusterAlias, + TransportVersion.current(), + authorizationInfo2, + future2 + ); final RoleDescriptorsIntersection actual2 = future2.get(); assertThat(actual1, equalTo(actual2)); @@ -1632,6 +1683,7 @@ public void testGetRoleDescriptorsIntersectionForRemoteClusterWithoutMatchingGro final PlainActionFuture future = new PlainActionFuture<>(); engine.getRoleDescriptorsIntersectionForRemoteCluster( randomValueOtherThan(concreteClusterAlias, () -> randomAlphaOfLength(10)), + TransportVersion.current(), authorizationInfo, future ); @@ -1649,6 +1701,7 @@ public void testGetRoleDescriptorsIntersectionForRemoteClusterWithoutRemoteIndic final PlainActionFuture future = new PlainActionFuture<>(); engine.getRoleDescriptorsIntersectionForRemoteCluster( randomValueOtherThan(concreteClusterAlias, () -> randomAlphaOfLength(10)), + TransportVersion.current(), authorizationInfo, future ); @@ -1670,14 +1723,19 @@ public void testGetRoleDescriptorsForRemoteClusterForReservedRoles() { 
final RBACAuthorizationInfo authorizationInfo = mock(RBACAuthorizationInfo.class); when(authorizationInfo.getRole()).thenReturn(role); final PlainActionFuture future = new PlainActionFuture<>(); - engine.getRoleDescriptorsIntersectionForRemoteCluster(randomAlphaOfLengthBetween(5, 20), authorizationInfo, future); + engine.getRoleDescriptorsIntersectionForRemoteCluster( + randomAlphaOfLengthBetween(5, 20), + TransportVersion.current(), + authorizationInfo, + future + ); assertThat( future.actionGet(), equalTo( new RoleDescriptorsIntersection( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), new IndicesPrivileges[] { IndicesPrivileges.builder().indices("*").privileges("all").allowRestrictedIndices(false).build(), IndicesPrivileges.builder() @@ -1706,7 +1764,12 @@ public void testGetRoleDescriptorsForRemoteClusterForReservedRoles() { final RBACAuthorizationInfo authorizationInfo = mock(RBACAuthorizationInfo.class); when(authorizationInfo.getRole()).thenReturn(role); final PlainActionFuture future = new PlainActionFuture<>(); - engine.getRoleDescriptorsIntersectionForRemoteCluster(randomAlphaOfLengthBetween(5, 20), authorizationInfo, future); + engine.getRoleDescriptorsIntersectionForRemoteCluster( + randomAlphaOfLengthBetween(5, 20), + TransportVersion.current(), + authorizationInfo, + future + ); assertThat( future.actionGet(), equalTo( @@ -1742,7 +1805,12 @@ public void testGetRoleDescriptorsForRemoteClusterForReservedRoles() { final RBACAuthorizationInfo authorizationInfo = mock(RBACAuthorizationInfo.class); when(authorizationInfo.getRole()).thenReturn(role); final PlainActionFuture future = new PlainActionFuture<>(); - engine.getRoleDescriptorsIntersectionForRemoteCluster(randomAlphaOfLengthBetween(5, 20), authorizationInfo, future); + engine.getRoleDescriptorsIntersectionForRemoteCluster( + randomAlphaOfLengthBetween(5, 20), + TransportVersion.current(), + 
authorizationInfo, + future + ); assertThat( future.actionGet(), equalTo( @@ -2021,7 +2089,7 @@ private Role createSimpleRoleWithRemoteIndices(final RemoteIndicesPermission rem remoteIndicesPermission.remoteIndicesGroups().forEach(group -> { group.indicesPermissionGroups() .forEach( - p -> roleBuilder.addRemoteGroup( + p -> roleBuilder.addRemoteIndicesGroup( group.remoteClusterAliases(), p.getFieldPermissions(), p.getQuery(), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index 23d1f4854c23a..e039f0c66eaeb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -7,8 +7,6 @@ package org.elasticsearch.xpack.security.authz.store; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; @@ -35,7 +33,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -77,6 +74,8 @@ import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission; import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission.IsResourceAuthorizedPredicate; 
+import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.permission.RemoteIndicesPermission; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.privilege.ActionClusterPrivilege; @@ -133,6 +132,7 @@ import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; +import java.util.stream.Stream; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; import static org.elasticsearch.test.TestMatchers.throwableWithMessage; @@ -297,10 +297,7 @@ public void testLoggingWarnWhenDlsUnlicensed() throws IOException, IllegalAccess ); final MockLogAppender mockAppender = new MockLogAppender(); - final Logger logger = LogManager.getLogger(RoleDescriptorStore.class); - mockAppender.start(); - try { - Loggers.addAppender(logger, mockAppender); + try (var ignored = mockAppender.capturing(RoleDescriptorStore.class)) { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "disabled role warning", @@ -315,9 +312,6 @@ public void testLoggingWarnWhenDlsUnlicensed() throws IOException, IllegalAccess assertEquals(Role.EMPTY, roleFuture.actionGet()); assertThat(effectiveRoleDescriptors.get(), empty()); mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(logger, mockAppender); - mockAppender.stop(); } } @@ -964,6 +958,7 @@ public ClusterPermission.Builder buildPermission(ClusterPermission.Builder build new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("remote-*", "remote").indices("abc-*", "xyz-*").privileges("read").build(), RoleDescriptor.RemoteIndicesPrivileges.builder("remote-*").indices("remote-idx-1-*").privileges("read").build(), }, + getValidRemoteClusterPermissions(new String[] { 
"remote-*" }), null ); @@ -992,6 +987,7 @@ public ClusterPermission.Builder buildPermission(ClusterPermission.Builder build new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("*").indices("remote-idx-2-*").privileges("read").build(), RoleDescriptor.RemoteIndicesPrivileges.builder("remote-*").indices("remote-idx-3-*").privileges("read").build() }, + null, null ); @@ -1064,36 +1060,47 @@ public ClusterPermission.Builder buildPermission(ClusterPermission.Builder build role.application().grants(ApplicationPrivilegeTests.createPrivilege("app2a", "app2a-all", "all"), "user/joe"); role.application().grants(ApplicationPrivilegeTests.createPrivilege("app2b", "app2b-read", "read"), "settings/hostname"); - assertHasRemoteGroupsForClusters(role.remoteIndices(), Set.of("remote-*", "remote"), Set.of("*"), Set.of("remote-*")); - assertHasIndexGroupsForClusters( + assertHasRemoteIndicesGroupsForClusters(role.remoteIndices(), Set.of("remote-*", "remote"), Set.of("*"), Set.of("remote-*")); + assertHasRemoteIndexGroupsForClusters( role.remoteIndices(), Set.of("remote-*"), indexGroup("remote-idx-1-*"), indexGroup("remote-idx-3-*") ); - assertHasIndexGroupsForClusters(role.remoteIndices(), Set.of("remote-*", "remote"), indexGroup("xyz-*", "abc-*")); - assertHasIndexGroupsForClusters(role.remoteIndices(), Set.of("*"), indexGroup("remote-idx-2-*")); + assertHasRemoteIndexGroupsForClusters(role.remoteIndices(), Set.of("remote-*", "remote"), indexGroup("xyz-*", "abc-*")); + assertHasRemoteIndexGroupsForClusters(role.remoteIndices(), Set.of("*"), indexGroup("remote-idx-2-*")); final RemoteIndicesPermission forRemote = role.remoteIndices().forCluster("remote"); - assertHasIndexGroupsForClusters(forRemote, Set.of("remote-*", "remote"), indexGroup("xyz-*", "abc-*")); - assertHasIndexGroupsForClusters(forRemote, Set.of("*"), indexGroup("remote-idx-2-*")); + assertHasRemoteIndexGroupsForClusters(forRemote, Set.of("remote-*", "remote"), 
indexGroup("xyz-*", "abc-*")); + assertHasRemoteIndexGroupsForClusters(forRemote, Set.of("*"), indexGroup("remote-idx-2-*")); + assertValidRemoteClusterPermissions(role.remoteCluster(), new String[] { "remote-*" }); + assertThat( + role.remoteCluster().privilegeNames("remote-foobar", TransportVersion.current()), + equalTo(RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0])) + ); } public void testBuildRoleWithSingleRemoteIndicesDefinition() { final String clusterAlias = randomFrom("remote-1", "*"); final Role role = buildRole( - roleDescriptorWithIndicesPrivileges( + roleDescriptorWithRemoteIndicesPrivileges( "r1", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder(clusterAlias).indices("index-1").privileges("read").build() } ) ); - assertHasRemoteGroupsForClusters(role.remoteIndices(), Set.of(clusterAlias)); - assertHasIndexGroupsForClusters(role.remoteIndices(), Set.of(clusterAlias), indexGroup("index-1")); + assertHasRemoteIndicesGroupsForClusters(role.remoteIndices(), Set.of(clusterAlias)); + assertHasRemoteIndexGroupsForClusters(role.remoteIndices(), Set.of(clusterAlias), indexGroup("index-1")); + } + + public void testBuildRoleWithSingleRemoteClusterDefinition() { + final String[] clusterAliases = randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(3, 8)); + final Role role = buildRole(roleDescriptorWithRemoteClusterPrivileges("r1", getValidRemoteClusterPermissions(clusterAliases))); + assertValidRemoteClusterPermissions(role.remoteCluster(), clusterAliases); } public void testBuildRoleFromDescriptorsWithSingleRestriction() { - Role role = buildRole(RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), true)); + Role role = buildRole(RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), true, randomBoolean())); assertThat(role.hasWorkflowsRestriction(), equalTo(true)); } @@ -1101,8 +1108,8 @@ public void 
testBuildRoleFromDescriptorsWithViolationOfRestrictionValidation() { var e = expectThrows( IllegalArgumentException.class, () -> buildRole( - RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), true), - RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), true) + RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), true, randomBoolean()), + RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), true, randomBoolean()) ) ); assertThat(e.getMessage(), containsString("more than one role descriptor with restriction is not allowed")); @@ -1110,9 +1117,9 @@ public void testBuildRoleFromDescriptorsWithViolationOfRestrictionValidation() { e = expectThrows( IllegalArgumentException.class, () -> buildRole( - RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), true), - RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), false), - RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), false) + RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), true, randomBoolean()), + RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), false, randomBoolean()), + RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), false, randomBoolean()) ) ); assertThat(e.getMessage(), containsString("combining role descriptors with and without restriction is not allowed")); @@ -1121,7 +1128,7 @@ public void testBuildRoleFromDescriptorsWithViolationOfRestrictionValidation() { public void testBuildRoleWithFlsAndDlsInRemoteIndicesDefinition() { String clusterAlias = randomFrom("remote-1", "*"); Role role = buildRole( - roleDescriptorWithIndicesPrivileges( + roleDescriptorWithRemoteIndicesPrivileges( "r1", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder(clusterAlias) @@ -1132,8 +1139,8 @@ public void 
testBuildRoleWithFlsAndDlsInRemoteIndicesDefinition() { .build() } ) ); - assertHasRemoteGroupsForClusters(role.remoteIndices(), Set.of(clusterAlias)); - assertHasIndexGroupsForClusters( + assertHasRemoteIndicesGroupsForClusters(role.remoteIndices(), Set.of(clusterAlias)); + assertHasRemoteIndexGroupsForClusters( role.remoteIndices(), Set.of(clusterAlias), indexGroup( @@ -1146,7 +1153,7 @@ public void testBuildRoleWithFlsAndDlsInRemoteIndicesDefinition() { ); role = buildRole( - roleDescriptorWithIndicesPrivileges( + roleDescriptorWithRemoteIndicesPrivileges( "r1", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder(clusterAlias) @@ -1156,7 +1163,7 @@ public void testBuildRoleWithFlsAndDlsInRemoteIndicesDefinition() { .grantedFields("field") .build() } ), - roleDescriptorWithIndicesPrivileges( + roleDescriptorWithRemoteIndicesPrivileges( "r1", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder(clusterAlias) @@ -1167,8 +1174,8 @@ public void testBuildRoleWithFlsAndDlsInRemoteIndicesDefinition() { .build() } ) ); - assertHasRemoteGroupsForClusters(role.remoteIndices(), Set.of(clusterAlias)); - assertHasIndexGroupsForClusters( + assertHasRemoteIndicesGroupsForClusters(role.remoteIndices(), Set.of(clusterAlias)); + assertHasRemoteIndexGroupsForClusters( role.remoteIndices(), Set.of(clusterAlias), indexGroup( @@ -1190,7 +1197,7 @@ public void testBuildRoleWithFlsAndDlsInRemoteIndicesDefinition() { public void testBuildRoleWithEmptyOrNoneRemoteIndices() { Role role = buildRole( - roleDescriptorWithIndicesPrivileges( + roleDescriptorWithRemoteIndicesPrivileges( "r1", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("remote-1").indices("index-1").privileges("none").build() } @@ -1198,14 +1205,19 @@ public void testBuildRoleWithEmptyOrNoneRemoteIndices() { ); assertThat(role.remoteIndices().remoteIndicesGroups(), empty()); - role = 
buildRole(roleDescriptorWithIndicesPrivileges("r1", new RoleDescriptor.RemoteIndicesPrivileges[] {})); + role = buildRole(roleDescriptorWithRemoteIndicesPrivileges("r1", new RoleDescriptor.RemoteIndicesPrivileges[] {})); assertThat(role.remoteIndices().remoteIndicesGroups(), empty()); } + public void testBuildRoleWithoutRemoteCluster() { + final Role role = buildRole(roleDescriptorWithRemoteClusterPrivileges("r1", null)); + assertThat(role.remoteCluster(), equalTo(RemoteClusterPermissions.NONE)); + } + public void testBuildRoleWithSingleRemoteIndicesDefinitionWithAllowRestricted() { final String clusterAlias = randomFrom("remote-1", "*"); final Role role = buildRole( - roleDescriptorWithIndicesPrivileges( + roleDescriptorWithRemoteIndicesPrivileges( "r1", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder(clusterAlias) @@ -1215,8 +1227,8 @@ public void testBuildRoleWithSingleRemoteIndicesDefinitionWithAllowRestricted() .build() } ) ); - assertHasRemoteGroupsForClusters(role.remoteIndices(), Set.of(clusterAlias)); - assertHasIndexGroupsForClusters(role.remoteIndices(), Set.of(clusterAlias), indexGroup(IndexPrivilege.READ, true, "index-1")); + assertHasRemoteIndicesGroupsForClusters(role.remoteIndices(), Set.of(clusterAlias)); + assertHasRemoteIndexGroupsForClusters(role.remoteIndices(), Set.of(clusterAlias), indexGroup(IndexPrivilege.READ, true, "index-1")); } public void testBuildRoleWithRemoteIndicesDoesNotMergeWhenNothingToMerge() { @@ -1229,9 +1241,9 @@ public void testBuildRoleWithRemoteIndicesDoesNotMergeWhenNothingToMerge() { new IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices("index-1").privileges("all").build() } ) ); - assertHasRemoteGroupsForClusters(role.remoteIndices(), Set.of("remote-1"), Set.of("*")); - assertHasIndexGroupsForClusters(role.remoteIndices(), Set.of("*"), indexGroup("index-1")); - assertHasIndexGroupsForClusters(role.remoteIndices(), Set.of("remote-1"), 
indexGroup("index-1")); + assertHasRemoteIndicesGroupsForClusters(role.remoteIndices(), Set.of("remote-1"), Set.of("*")); + assertHasRemoteIndexGroupsForClusters(role.remoteIndices(), Set.of("*"), indexGroup("index-1")); + assertHasRemoteIndexGroupsForClusters(role.remoteIndices(), Set.of("remote-1"), indexGroup("index-1")); final IsResourceAuthorizedPredicate allowedRead = role.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()); assertThat(allowedRead.test(mockIndexAbstraction("index-1")), equalTo(true)); assertThat(allowedRead.test(mockIndexAbstraction("foo")), equalTo(false)); @@ -1249,8 +1261,8 @@ public void testBuildRoleWithRemoteIndicesDoesNotCombineRemotesAndLocals() { RoleDescriptor.IndicesPrivileges.builder().indices("index-1").privileges("read").build(), } ) ); - assertHasRemoteGroupsForClusters(role.remoteIndices(), Set.of("*")); - assertHasIndexGroupsForClusters(role.remoteIndices(), Set.of("*"), indexGroup("index-1")); + assertHasRemoteIndicesGroupsForClusters(role.remoteIndices(), Set.of("*")); + assertHasRemoteIndexGroupsForClusters(role.remoteIndices(), Set.of("*"), indexGroup("index-1")); final IsResourceAuthorizedPredicate allowedRead = role.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()); assertThat(allowedRead.test(mockIndexAbstraction("index-1")), equalTo(true)); final IsResourceAuthorizedPredicate allowedWrite = role.indices().allowedIndicesMatcher(TransportIndexAction.NAME); @@ -1259,7 +1271,7 @@ public void testBuildRoleWithRemoteIndicesDoesNotCombineRemotesAndLocals() { public void testBuildRoleWithRemoteIndicesDoesNotMergeRestrictedAndNonRestricted() { final Role role = buildRole( - roleDescriptorWithIndicesPrivileges( + roleDescriptorWithRemoteIndicesPrivileges( "r1", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("remote-1") @@ -1268,7 +1280,7 @@ public void testBuildRoleWithRemoteIndicesDoesNotMergeRestrictedAndNonRestricted .allowRestrictedIndices(false) 
.build() } ), - roleDescriptorWithIndicesPrivileges( + roleDescriptorWithRemoteIndicesPrivileges( "r2", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("remote-1") @@ -1278,8 +1290,8 @@ public void testBuildRoleWithRemoteIndicesDoesNotMergeRestrictedAndNonRestricted .build() } ) ); - assertHasRemoteGroupsForClusters(role.remoteIndices(), Set.of("remote-1")); - assertHasIndexGroupsForClusters( + assertHasRemoteIndicesGroupsForClusters(role.remoteIndices(), Set.of("remote-1")); + assertHasRemoteIndexGroupsForClusters( role.remoteIndices(), Set.of("remote-1"), indexGroup(IndexPrivilege.READ, true, "index-1"), @@ -1289,7 +1301,7 @@ public void testBuildRoleWithRemoteIndicesDoesNotMergeRestrictedAndNonRestricted public void testBuildRoleWithMultipleRemoteMergedAcrossPrivilegesAndDescriptors() { Role role = buildRole( - roleDescriptorWithIndicesPrivileges( + roleDescriptorWithRemoteIndicesPrivileges( "r1", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("remote-1").indices("index-1", "index-2").privileges("read").build(), @@ -1298,60 +1310,65 @@ public void testBuildRoleWithMultipleRemoteMergedAcrossPrivilegesAndDescriptors( .privileges("read") .build(), } ), - roleDescriptorWithIndicesPrivileges( + roleDescriptorWithRemoteIndicesPrivileges( "r2", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("remote-1").indices("index-1").privileges("read").build() } ) ); - assertHasRemoteGroupsForClusters(role.remoteIndices(), Set.of("remote-1"), Set.of("remote-1", "remote-2")); - assertHasIndexGroupsForClusters(role.remoteIndices(), Set.of("remote-1"), indexGroup("index-1", "index-2"), indexGroup("index-1")); - assertHasIndexGroupsForClusters(role.remoteIndices(), Set.of("remote-1", "remote-2"), indexGroup("index-1", "index-2")); + assertHasRemoteIndicesGroupsForClusters(role.remoteIndices(), Set.of("remote-1"), Set.of("remote-1", "remote-2")); + 
assertHasRemoteIndexGroupsForClusters( + role.remoteIndices(), + Set.of("remote-1"), + indexGroup("index-1", "index-2"), + indexGroup("index-1") + ); + assertHasRemoteIndexGroupsForClusters(role.remoteIndices(), Set.of("remote-1", "remote-2"), indexGroup("index-1", "index-2")); role = buildRole( - roleDescriptorWithIndicesPrivileges( + roleDescriptorWithRemoteIndicesPrivileges( "r1", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("remote-1").indices("index-1").privileges("read").build(), RoleDescriptor.RemoteIndicesPrivileges.builder("remote-1").indices("index-1").privileges("read").build(), RoleDescriptor.RemoteIndicesPrivileges.builder("*").indices("*").privileges("read").build(), } ), - roleDescriptorWithIndicesPrivileges( + roleDescriptorWithRemoteIndicesPrivileges( "r2", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("remote-1").indices("index-1").privileges("read").build(), RoleDescriptor.RemoteIndicesPrivileges.builder("*").indices("*").privileges("read").build(), } ) ); - assertHasRemoteGroupsForClusters(role.remoteIndices(), Set.of("remote-1"), Set.of("*")); - assertHasIndexGroupsForClusters(role.remoteIndices(), Set.of("remote-1"), indexGroup("index-1")); - assertHasIndexGroupsForClusters(role.remoteIndices(), Set.of("*"), indexGroup("*")); + assertHasRemoteIndicesGroupsForClusters(role.remoteIndices(), Set.of("remote-1"), Set.of("*")); + assertHasRemoteIndexGroupsForClusters(role.remoteIndices(), Set.of("remote-1"), indexGroup("index-1")); + assertHasRemoteIndexGroupsForClusters(role.remoteIndices(), Set.of("*"), indexGroup("*")); role = buildRole( - roleDescriptorWithIndicesPrivileges( + roleDescriptorWithRemoteIndicesPrivileges( "r1", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("remote-1").indices("index-1").privileges("read").build(), } ), - roleDescriptorWithIndicesPrivileges( + 
roleDescriptorWithRemoteIndicesPrivileges( "r2", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("remote-1").indices("index-1").privileges("none").build(), } ) ); - assertHasRemoteGroupsForClusters(role.remoteIndices(), Set.of("remote-1")); - assertHasIndexGroupsForClusters(role.remoteIndices(), Set.of("remote-1"), indexGroup("index-1")); + assertHasRemoteIndicesGroupsForClusters(role.remoteIndices(), Set.of("remote-1")); + assertHasRemoteIndexGroupsForClusters(role.remoteIndices(), Set.of("remote-1"), indexGroup("index-1")); role = buildRole( - roleDescriptorWithIndicesPrivileges( + roleDescriptorWithRemoteIndicesPrivileges( "r1", new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("remote-1").indices("index-1").privileges("none").build(), RoleDescriptor.RemoteIndicesPrivileges.builder("remote-1").indices("index-1").privileges("read").build(), } ) ); - assertHasRemoteGroupsForClusters(role.remoteIndices(), Set.of("remote-1")); - assertHasIndexGroupsForClusters( + assertHasRemoteIndicesGroupsForClusters(role.remoteIndices(), Set.of("remote-1")); + assertHasRemoteIndexGroupsForClusters( role.remoteIndices(), Set.of("remote-1"), indexGroup(IndexPrivilege.get(Set.of("read")), false, "index-1"), @@ -1359,6 +1376,39 @@ public void testBuildRoleWithMultipleRemoteMergedAcrossPrivilegesAndDescriptors( ); } + public void testBuildRoleWithMultipleRemoteClusterMerged() { + final String[] clusterAliases1 = randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(3, 8)); + final String[] clusterAliases2 = randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(3, 8)); + final String[] clusterAliases3 = randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(3, 8)); + final Role role = buildRole( + roleDescriptorWithRemoteClusterPrivileges("r1", getValidRemoteClusterPermissions(clusterAliases1)), + roleDescriptorWithRemoteClusterPrivileges("r2", 
getValidRemoteClusterPermissions(clusterAliases2)), + roleDescriptorWithRemoteClusterPrivileges("r3", getValidRemoteClusterPermissions(clusterAliases3)) + ); + assertValidRemoteClusterPermissionsParent(role.remoteCluster(), clusterAliases1); + assertValidRemoteClusterPermissionsParent(role.remoteCluster(), clusterAliases2); + assertValidRemoteClusterPermissionsParent(role.remoteCluster(), clusterAliases3); + assertValidRemoteClusterPermissionsParent(role.remoteCluster(), clusterAliases3); + assertValidRemoteClusterPermissionsParent( + role.remoteCluster(), + Stream.of(clusterAliases1, clusterAliases2, clusterAliases3).flatMap(Arrays::stream).toArray(String[]::new) + ); + + assertThat(role.remoteCluster().groups().size(), equalTo(3)); + for (RemoteClusterPermissionGroup group : role.remoteCluster().groups()) { + // order here is not guaranteed, so try them all + if (Arrays.equals(group.remoteClusterAliases(), clusterAliases1)) { + assertValidRemoteClusterPermissionsGroups(List.of(group), clusterAliases1); + } else if (Arrays.equals(group.remoteClusterAliases(), clusterAliases2)) { + assertValidRemoteClusterPermissionsGroups(List.of(group), clusterAliases2); + } else if (Arrays.equals(group.remoteClusterAliases(), clusterAliases3)) { + assertValidRemoteClusterPermissionsGroups(List.of(group), clusterAliases3); + } else { + fail("unexpected remote cluster group: " + Arrays.toString(group.remoteClusterAliases())); + } + } + } + public void testCustomRolesProviderFailures() throws Exception { final FileRolesStore fileRolesStore = mock(FileRolesStore.class); doCallRealMethod().when(fileRolesStore).accept(anySet(), anyActionListener()); @@ -2094,6 +2144,7 @@ public void testGetRoleForCrossClusterAccessAuthentication() throws Exception { null, null, null, + null, null ) ) @@ -3026,7 +3077,10 @@ private static Set isASet() { return isA(Set.class); } - private RoleDescriptor roleDescriptorWithIndicesPrivileges(final String name, final RoleDescriptor.RemoteIndicesPrivileges[] 
rips) { + private RoleDescriptor roleDescriptorWithRemoteIndicesPrivileges( + final String name, + final RoleDescriptor.RemoteIndicesPrivileges[] rips + ) { return roleDescriptorWithIndicesPrivileges(name, rips, null); } @@ -3035,7 +3089,20 @@ private RoleDescriptor roleDescriptorWithIndicesPrivileges( final RoleDescriptor.RemoteIndicesPrivileges[] rips, final IndicesPrivileges[] ips ) { - return new RoleDescriptor(name, null, ips, null, null, null, null, null, rips, null); + return new RoleDescriptor(name, null, ips, null, null, null, null, null, rips, null, null); + } + + private RoleDescriptor roleDescriptorWithRemoteClusterPrivileges(final String name, RemoteClusterPermissions remoteClusterPermissions) { + return new RoleDescriptor(name, null, null, null, null, null, null, null, null, remoteClusterPermissions, null); + } + + private RemoteClusterPermissions getValidRemoteClusterPermissions(String[] aliases) { + return new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + aliases + ) + ); } private Role buildRole(final RoleDescriptor... roleDescriptors) { @@ -3061,16 +3128,47 @@ private Role buildRole(final RoleDescriptor... roleDescriptors) { @SafeVarargs @SuppressWarnings("varargs") - private void assertHasRemoteGroupsForClusters(final RemoteIndicesPermission permission, final Set... remoteClustersAliases) { + private void assertHasRemoteIndicesGroupsForClusters( + final RemoteIndicesPermission permission, + final Set... 
remoteClustersAliases + ) { assertThat( permission.remoteIndicesGroups().stream().map(RemoteIndicesPermission.RemoteIndicesGroup::remoteClusterAliases).toList(), containsInAnyOrder(remoteClustersAliases) ); } + private void assertValidRemoteClusterPermissions(RemoteClusterPermissions permissions, String[] aliases) { + assertValidRemoteClusterPermissionsParent(permissions, aliases); + assertValidRemoteClusterPermissionsGroups(permissions.groups(), aliases); + + } + + private void assertValidRemoteClusterPermissionsParent(RemoteClusterPermissions permissions, String[] aliases) { + assertTrue(permissions.hasPrivileges()); + for (String alias : aliases) { + assertTrue(permissions.hasPrivileges(alias)); + assertFalse(permissions.hasPrivileges(randomValueOtherThan(alias, () -> randomAlphaOfLength(5)))); + assertThat( + permissions.privilegeNames(alias, TransportVersion.current()), + arrayContaining(RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0])) + ); + } + } + + private void assertValidRemoteClusterPermissionsGroups(List groups, String[] aliases) { + for (RemoteClusterPermissionGroup group : groups) { + assertThat(group.remoteClusterAliases(), arrayContaining(aliases)); + assertThat( + group.clusterPrivileges(), + arrayContaining(RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0])) + ); + } + } + @SafeVarargs @SuppressWarnings("varargs") - private void assertHasIndexGroupsForClusters( + private void assertHasRemoteIndexGroupsForClusters( final RemoteIndicesPermission permission, final Set remoteClustersAliases, final Matcher... 
matchers diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java index 65f2919541e07..3d30a3534d422 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.permission.RunAsPermission; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; @@ -287,11 +288,11 @@ public void testParseFile() throws Exception { assertThat(roles.get("role_query_invalid"), nullValue()); } - public void testParseFileWithRemoteIndices() throws IllegalAccessException, IOException { + public void testParseFileWithRemoteIndicesAndCluster() throws IllegalAccessException, IOException { final Logger logger = CapturingLogger.newCapturingLogger(Level.ERROR, null); final List events = CapturingLogger.output(logger.getName(), Level.ERROR); events.clear(); - final Path path = getDataPath("roles_with_remote_indices.yml"); + final Path path = getDataPath("roles_with_remote_indices_and_cluster.yml"); final Map roles = FileRolesStore.parseFile( path, logger, @@ -313,6 +314,14 @@ public void testParseFileWithRemoteIndices() throws IllegalAccessException, IOEx assertThat(remoteIndicesPrivileges.indicesPrivileges().allowRestrictedIndices(), is(false)); 
assertThat(remoteIndicesPrivileges.indicesPrivileges().getQuery(), nullValue()); + RemoteClusterPermissions remoteClusterPermissions = roleDescriptor.getRemoteClusterPermissions(); + remoteClusterPermissions.validate(); // no exception should be thrown + assertThat(remoteClusterPermissions.groups().size(), equalTo(2)); + assertThat(remoteClusterPermissions.groups().get(0).remoteClusterAliases(), arrayContaining("remote0")); + assertThat(remoteClusterPermissions.groups().get(1).remoteClusterAliases(), arrayContaining("remote1")); + assertThat(remoteClusterPermissions.groups().get(0).clusterPrivileges(), arrayContaining("monitor_enrich")); + assertThat(remoteClusterPermissions.groups().get(1).clusterPrivileges(), arrayContaining("monitor_enrich")); + final RoleDescriptor roleDescriptor2 = roles.get("role_with_fls_dls"); assertNotNull(roleDescriptor2); assertThat(roleDescriptor2.getRemoteIndicesPrivileges().length, equalTo(1)); @@ -325,10 +334,16 @@ public void testParseFileWithRemoteIndices() throws IllegalAccessException, IOEx assertThat(remoteIndicesPrivileges4.indicesPrivileges().getDeniedFields(), arrayContaining("boo")); assertThat(remoteIndicesPrivileges4.indicesPrivileges().getQuery().utf8ToString(), equalTo("{ \"match_all\": {} }")); + remoteClusterPermissions = roleDescriptor2.getRemoteClusterPermissions(); + assertThat(remoteClusterPermissions.groups().size(), equalTo(0)); + assertThat(remoteClusterPermissions, equalTo(RemoteClusterPermissions.NONE)); + assertThat(roles.get("invalid_role_missing_clusters"), nullValue()); assertThat(roles.get("invalid_role_empty_names"), nullValue()); assertThat(roles.get("invalid_role_empty_privileges"), nullValue()); - assertThat(events, hasSize(3)); + assertThat(roles.get("invalid_role_missing_remote_clusters"), nullValue()); + assertThat(roles.get("invalid_role_bad_priv_remote_clusters"), nullValue()); + assertThat(events, hasSize(5)); assertThat( events.get(0), startsWith( @@ -350,6 +365,20 @@ public void 
testParseFileWithRemoteIndices() throws IllegalAccessException, IOEx + "missing required [privileges] field. skipping role..." ) ); + assertThat( + events.get(3), + startsWith( + "failed to parse remote_cluster for role [invalid_role_missing_remote_clusters]. " + + "expected field [remote_cluster] value to be an array" + ) + ); + assertThat( + events.get(4), + startsWith( + "failed to parse remote_cluster for role [invalid_role_bad_priv_remote_clusters]. " + + "[monitor_enrich] is the only value allowed for [privileges] within [remote_cluster]. skipping role..." + ) + ); } public void testParseFileWithFLSAndDLSDisabled() throws Exception { @@ -719,6 +748,7 @@ public void testUsageStats() throws Exception { assertThat(usageStats.get("size"), is(flsDlsEnabled ? 10 : 7)); assertThat(usageStats.get("remote_indices"), is(1L)); + assertThat(usageStats.get("remote_cluster"), is(1L)); assertThat(usageStats.get("fls"), is(flsDlsEnabled)); assertThat(usageStats.get("dls"), is(flsDlsEnabled)); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java index 124c72a34ce00..35591f99727f2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java @@ -52,6 +52,8 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; import org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests; import org.elasticsearch.xpack.core.security.authz.RoleRestrictionTests; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import 
org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -67,6 +69,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -133,6 +136,7 @@ public void testRoleDescriptorWithFlsDlsLicensing() throws IOException { RoleDescriptorTests.randomRoleDescriptorMetadata(ESTestCase.randomBoolean()), null, RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2), + null, null ); assertFalse(flsRole.getTransientMetadata().containsKey("unlicensed_features")); @@ -149,6 +153,7 @@ public void testRoleDescriptorWithFlsDlsLicensing() throws IOException { RoleDescriptorTests.randomRoleDescriptorMetadata(ESTestCase.randomBoolean()), null, RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2), + null, null ); assertFalse(dlsRole.getTransientMetadata().containsKey("unlicensed_features")); @@ -170,6 +175,7 @@ public void testRoleDescriptorWithFlsDlsLicensing() throws IOException { RoleDescriptorTests.randomRoleDescriptorMetadata(ESTestCase.randomBoolean()), null, RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2), + null, null ); assertFalse(flsDlsRole.getTransientMetadata().containsKey("unlicensed_features")); @@ -184,6 +190,7 @@ public void testRoleDescriptorWithFlsDlsLicensing() throws IOException { RoleDescriptorTests.randomRoleDescriptorMetadata(ESTestCase.randomBoolean()), null, RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2), + null, null ); assertFalse(noFlsDlsRole.getTransientMetadata().containsKey("unlicensed_features")); @@ -280,6 +287,7 @@ public void testTransformingRoleWithRestrictionFails() throws IOException { RoleDescriptorTests.randomRoleDescriptorMetadata(ESTestCase.randomBoolean()), null, 
RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2), + null, RoleRestrictionTests.randomWorkflowsRestriction(1, 2) ); @@ -386,63 +394,97 @@ void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final assertTrue(future.actionGet()); } - public void testPutRoleWithRemoteIndicesUnsupportedMinNodeVersion() { - final Client client = mock(Client.class); - final TransportVersion transportVersionBeforeAdvancedRemoteClusterSecurity = TransportVersionUtils.getPreviousVersion( - TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY - ); - final TransportVersion minTransportVersion = TransportVersionUtils.randomVersionBetween( - random(), - TransportVersions.MINIMUM_COMPATIBLE, - transportVersionBeforeAdvancedRemoteClusterSecurity - ); - final ClusterService clusterService = mockClusterServiceWithMinNodeVersion(minTransportVersion); - - final XPackLicenseState licenseState = mock(XPackLicenseState.class); - final AtomicBoolean methodCalled = new AtomicBoolean(false); - - final SecuritySystemIndices systemIndices = new SecuritySystemIndices(clusterService.getSettings()); - systemIndices.init(client, clusterService); - final SecurityIndexManager securityIndex = systemIndices.getMainIndexManager(); - - final NativeRolesStore rolesStore = new NativeRolesStore(Settings.EMPTY, client, licenseState, securityIndex, clusterService) { - @Override - void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener listener) { - if (methodCalled.compareAndSet(false, true)) { - listener.onResponse(true); - } else { - fail("method called more than once!"); + public void testPutRoleWithRemotePrivsUnsupportedMinNodeVersion() { + enum TEST_MODE { + REMOTE_INDICES_PRIVS, + REMOTE_CLUSTER_PRIVS, + REMOTE_INDICES_AND_CLUSTER_PRIVS + } + for (TEST_MODE testMode : TEST_MODE.values()) { + // default to both remote indices and cluster privileges and use the switch below to remove one or the other + TransportVersion 
transportVersionBeforeAdvancedRemoteClusterSecurity = TransportVersionUtils.getPreviousVersion( + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY + ); + RoleDescriptor.RemoteIndicesPrivileges[] remoteIndicesPrivileges = new RoleDescriptor.RemoteIndicesPrivileges[] { + RoleDescriptor.RemoteIndicesPrivileges.builder("remote").privileges("read").indices("index").build() }; + RemoteClusterPermissions remoteClusterPermissions = new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new String[] { "remote" } + ) + ); + switch (testMode) { + case REMOTE_CLUSTER_PRIVS -> { + transportVersionBeforeAdvancedRemoteClusterSecurity = TransportVersionUtils.getPreviousVersion( + TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS + ); + remoteIndicesPrivileges = null; } + case REMOTE_INDICES_PRIVS -> remoteClusterPermissions = null; } - }; - // setup the roles store so the security index exists - securityIndex.clusterChanged(new ClusterChangedEvent("source", getClusterStateWithSecurityIndex(), getEmptyClusterState())); - - PutRoleRequest putRoleRequest = new PutRoleRequest(); - RoleDescriptor remoteIndicesRole = new RoleDescriptor( - "remote", - null, - null, - null, - null, - null, - null, - null, - new RoleDescriptor.RemoteIndicesPrivileges[] { - RoleDescriptor.RemoteIndicesPrivileges.builder("remote").privileges("read").indices("index").build() }, - null - ); - PlainActionFuture future = new PlainActionFuture<>(); - rolesStore.putRole(putRoleRequest, remoteIndicesRole, future); - IllegalStateException e = expectThrows(IllegalStateException.class, future::actionGet); - assertThat( - e.getMessage(), - containsString( - "all nodes must have version [" - + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion() - + "] or higher to support remote indices privileges" - ) - ); + final Client client = mock(Client.class); + + final TransportVersion 
minTransportVersion = TransportVersionUtils.randomVersionBetween( + random(), + TransportVersions.MINIMUM_COMPATIBLE, + transportVersionBeforeAdvancedRemoteClusterSecurity + ); + final ClusterService clusterService = mockClusterServiceWithMinNodeVersion(minTransportVersion); + + final XPackLicenseState licenseState = mock(XPackLicenseState.class); + final AtomicBoolean methodCalled = new AtomicBoolean(false); + + final SecuritySystemIndices systemIndices = new SecuritySystemIndices(clusterService.getSettings()); + systemIndices.init(client, clusterService); + final SecurityIndexManager securityIndex = systemIndices.getMainIndexManager(); + + final NativeRolesStore rolesStore = new NativeRolesStore(Settings.EMPTY, client, licenseState, securityIndex, clusterService) { + @Override + void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener listener) { + if (methodCalled.compareAndSet(false, true)) { + listener.onResponse(true); + } else { + fail("method called more than once!"); + } + } + }; + // setup the roles store so the security index exists + securityIndex.clusterChanged(new ClusterChangedEvent("source", getClusterStateWithSecurityIndex(), getEmptyClusterState())); + + PutRoleRequest putRoleRequest = new PutRoleRequest(); + RoleDescriptor remoteIndicesRole = new RoleDescriptor( + "remote", + null, + null, + null, + null, + null, + null, + null, + remoteIndicesPrivileges, + remoteClusterPermissions, + null + ); + PlainActionFuture future = new PlainActionFuture<>(); + rolesStore.putRole(putRoleRequest, remoteIndicesRole, future); + IllegalStateException e = expectThrows( + IllegalStateException.class, + String.format(Locale.ROOT, "expected IllegalStateException, but not thrown for mode [%s]", testMode), + future::actionGet + ); + assertThat( + e.getMessage(), + containsString( + "all nodes must have version [" + + (TEST_MODE.REMOTE_CLUSTER_PRIVS.equals(testMode) + ? 
TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS + : TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()) + + "] or higher to support remote " + + (remoteIndicesPrivileges != null ? "indices" : "cluster") + + " privileges" + ) + ); + } } public void testGetRoleWhenDisabled() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/DefaultOperatorPrivilegesTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/DefaultOperatorPrivilegesTests.java index 8a7602627b714..a47d51ac2d1c2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/DefaultOperatorPrivilegesTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/DefaultOperatorPrivilegesTests.java @@ -102,11 +102,9 @@ public void testMarkOperatorUser() throws IllegalAccessException { // Will mark for the operator user final Logger logger = LogManager.getLogger(OperatorPrivileges.class); final MockLogAppender appender = new MockLogAppender(); - appender.start(); - Loggers.addAppender(logger, appender); Loggers.setLevel(logger, Level.DEBUG); - try { + try (var ignored = appender.capturing(OperatorPrivileges.class)) { appender.addExpectation( new MockLogAppender.SeenEventExpectation( "marking", @@ -122,8 +120,6 @@ public void testMarkOperatorUser() throws IllegalAccessException { ); appender.assertAllExpectationsMatched(); } finally { - Loggers.removeAppender(logger, appender); - appender.stop(); Loggers.setLevel(logger, (Level) null); } @@ -215,11 +211,9 @@ public void testMaybeInterceptRequest() throws IllegalAccessException { final Logger logger = LogManager.getLogger(OperatorPrivileges.class); final MockLogAppender appender = new MockLogAppender(); - appender.start(); - Loggers.addAppender(logger, appender); Loggers.setLevel(logger, Level.DEBUG); - try { + try (var ignored = appender.capturing(OperatorPrivileges.class)) { final 
RestoreSnapshotRequest restoreSnapshotRequest = mock(RestoreSnapshotRequest.class); appender.addExpectation( new MockLogAppender.SeenEventExpectation( @@ -233,8 +227,6 @@ public void testMaybeInterceptRequest() throws IllegalAccessException { verify(restoreSnapshotRequest).skipOperatorOnlyState(licensed); appender.assertAllExpectationsMatched(); } finally { - Loggers.removeAppender(logger, appender); - appender.stop(); Loggers.setLevel(logger, (Level) null); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStoreTests.java index 1467142072b31..ce216b90f6e77 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStoreTests.java @@ -176,11 +176,12 @@ public void testFileAutoReload() throws Exception { final Logger logger = LogManager.getLogger(FileOperatorUsersStore.class); final MockLogAppender appender = new MockLogAppender(); - appender.start(); - Loggers.addAppender(logger, appender); Loggers.setLevel(logger, Level.TRACE); - try (ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool)) { + try ( + var ignored = appender.capturing(FileOperatorUsersStore.class); + ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool) + ) { appender.addExpectation( new MockLogAppender.SeenEventExpectation( "1st file parsing", @@ -273,8 +274,6 @@ public void testFileAutoReload() throws Exception { Files.copy(sampleFile, inUseFile, StandardCopyOption.REPLACE_EXISTING); assertBusy(() -> assertEquals(4, fileOperatorUsersStore.getOperatorUsersDescriptor().getGroups().size())); } finally { - Loggers.removeAppender(logger, appender); - appender.stop(); Loggers.setLevel(logger, 
(Level) null); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java index 11b8598768667..ca974e4e1e723 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java @@ -1482,6 +1482,7 @@ private static ApiKey createApiKeyForOwner(String apiKeyId, String username, Str Map.of("_key", "value"), null, null, + null, null ) ), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java index d722eae69f883..812354986d5bc 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java @@ -83,7 +83,7 @@ public void testCreateApiKeyRequestHasTypeOfCrossCluster() throws Exception { List.of( new RoleDescriptor( "cross_cluster", - new String[] { "cross_cluster_search" }, + new String[] { "cross_cluster_search", "monitor_enrich" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("logs") diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java index 8423d89f000af..e17d651a19748 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java @@ -25,6 +25,8 @@ import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesResponse; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ApplicationResourcePrivileges; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; @@ -142,6 +144,26 @@ public void testBuildResponse() throws Exception { ) ) ); + + boolean hasRemoteClusterPermissions = randomBoolean(); + RemoteClusterPermissions remoteClusterPermissions = hasRemoteClusterPermissions + ? 
new RemoteClusterPermissions() + : RemoteClusterPermissions.NONE; + if (hasRemoteClusterPermissions) { + remoteClusterPermissions.addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new String[] { "remote-1" } + ) + ) + .addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new String[] { "remote-2", "remote-3" } + ) + ); + } + final Set application = Sets.newHashSet( ApplicationResourcePrivileges.builder().application("app01").privileges("read", "write").resources("*").build(), ApplicationResourcePrivileges.builder().application("app01").privileges("admin").resources("department/1").build(), @@ -154,7 +176,8 @@ public void testBuildResponse() throws Exception { index, application, runAs, - remoteIndex + remoteIndex, + remoteClusterPermissions ); XContentBuilder builder = jsonBuilder(); listener.buildResponse(response, builder); @@ -185,6 +208,28 @@ public void testBuildResponse() throws Exception { "clusters": [ "*", "remote-2" ] } ]"""; + + String remoteClusterPermissionsSection = hasRemoteClusterPermissions ? 
""" + ,"remote_cluster":[ + { + "privileges":[ + "monitor_enrich" + ], + "clusters":[ + "remote-1" + ] + }, + { + "privileges":[ + "monitor_enrich" + ], + "clusters":[ + "remote-2", + "remote-3" + ] + } + ]""" : ""; + assertThat(json, equalTo(XContentHelper.stripWhitespace(Strings.format(""" { "cluster": [ "monitor", "manage_ml", "manage_watcher" ], @@ -243,7 +288,7 @@ public void testBuildResponse() throws Exception { "resources": [ "tenant/42", "tenant/99" ] } ], - "run_as": [ "app-user-*", "backup-user" ]%s - }""", remoteIndicesSection)))); + "run_as": [ "app-user-*", "backup-user" ]%s%s + }""", remoteIndicesSection, remoteClusterPermissionsSection)))); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java index 2d8307eae8ba6..473cf5ee387b8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java @@ -735,7 +735,8 @@ private void doTestSendWithCrossClusterAccessHeaders( // We capture the listener so that we can complete the full flow, by calling onResponse further down @SuppressWarnings("unchecked") final ArgumentCaptor> listenerCaptor = ArgumentCaptor.forClass(ActionListener.class); - doAnswer(i -> null).when(authzService).getRoleDescriptorsIntersectionForRemoteCluster(any(), any(), listenerCaptor.capture()); + doAnswer(i -> null).when(authzService) + .getRoleDescriptorsIntersectionForRemoteCluster(any(), any(), any(), listenerCaptor.capture()); final SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor( settings, @@ -822,6 +823,7 @@ public TransportResponse read(StreamInput in) { ); verify(authzService, 
never()).getRoleDescriptorsIntersectionForRemoteCluster( eq(remoteClusterAlias), + eq(TransportVersion.current()), eq(authentication.getEffectiveSubject()), anyActionListener() ); @@ -833,6 +835,7 @@ public TransportResponse read(StreamInput in) { listenerCaptor.getValue().onResponse(expectedRoleDescriptorsIntersection); verify(authzService, times(1)).getRoleDescriptorsIntersectionForRemoteCluster( eq(remoteClusterAlias), + eq(TransportVersion.current()), eq(authentication.getEffectiveSubject()), anyActionListener() ); @@ -917,7 +920,7 @@ public void sendRequest( sender.sendRequest(connection, "action", mock(TransportRequest.class), null, null); assertTrue(calledWrappedSender.get()); assertThat(sentAuthentication.get(), equalTo(authentication)); - verify(authzService, never()).getRoleDescriptorsIntersectionForRemoteCluster(any(), any(), anyActionListener()); + verify(authzService, never()).getRoleDescriptorsIntersectionForRemoteCluster(any(), any(), any(), anyActionListener()); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY), nullValue()); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY), nullValue()); } @@ -1026,10 +1029,10 @@ public void testSendRemoteRequestFailsIfUserHasNoRemoteIndicesPrivileges() throw doAnswer(invocation -> { @SuppressWarnings("unchecked") - final var listener = (ActionListener) invocation.getArgument(2); + final var listener = (ActionListener) invocation.getArgument(3); listener.onResponse(RoleDescriptorsIntersection.EMPTY); return null; - }).when(authzService).getRoleDescriptorsIntersectionForRemoteCluster(any(), any(), anyActionListener()); + }).when(authzService).getRoleDescriptorsIntersectionForRemoteCluster(any(), any(), any(), anyActionListener()); final SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor( settings, diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLErrorMessageCertificateVerificationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLErrorMessageCertificateVerificationTests.java index 3efc451e9b28e..19ddb70315388 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLErrorMessageCertificateVerificationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLErrorMessageCertificateVerificationTests.java @@ -18,7 +18,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.client.Request; import org.elasticsearch.client.RestClient; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.ssl.DiagnosticTrustManager; import org.elasticsearch.common.ssl.SslClientAuthenticationMode; @@ -122,12 +121,14 @@ public void testDiagnosticTrustManagerForHostnameVerificationFailure() throws Ex final Logger diagnosticLogger = LogManager.getLogger(DiagnosticTrustManager.class); final MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); // Apache clients implement their own hostname checking, but we don't want that. 
// We use a raw socket so we get the builtin JDK checking (which is what we use for transport protocol SSL checks) - try (MockWebServer webServer = initWebServer(sslService); SSLSocket clientSocket = (SSLSocket) clientSocketFactory.createSocket()) { - Loggers.addAppender(diagnosticLogger, mockAppender); + try ( + var ignored = mockAppender.capturing(DiagnosticTrustManager.class); + MockWebServer webServer = initWebServer(sslService); + SSLSocket clientSocket = (SSLSocket) clientSocketFactory.createSocket() + ) { String fileName = "/x-pack/plugin/security/build/resources/test/org/elasticsearch/xpack/ssl/SSLErrorMessageTests/ca1.crt" .replace('/', platformFileSeparator()); @@ -168,9 +169,6 @@ public void testDiagnosticTrustManagerForHostnameVerificationFailure() throws Ex // Logging message failures are tricky to debug because you just get a "didn't find match" assertion failure. // You should be able to check the log output for the text that was logged and compare to the regex above. mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(diagnosticLogger, mockAppender); - mockAppender.stop(); } } diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles.yml index 3ff445acbb9e4..cb956ff970800 100644 --- a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles.yml +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles.yml @@ -79,7 +79,7 @@ role_query_invalid: - READ query: '{ "unknown": {} }' -role_remote_indices: +role_remote: remote_indices: - clusters: - 'remote-*' @@ -87,3 +87,8 @@ role_remote_indices: - 'shared-index' privileges: - READ + remote_cluster: + - clusters: + - 'remote-*' + privileges: + - "monitor_enrich" diff --git 
a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles_with_remote_indices.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles_with_remote_indices_and_cluster.yml similarity index 66% rename from x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles_with_remote_indices.yml rename to x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles_with_remote_indices_and_cluster.yml index 65c898e5444f6..4dfed61f41f8a 100644 --- a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles_with_remote_indices.yml +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles_with_remote_indices_and_cluster.yml @@ -8,6 +8,15 @@ role: - idx2 privileges: - READ + remote_cluster: + - clusters: + - remote0 + privileges: + - "monitor_enrich" + - clusters: + - remote1 + privileges: + - "monitor_enrich" role_with_fls_dls: remote_indices: @@ -48,3 +57,14 @@ invalid_role_empty_privileges: - idx1 privileges: +invalid_role_missing_remote_clusters: + remote_cluster: + privileges: + - "monitor_enrich" + +invalid_role_bad_priv_remote_clusters: + remote_cluster: + - clusters: + - remote0 + privileges: + - "junk" diff --git a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java index 8951b91cb76a3..4b59f28a6792d 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java +++ 
b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.snapshotbasedrecoveries.recovery; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; import org.apache.lucene.index.IndexCommit; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -30,7 +29,6 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.blobstore.support.FilterBlobContainer; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.CancellableThreads; @@ -378,11 +376,8 @@ public void testFallbacksToSourceNodeWhenSnapshotDownloadFails() throws Exceptio createSnapshot(repoName, "snap", Collections.singletonList(indexName)); String targetNode; - final var recoverySourceHandlerLogger = LogManager.getLogger(RecoverySourceHandler.class); final var mockLogAppender = new MockLogAppender(); - mockLogAppender.start(); - try { - Loggers.addAppender(recoverySourceHandlerLogger, mockLogAppender); + try (var ignored = mockLogAppender.capturing(RecoverySourceHandler.class)) { mockLogAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "expected warn log about restore failure", @@ -398,9 +393,6 @@ public void testFallbacksToSourceNodeWhenSnapshotDownloadFails() throws Exceptio ensureGreen(); mockLogAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(recoverySourceHandlerLogger, mockLogAppender); - mockLogAppender.stop(); } RecoveryState recoveryState = getLatestPeerRecoveryStateForShard(indexName, 0); @@ -619,11 +611,8 @@ public void 
testRecoveryIsCancelledAfterDeletingTheIndex() throws Exception { recoverSnapshotFileRequestReceived.await(); - final var recoverySourceHandlerLogger = LogManager.getLogger(RecoverySourceHandler.class); final var mockLogAppender = new MockLogAppender(); - mockLogAppender.start(); - try { - Loggers.addAppender(recoverySourceHandlerLogger, mockLogAppender); + try (var ignored = mockLogAppender.capturing(RecoverySourceHandler.class)) { mockLogAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "expected debug log about restore cancellation", @@ -644,9 +633,6 @@ public void testRecoveryIsCancelledAfterDeletingTheIndex() throws Exception { assertAcked(indicesAdmin().prepareDelete(indexName).get()); assertBusy(mockLogAppender::assertAllExpectationsMatched); - } finally { - Loggers.removeAppender(recoverySourceHandlerLogger, mockLogAppender); - mockLogAppender.stop(); } respondToRecoverSnapshotFile.countDown(); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/50_cross_cluster.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/50_cross_cluster.yml index 4fef3c3b7db37..eecf1977ca188 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/50_cross_cluster.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/50_cross_cluster.yml @@ -111,6 +111,7 @@ teardown: "cross_cluster": { "cluster": [ "cross_cluster_search", + "monitor_enrich", "cross_cluster_replication" ], "indices": [ @@ -332,7 +333,7 @@ teardown: - match: { "api_keys.0.role_descriptors": { "cross_cluster": { "cluster": [ - "cross_cluster_search" + "cross_cluster_search", "monitor_enrich" ], "indices": [ { diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/roles/50_remote_only.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/roles/50_remote_only.yml new file mode 100644 index 0000000000000..cc60b68069195 --- /dev/null +++ 
b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/roles/50_remote_only.yml @@ -0,0 +1,84 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + - do: + security.put_user: + username: "joe" + body: > + { + "password": "s3krit-password", + "roles" : [ "remote_role" ] + } + +--- +teardown: + - do: + security.delete_user: + username: "joe" + ignore: 404 + - do: + security.delete_role: + name: "remote_role" + ignore: 404 + +--- +"Test put remote role api": + - do: + security.put_role: + name: "remote_role" + body: > + { + "remote_indices":[ + { + "names":[ + "logs*" + ], + "privileges":[ + "read" + ], + "allow_restricted_indices":false, + "clusters":[ + "*" + ] + } + ], + "remote_cluster":[ + { + "privileges":[ + "monitor_enrich" + ], + "clusters":[ + "my_remote*", "my_remote2*" + ] + } + ] + } + - match: { role: { created: true } } + + - do: + security.get_role: + name: "remote_role" + - match: { remote_role.remote_indices.0.names.0: "logs*" } + - match: { remote_role.remote_indices.0.privileges.0: "read" } + - match: { remote_role.remote_indices.0.allow_restricted_indices: false } + - match: { remote_role.remote_indices.0.clusters.0: "*" } + - match: { remote_role.remote_cluster.0.privileges.0: "monitor_enrich" } + - match: { remote_role.remote_cluster.0.clusters.0: "my_remote*" } + - match: { remote_role.remote_cluster.0.clusters.1: "my_remote2*" } + + - do: + headers: + Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" + security.get_user_privileges: {} + - match: { remote_indices.0.names.0: "logs*" } + - match: { remote_indices.0.privileges.0: "read" } + - match: { remote_indices.0.allow_restricted_indices: false } + - match: { remote_indices.0.clusters.0: "*" } + - match: { remote_cluster.0.privileges.0: "monitor_enrich" } + - match: { remote_cluster.0.clusters.0: "my_remote*" } + - match: { remote_cluster.0.clusters.1: "my_remote2*" } diff --git 
a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java index b6442ec06a04e..98a7938c12a1f 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java @@ -462,23 +462,18 @@ private DefaultCheckpointProvider newCheckpointProvider(TransformConfig transfor } private void assertExpectation(LoggingExpectation loggingExpectation, AuditExpectation auditExpectation, Runnable codeBlock) { - MockLogAppender mockLogAppender = new MockLogAppender(); - mockLogAppender.start(); - Loggers.setLevel(checkpointProviderLogger, Level.DEBUG); - mockLogAppender.addExpectation(loggingExpectation); // always start fresh transformAuditor.reset(); transformAuditor.addExpectation(auditExpectation); - try { - Loggers.addAppender(checkpointProviderLogger, mockLogAppender); + + MockLogAppender mockLogAppender = new MockLogAppender(); + try (var ignored = mockLogAppender.capturing(checkpointProviderLogger.getName())) { + mockLogAppender.addExpectation(loggingExpectation); codeBlock.run(); mockLogAppender.assertAllExpectationsMatched(); transformAuditor.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(checkpointProviderLogger, mockLogAppender); - mockLogAppender.stop(); } } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ApiKeyBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ApiKeyBackwardsCompatibilityIT.java index fb0b8115ae17b..84c8b0bd95b4f 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ApiKeyBackwardsCompatibilityIT.java +++ 
b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ApiKeyBackwardsCompatibilityIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.test.SecuritySettingsSourceField; @@ -334,9 +335,9 @@ private String getApiKeyAuthorizationHeaderValue(String id, String key) { return "ApiKey " + Base64.getEncoder().encodeToString((id + ":" + key).getBytes(StandardCharsets.UTF_8)); } - private static String randomRoleDescriptors(boolean includeRemoteIndices) { + private static String randomRoleDescriptors(boolean includeRemoteDescriptors) { try { - return XContentTestUtils.convertToXContent(Map.of("my_role", randomRoleDescriptor(includeRemoteIndices)), XContentType.JSON) + return XContentTestUtils.convertToXContent(Map.of("my_role", randomRoleDescriptor(includeRemoteDescriptors)), XContentType.JSON) .utf8ToString(); } catch (IOException e) { throw new UncheckedIOException(e); @@ -410,7 +411,7 @@ private Map getRestClientByCapability() throws IOException return clientsByCapability; } - private static RoleDescriptor randomRoleDescriptor(boolean includeRemoteIndices) { + private static RoleDescriptor randomRoleDescriptor(boolean includeRemoteDescriptors) { final Set excludedPrivileges = Set.of( "cross_cluster_replication", "cross_cluster_replication_internal", @@ -425,7 +426,10 @@ private static RoleDescriptor randomRoleDescriptor(boolean includeRemoteIndices) generateRandomStringArray(5, randomIntBetween(2, 8), false, true), RoleDescriptorTests.randomRoleDescriptorMetadata(false), Map.of(), - includeRemoteIndices ? 
RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 3, excludedPrivileges) : null, + includeRemoteDescriptors ? RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 3, excludedPrivileges) : null, + includeRemoteDescriptors + ? RoleDescriptorTests.randomRemoteClusterPermissions(randomIntBetween(1, 3)) + : RemoteClusterPermissions.NONE, null ); }