From 12e64a1bddac15b49655b79e73347256e371e4fe Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Fri, 22 Mar 2024 09:11:30 +0100 Subject: [PATCH 01/79] Use historical features for Yaml REST tests for cluster apis (#106587) --- .../test/cat.indices/10_basic.yml | 4 +- .../test/cat.templates/10_basic.yml | 16 ++-- .../cluster.allocation_explain/10_basic.yml | 10 +-- .../cluster.component_template/10_basic.yml | 16 ++-- .../test/cluster.desired_balance/10_basic.yml | 36 ++++----- .../test/cluster.desired_nodes/20_dry_run.yml | 10 +-- .../test/cluster.health/10_basic.yml | 11 +-- .../cluster.health/30_indices_options.yml | 4 +- .../test/cluster.info/10_info_all.yml | 4 +- .../test/cluster.info/20_info_http.yml | 4 +- .../test/cluster.info/30_info_thread_pool.yml | 4 +- .../test/cluster.info/40_info_script.yml | 4 +- .../10_basic.yml | 18 ++--- .../test/cluster.stats/10_basic.yml | 24 +++--- .../cluster.stats/20_indexing_pressure.yml | 4 +- .../rest/yaml/YamlTestLegacyFeatures.java | 79 +++++++++++++++++-- 16 files changed, 158 insertions(+), 90 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/10_basic.yml index 2d006f3425790..981a934a719ca 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/10_basic.yml @@ -72,7 +72,7 @@ "Test cat indices output for closed index (pre 7.2.0)": - skip: reason: "closed indices are replicated starting version 7.2.0" - cluster_features: ["cat_indices_replicate_closed"] + cluster_features: ["indices_replicate_closed"] - requires: test_runner_features: ["allowed_warnings"] @@ -117,7 +117,7 @@ "Test cat indices output for closed index": - skip: reason: "closed indices are replicated starting version 7.2.0" - cluster_features: ["cat_indices_replicate_closed"] + cluster_features: ["indices_replicate_closed"] - requires: test_runner_features: ["allowed_warnings"] diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.templates/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.templates/10_basic.yml index f6f20913e402b..5270d215f8cea 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.templates/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.templates/10_basic.yml @@ -1,7 +1,7 @@ --- "Help": - requires: - cluster_features: ["cat_templates_v2"] + cluster_features: ["templates_v2"] reason: "format changed in 7.8 to accomodate V2 index templates" - do: @@ -32,7 +32,7 @@ --- "Normal templates": - requires: - cluster_features: [ "cat_templates_v2" ] + cluster_features: [ "templates_v2" ] reason: "format changed in 7.8 to accomodate V2 index templates" - do: @@ -83,7 +83,7 @@ --- "Filtered templates": - requires: - cluster_features: [ "cat_templates_v2" ] + cluster_features: [ "templates_v2" ] reason: "format changed in 7.8 to accomodate V2 index templates" - do: @@ -125,7 +125,7 @@ --- "Column headers": - requires: - cluster_features: [ "cat_templates_v2" ] + cluster_features: [ "templates_v2" ] reason: "format changed in 7.8 to accomodate V2 index templates" - do: @@ -163,7 +163,7 @@ --- "Select columns": - requires: - cluster_features: [ "cat_templates_v2" ] + cluster_features: [ "templates_v2" ] reason: "format changed in 7.8 to accomodate V2 index templates" - do: @@ -197,7 +197,7 @@ --- "Sort templates": - 
requires: - cluster_features: [ "cat_templates_v2" ] + cluster_features: [ "templates_v2" ] reason: "format changed in 7.8 to accomodate V2 index templates" test_runner_features: default_shards, no_xpack @@ -250,7 +250,7 @@ --- "Multiple template": - requires: - cluster_features: [ "cat_templates_v2" ] + cluster_features: [ "templates_v2" ] reason: "format changed in 7.8 to accomodate V2 index templates" test_runner_features: default_shards, no_xpack @@ -286,7 +286,7 @@ --- "Mixture of legacy and composable templates": - requires: - cluster_features: [ "cat_templates_v2" ] + cluster_features: [ "templates_v2" ] reason: "format changed in 7.8 to accomodate V2 index templates" test_runner_features: allowed_warnings diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml index 1f0e2b6fd727c..8c350b50a6bf2 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml @@ -49,10 +49,10 @@ --- "Cluster shard allocation explanation test with a closed index": - - skip: - version: " - 7.1.99" + - requires: + cluster_features: ["indices_replicate_closed"] reason: closed indices are replicated starting version 7.2.0 - features: ["allowed_warnings"] + test_runner_features: ["allowed_warnings"] - do: indices.create: @@ -95,8 +95,8 @@ --- "Cluster allocation explanation response includes node's roles": - - skip: - version: " - 8.10.99" + - requires: + cluster_features: ["cluster_allocation_role"] reason: The roles field was introduced in 8.11.0 - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.component_template/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.component_template/10_basic.yml index d27abc3da7081..0308a68dae2cd 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.component_template/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.component_template/10_basic.yml @@ -1,7 +1,7 @@ --- "Basic CRUD": - - skip: - version: " - 7.7.99" + - requires: + cluster_features: ["templates_v2"] reason: "index/component template v2 API unavailable before 7.8" - do: @@ -48,8 +48,8 @@ --- "Delete multiple templates": - - skip: - version: " - 7.99.99" + - requires: + cluster_features: ["cluster_templates_delete_multiple"] reason: "not yet backported" - do: @@ -116,8 +116,8 @@ --- "Add data stream lifecycle": - - skip: - version: " - 8.10.99" + - requires: + cluster_features: ["datastream_lifecycle"] reason: "Data stream lifecycle was available from 8.11" - do: @@ -144,8 +144,8 @@ --- "Get data stream lifecycle with default rollover": - - skip: - version: " - 8.10.99" + - requires: + cluster_features: ["datastream_lifecycle"] reason: "Data stream lifecycle was available from 8.11" - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml index a4204034bfd80..cd213ebe72a8e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml @@ -1,7 +1,7 @@ --- setup: - - 
skip: - version: " - 8.5.99" + - requires: + cluster_features: ["cluster_desired_balance"] reason: "API added in in 8.6.0" --- @@ -61,8 +61,8 @@ setup: --- "Test cluster_balance_stats": - - skip: - version: " - 8.6.99" + - requires: + cluster_features: ["cluster_desired_balance_stats"] reason: "cluster_balance_stats added in in 8.7.0" - do: @@ -109,8 +109,8 @@ setup: --- "Test cluster_info": - - skip: - version: " - 8.7.99" + - requires: + cluster_features: ["cluster_info"] reason: "cluster_info added in in 8.8.0" - do: @@ -121,8 +121,8 @@ setup: --- "Test cluster_balance_stats contains node ID and roles": - - skip: - version: " - 8.7.99" + - requires: + cluster_features: ["cluster_desired_balance_extended"] reason: "node_id and roles added in in 8.8.0" - do: @@ -140,8 +140,8 @@ setup: --- "Test tier_preference": - - skip: - version: " - 8.7.99" + - requires: + cluster_features: ["cluster_desired_balance_extended"] reason: "tier_preference added in in 8.8.0" - do: @@ -165,8 +165,8 @@ setup: --- "Test computed_shard_movements": - - skip: - version: " - 8.7.99" + - requires: + cluster_features: ["cluster_desired_balance_extended"] reason: "computed_shard_movements added in in 8.8.0" - do: @@ -177,8 +177,8 @@ setup: --- "Test reset desired balance": - - skip: - version: " - 8.7.99" + - requires: + cluster_features: ["cluster_desired_balance_extended"] reason: "reset API added in in 8.8.0" - do: @@ -187,8 +187,8 @@ setup: --- "Test undesired_shard_allocation_count": - - skip: - version: " - 8.11.99" + - requires: + cluster_features: ["cluster_desired_balance_stats_undesired_count"] reason: "undesired_shard_allocation_count added in in 8.12.0" - do: @@ -225,8 +225,8 @@ setup: --- "Test unassigned_shards, total_allocations, undesired_allocations and undesired_allocations_fraction": - - skip: - version: " - 8.11.99" + - requires: + cluster_features: [ "cluster_desired_balance_stats_undesired_count" ] reason: "undesired_shard_allocation_count added in in 8.12.0" - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/20_dry_run.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/20_dry_run.yml index 3723c5b2165ca..56a693febec82 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/20_dry_run.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/20_dry_run.yml @@ -1,7 +1,7 @@ --- setup: - - skip: - version: " - 8.3.99" + - requires: + cluster_features: ["cluster_desired_nodes_dry_run"] reason: "Support for the dry run option was added in in 8.4.0" --- teardown: @@ -12,7 +12,7 @@ teardown: "Test dry run doesn't update empty desired nodes": - skip: version: " - 8.12.99" - reason: "version_node removed from version 8.13 onwards" + reason: "version_node removed from version 8.13 onwards, require the new api" - do: cluster.state: {} @@ -42,7 +42,7 @@ teardown: "Test dry run doesn't update existing desired nodes": - skip: version: " - 8.12.99" - reason: "version_node removed from version 8.13 onwards" + reason: "version_node removed from version 8.13 onwards, require the new api" - do: cluster.state: {} @@ -94,7 +94,7 @@ teardown: --- "Test validation works for dry run updates": - skip: - version: "8.9.99 - " + cluster_features: ["cluster_desired_nodes_no_settings_validation"] reason: "We started skipping setting validations in 8.10" - do: cluster.state: { } diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/10_basic.yml index a051b3626b217..586bd3b7cfb6b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/10_basic.yml @@ -132,9 +132,10 @@ --- "cluster health with closed index (pre 7.2.0)": - skip: - version: "7.2.0 - " + cluster_features: ["indices_replicate_closed"] reason: "closed indices are replicated starting version 7.2.0" - features: ["allowed_warnings"] + - requires: + test_runner_features: ["allowed_warnings"] - do: indices.create: @@ -208,10 +209,10 @@ --- "cluster health with closed index": - - skip: - version: " - 7.1.99" + - requires: + cluster_features: ["indices_replicate_closed"] reason: "closed indices are replicated starting version 7.2.0" - features: ["allowed_warnings", "default_shards"] + test_runner_features: ["allowed_warnings", "default_shards"] - do: indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/30_indices_options.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/30_indices_options.yml index 8144a2c1a1dbf..5caa0ebad30b2 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/30_indices_options.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/30_indices_options.yml @@ -34,8 +34,8 @@ setup: --- "cluster health with expand_wildcards": - - skip: - version: " - 7.1.99" + - requires: + cluster_features: ["cluster_health_indices_options"] reason: "indices options has been introduced in cluster health request starting version 7.2.0" - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/10_info_all.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/10_info_all.yml index 4e88c14a2b1dc..023e35cd4bee1 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/10_info_all.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/10_info_all.yml @@ -1,7 +1,7 @@ --- setup: - - skip: - version: " - 8.8.99" + - requires: + cluster_features: ["cluster_info_extended"] reason: "/_info/_all only available from v8.9" --- diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/20_info_http.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/20_info_http.yml index f238e5116e146..ff7d982b14fee 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/20_info_http.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/20_info_http.yml @@ -1,7 +1,7 @@ --- "Cluster HTTP Info": - - skip: - version: " - 8.8.99" + - requires: + cluster_features: ["cluster_info_extended"] reason: "/_info/http only available from v8.9" - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/30_info_thread_pool.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/30_info_thread_pool.yml index 35a538ac3a44a..261f1d8ea56cb 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/30_info_thread_pool.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/30_info_thread_pool.yml @@ -1,7 +1,7 @@ --- "Cluster HTTP Info": - - skip: - 
version: " - 8.8.99" + - requires: + cluster_features: ["cluster_info_extended"] reason: "/_info/thread_pool only available from v8.9" - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/40_info_script.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/40_info_script.yml index 8c4b3e2832daf..bb7597537014e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/40_info_script.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/40_info_script.yml @@ -1,7 +1,7 @@ --- "Cluster HTTP Info": - - skip: - version: " - 8.8.99" + - requires: + cluster_features: ["cluster_info_extended"] reason: "/_info/script only available from v8.9" - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.prevalidate_node_removal/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.prevalidate_node_removal/10_basic.yml index 740836efcdc46..fda715e416ac2 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.prevalidate_node_removal/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.prevalidate_node_removal/10_basic.yml @@ -1,8 +1,8 @@ --- "Prevalidation basic test": - - skip: - features: contains - version: "- 8.6.99" + - requires: + test_runner_features: ["contains"] + cluster_features: ["cluster_prevalidate_node_removal_reason"] reason: "The reason field was introduced in 8.7.0" # Fetch a node ID and stash it in node_id @@ -19,8 +19,8 @@ - contains: {nodes: {id: "$node_id", result: {is_safe: true, reason: no_problems, message: ""}}} --- "Prevalidation with no node specified": - - skip: - version: "- 8.5.99" + - requires: + cluster_features: ["cluster_prevalidate_node_removal"] reason: "API added in 8.6.0" - do: catch: bad_request @@ -31,8 +31,8 @@ - match: { error.reason: "Validation Failed: 1: request must contain one of the parameters 'names', 'ids', or 'external_ids';" } --- "Prevalidation with more than one query parameter": - - skip: - version: "- 8.5.99" + - requires: + cluster_features: ["cluster_prevalidate_node_removal"] reason: "API added in 8.6.0" - do: catch: bad_request @@ -45,8 +45,8 @@ - match: { error.reason: "Validation Failed: 1: request must contain only one of the parameters 'names', 'ids', or 'external_ids';" } --- "Prevalidation with non-existing node": - - skip: - version: "- 8.5.99" + - requires: + cluster_features: ["cluster_prevalidate_node_removal"] reason: "API added in 8.6.0" - do: catch: missing diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml index a46d9a67a863d..2c48282332909 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml @@ -75,8 +75,8 @@ --- "get cluster stats returns packaging types": - - skip: - version: " - 7.1.99" + - requires: + cluster_features: ["cluster_stats_packaging_types"] reason: "packaging types are added for v7.2.0" - do: @@ -85,8 +85,8 @@ - is_true: nodes.packaging_types --- "get cluster stats without runtime fields": - - skip: - version: " - 7.12.99" + - requires: + cluster_features: ["cluster_stats_runtime_fields"] reason: "cluster stats includes runtime fields from 7.13 on" - do: indices.create: @@ -98,8 +98,8 @@ --- "Usage stats with 
script-less runtime fields": - - skip: - version: " - 7.12.99" + - requires: + cluster_features: ["cluster_stats_runtime_fields"] reason: "cluster stats includes runtime fields from 7.13 on" - do: indices.create: @@ -168,8 +168,8 @@ --- "mappings sizes reported in get cluster stats": - - skip: - version: " - 8.3.99" + - requires: + cluster_features: ["cluster_stats_mapping_sizes"] reason: "mapping sizes reported from 8.4 onwards" - do: indices.create: @@ -188,8 +188,8 @@ --- "snapshot stats reported in get cluster stats": - - skip: - version: " - 8.7.99" + - requires: + cluster_features: ["cluster_stats_snapshots"] reason: "snapshot stats reported from 8.8 onwards" - do: @@ -231,8 +231,8 @@ --- "Dense vector stats": - - skip: - version: " - 8.9.99" + - requires: + cluster_features: ["cluster_stats_dense_vectors"] reason: "dense vector stats added in 8.10" - do: indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/20_indexing_pressure.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/20_indexing_pressure.yml index 9178fab25e230..648964d9e721f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/20_indexing_pressure.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/20_indexing_pressure.yml @@ -1,7 +1,7 @@ --- "Indexing pressure cluster stats": - - skip: - version: " - 8.0.99" + - requires: + cluster_features: ["cluster_stats_indexing_pressure"] reason: "indexing_pressure in cluster was added in 8.1" - do: diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java index 326afdaa7ae1a..2ce35888c3f14 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java @@ -19,7 +19,6 @@ * to support BwC. Rather than leaving them in the main src we group them here, so it's clear they are not used in production code anymore. 
*/ public class YamlTestLegacyFeatures implements FeatureSpecification { - private static final NodeFeature BULK_AUTO_ID = new NodeFeature("bulk_auto_id"); private static final NodeFeature BULK_REQUIRE_ALIAS = new NodeFeature("bulk_require_alias"); private static final NodeFeature BULK_DYNAMIC_TEMPLATE_OP_TYPE = new NodeFeature("bulk_dynamic_template_op_type"); @@ -36,7 +35,6 @@ public class YamlTestLegacyFeatures implements FeatureSpecification { private static final NodeFeature CAT_ALLOCATION_NODE_ROLE = new NodeFeature("cat_allocation_node_role"); private static final NodeFeature CAT_INDICES_DATASET_SIZE = new NodeFeature("cat_indices_dataset_size"); - private static final NodeFeature CAT_INDICES_REPLICATE_CLOSED = new NodeFeature("cat_indices_replicate_closed"); private static final NodeFeature CAT_INDICES_VALIDATE_HEALTH_PARAM = new NodeFeature("cat_indices_validate_health_param"); private static final NodeFeature CAT_PLUGINS_NEW_FORMAT = new NodeFeature("cat_plugins_new_format"); @@ -48,9 +46,47 @@ public class YamlTestLegacyFeatures implements FeatureSpecification { private static final NodeFeature CAT_TASKS_X_OPAQUE_ID = new NodeFeature("cat_tasks_x_opaque_id"); - private static final NodeFeature CAT_TEMPLATES_V2 = new NodeFeature("cat_templates_v2"); private static final NodeFeature CAT_TEMPLATE_NAME_VALIDATION = new NodeFeature("cat_template_name_validation"); + private static final NodeFeature CLUSTER_TEMPLATES_DELETE_MULTIPLE = new NodeFeature("cluster_templates_delete_multiple"); + + private static final NodeFeature CLUSTER_ALLOCATION_ROLE = new NodeFeature("cluster_allocation_role"); + + private static final NodeFeature CLUSTER_DESIRED_BALANCE = new NodeFeature("cluster_desired_balance"); + private static final NodeFeature CLUSTER_DESIRED_BALANCE_STATS = new NodeFeature("cluster_desired_balance_stats"); + private static final NodeFeature CLUSTER_DESIRED_BALANCE_EXTENDED = new NodeFeature("cluster_desired_balance_extended"); + private static final NodeFeature CLUSTER_DESIRED_BALANCE_STATS_UNDESIRED_COUNT = new NodeFeature( + "cluster_desired_balance_stats_undesired_count" + ); + + private static final NodeFeature CLUSTER_DESIRED_NODES_OLD = new NodeFeature("cluster_desired_nodes_old"); + private static final NodeFeature CLUSTER_DESIRED_NODES_DRY_RUN = new NodeFeature("cluster_desired_nodes_dry_run"); + private static final NodeFeature CLUSTER_DESIRED_NODES_NO_SETTINGS_VALIDATION = new NodeFeature( + "cluster_desired_nodes_no_settings_validation" + ); + private static final NodeFeature CLUSTER_DESIRED_NODES = new NodeFeature("cluster_desired_nodes"); + + private static final NodeFeature CLUSTER_HEALTH_INDICES_OPTIONS = new NodeFeature("cluster_health_indices_options"); + + private static final NodeFeature CLUSTER_INFO = new NodeFeature("cluster_info"); + private static final NodeFeature CLUSTER_INFO_EXTENDED = new NodeFeature("cluster_info_extended"); + + private static final NodeFeature CLUSTER_PREVALIDATE_NODE_REMOVAL = new NodeFeature("cluster_prevalidate_node_removal"); + private static final NodeFeature CLUSTER_PREVALIDATE_NODE_REMOVAL_REASON = new NodeFeature("cluster_prevalidate_node_removal_reason"); + + private static final NodeFeature CLUSTER_STATS_PACKAGING_TYPES = new NodeFeature("cluster_stats_packaging_types"); + private static final NodeFeature CLUSTER_STATS_RUNTIME_FIELDS = new NodeFeature("cluster_stats_runtime_fields"); + private static final NodeFeature CLUSTER_STATS_INDEXING_PRESSURE = new NodeFeature("cluster_stats_indexing_pressure"); + private static 
final NodeFeature CLUSTER_STATS_MAPPING_SIZES = new NodeFeature("cluster_stats_mapping_sizes"); + private static final NodeFeature CLUSTER_STATS_SNAPSHOTS = new NodeFeature("cluster_stats_snapshots"); + private static final NodeFeature CLUSTER_STATS_DENSE_VECTORS = new NodeFeature("cluster_stats_dense_vectors"); + + private static final NodeFeature DATASTREAM_LIFECYCLE = new NodeFeature("datastream_lifecycle"); + + private static final NodeFeature TEMPLATES_V2 = new NodeFeature("templates_v2"); + + private static final NodeFeature INDICES_REPLICATE_CLOSED = new NodeFeature("indices_replicate_closed"); + @Override public Map getHistoricalFeatures() { return Map.ofEntries( @@ -66,7 +102,6 @@ public Map getHistoricalFeatures() { Map.entry(CAT_ALLOCATION_NODE_ROLE, Version.V_8_10_0), - Map.entry(CAT_INDICES_REPLICATE_CLOSED, Version.V_7_2_0), Map.entry(CAT_INDICES_VALIDATE_HEALTH_PARAM, Version.V_7_8_0), Map.entry(CAT_INDICES_DATASET_SIZE, Version.V_8_11_0), @@ -79,8 +114,40 @@ public Map getHistoricalFeatures() { Map.entry(CAT_TASKS_X_OPAQUE_ID, Version.V_7_10_0), - Map.entry(CAT_TEMPLATES_V2, Version.V_7_8_0), - Map.entry(CAT_TEMPLATE_NAME_VALIDATION, Version.V_7_16_0) + Map.entry(CAT_TEMPLATE_NAME_VALIDATION, Version.V_7_16_0), + + Map.entry(CLUSTER_TEMPLATES_DELETE_MULTIPLE, Version.V_8_0_0), + Map.entry(CLUSTER_ALLOCATION_ROLE, Version.V_8_11_0), + + Map.entry(CLUSTER_DESIRED_BALANCE, Version.V_8_6_0), + Map.entry(CLUSTER_DESIRED_BALANCE_STATS, Version.V_8_7_0), + Map.entry(CLUSTER_DESIRED_BALANCE_EXTENDED, Version.V_8_8_0), + Map.entry(CLUSTER_DESIRED_BALANCE_STATS_UNDESIRED_COUNT, Version.V_8_12_0), + + Map.entry(CLUSTER_DESIRED_NODES_OLD, Version.V_8_3_0), + Map.entry(CLUSTER_DESIRED_NODES_DRY_RUN, Version.V_8_4_0), + Map.entry(CLUSTER_DESIRED_NODES_NO_SETTINGS_VALIDATION, Version.V_8_10_0), + + Map.entry(CLUSTER_HEALTH_INDICES_OPTIONS, Version.V_7_2_0), + + Map.entry(CLUSTER_INFO, Version.V_8_8_0), + Map.entry(CLUSTER_INFO_EXTENDED, Version.V_8_9_0), + + Map.entry(CLUSTER_PREVALIDATE_NODE_REMOVAL, Version.V_8_6_0), + Map.entry(CLUSTER_PREVALIDATE_NODE_REMOVAL_REASON, Version.V_8_7_0), + + Map.entry(CLUSTER_STATS_PACKAGING_TYPES, Version.V_7_2_0), + Map.entry(CLUSTER_STATS_RUNTIME_FIELDS, Version.V_7_13_0), + Map.entry(CLUSTER_STATS_INDEXING_PRESSURE, Version.V_8_1_0), + Map.entry(CLUSTER_STATS_MAPPING_SIZES, Version.V_8_4_0), + Map.entry(CLUSTER_STATS_SNAPSHOTS, Version.V_8_8_0), + Map.entry(CLUSTER_STATS_DENSE_VECTORS, Version.V_8_10_0), + + Map.entry(DATASTREAM_LIFECYCLE, Version.V_8_11_0), + + Map.entry(INDICES_REPLICATE_CLOSED, Version.V_7_2_0), + + Map.entry(TEMPLATES_V2, Version.V_7_8_0) ); } } From 1fe4946f81fcc08c23ceccaf94bfacd36846b17e Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 22 Mar 2024 08:18:48 +0000 Subject: [PATCH 02/79] Fix testScheduledFixedDelayRejection (#106630) Not really necessary to allow the scheduled task to race against the blocks, and this race is a source of test flakiness. Fixed by imposing the blocks first. 
Closes #106618 --- .../org/elasticsearch/threadpool/ThreadPoolTests.java | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java index 8a61054f5fa93..66d3dd7a829eb 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java @@ -465,7 +465,6 @@ public void testScheduledOneShotForceExecution() { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106618") public void testScheduledFixedDelayRejection() { final var name = "fixed-bounded"; final var threadPool = new TestThreadPool( @@ -476,17 +475,14 @@ public void testScheduledFixedDelayRejection() { final var future = new PlainActionFuture(); final var latch = new CountDownLatch(1); try { + blockExecution(threadPool.executor(name), latch); threadPool.scheduleWithFixedDelay( - ActionRunnable.wrap(future, ignored -> Thread.yield()), + ActionRunnable.wrap(future, ignored -> fail("should not execute")), TimeValue.timeValueMillis(between(1, 100)), threadPool.executor(name) ); - while (future.isDone() == false) { - // might not block all threads the first time round if the scheduled runnable is running, so must keep trying - blockExecution(threadPool.executor(name), latch); - } - expectThrows(EsRejectedExecutionException.class, () -> FutureUtils.get(future)); + expectThrows(EsRejectedExecutionException.class, () -> FutureUtils.get(future, 10, TimeUnit.SECONDS)); } finally { latch.countDown(); assertTrue(terminate(threadPool)); From cc8fb4dba9536c33abf6a4f396657ba97e52518c Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Fri, 22 Mar 2024 09:42:29 +0100 Subject: [PATCH 03/79] ESQL: Re-enable logical dependency check (#105860) * Fix Enrich refs locally in the dependency check * Re-enable logical dependency check + test for it * Return server error on failed dependency check --- docs/changelog/105860.yaml | 5 +++++ .../xpack/esql/optimizer/LogicalVerifier.java | 9 ++++++--- .../xpack/esql/optimizer/OptimizerRules.java | 10 ++++++++++ .../esql/optimizer/LogicalPlanOptimizerTests.java | 3 +-- 4 files changed, 22 insertions(+), 5 deletions(-) create mode 100644 docs/changelog/105860.yaml diff --git a/docs/changelog/105860.yaml b/docs/changelog/105860.yaml new file mode 100644 index 0000000000000..71f3544a02a1f --- /dev/null +++ b/docs/changelog/105860.yaml @@ -0,0 +1,5 @@ +pr: 105860 +summary: "ESQL: Re-enable logical dependency check" +area: ES|QL +type: bug +issues: [] diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java index bf569ee587dbc..6b62029bd8f45 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java @@ -22,11 +22,10 @@ private LogicalVerifier() {} /** Verifies the optimized logical plan. 
*/ public Failures verify(LogicalPlan plan) { Failures failures = new Failures(); + Failures dependencyFailures = new Failures(); plan.forEachUp(p -> { - // dependency check - // FIXME: re-enable - // DEPENDENCY_CHECK.checkPlan(p, failures); + DEPENDENCY_CHECK.checkPlan(p, dependencyFailures); if (failures.hasFailures() == false) { p.forEachExpression(ex -> { @@ -37,6 +36,10 @@ public Failures verify(LogicalPlan plan) { } }); + if (dependencyFailures.hasFailures()) { + throw new IllegalStateException(dependencyFailures.toString()); + } + return failures; } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java index 645924907b6f5..e375f11ab3ae7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java @@ -79,6 +79,16 @@ protected AttributeSet generates(P p) { } static class LogicalPlanDependencyCheck extends DependencyConsistency { + @Override + protected AttributeSet references(LogicalPlan plan) { + if (plan instanceof Enrich enrich) { + // The enrichFields are NamedExpressions, so we compute their references as well when just calling enrich.references(). + // But they are not actually referring to attributes from the input plan - only the match field does. + return enrich.matchField().references(); + } + return super.references(plan); + } + @Override protected AttributeSet generates(LogicalPlan logicalPlan) { // source-like operators diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index c6747c9d65d24..952fbc6f265e4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -3299,7 +3299,6 @@ public void testEmptyMappingIndex() { assertThat(Expressions.names(local.output()), contains(NO_FIELDS.get(0).name(), "x", "language_code", "language_name")); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105436") public void testPlanSanityCheck() throws Exception { var plan = optimizedPlan(""" from test @@ -3325,7 +3324,7 @@ public void testPlanSanityCheck() throws Exception { ) ); - VerificationException e = expectThrows(VerificationException.class, () -> logicalOptimizer.optimize(invalidPlan)); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> logicalOptimizer.optimize(invalidPlan)); assertThat(e.getMessage(), containsString("Plan [OrderBy[[Order[salary")); assertThat(e.getMessage(), containsString(" optimized incorrectly due to missing references [salary")); } From b5923eb8dc42d11eb2d44ea8802008066ebc7344 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 22 Mar 2024 08:56:15 +0000 Subject: [PATCH 04/79] AwaitsFix for #106650 --- .../test/ml/search_knn_query_vector_builder.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml index 97e5146e9af86..50f687f704994 100644 --- 
a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml @@ -106,8 +106,10 @@ setup: --- "Test vector search with query_vector_builder": - skip: - version: " - 8.13.99" - reason: "introduced after 8.13" + version: all + reason: AwaitsFix https://github.com/elastic/elasticsearch/issues/106650 +# version: " - 8.13.99" +# reason: "introduced after 8.13" - do: search: index: index-with-embedded-text From 23278a52cf60e9d155b4423c8859379c7130d8bc Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 22 Mar 2024 09:50:18 +0000 Subject: [PATCH 05/79] Expand conceptual docs around `ActionListener` (#105405) Rephrases the docs about `ActionListener` in terms of continuations and control flow, rather than as injecting logic into lower layers. --- docs/internal/DistributedArchitectureGuide.md | 72 ++++++++++++++++--- 1 file changed, 61 insertions(+), 11 deletions(-) diff --git a/docs/internal/DistributedArchitectureGuide.md b/docs/internal/DistributedArchitectureGuide.md index ea5942ff71cc8..a89956721a481 100644 --- a/docs/internal/DistributedArchitectureGuide.md +++ b/docs/internal/DistributedArchitectureGuide.md @@ -10,20 +10,70 @@ ### ActionListener -`ActionListener`s are a means off injecting logic into lower layers of the code. They encapsulate a block of code that takes a response -value -- the `onResponse()` method --, and then that block of code (the `ActionListener`) is passed into a function that will eventually -execute the code (call `onResponse()`) when a response value is available. `ActionListener`s are used to pass code down to act on a result, -rather than lower layers returning a result back up to be acted upon by the caller. One of three things can happen to a listener: it can be -executed in the same thread — e.g. `ActionListener.run()` --; it can be passed off to another thread to be executed; or it can be added to -a list someplace, to eventually be executed by some service. `ActionListener`s also define `onFailure()` logic, in case an error is -encountered before a result can be formed. +Callbacks are used extensively throughout Elasticsearch because they enable us to write asynchronous and nonblocking code, i.e. code which +doesn't necessarily compute a result straight away but also doesn't block the calling thread waiting for the result to become available. +They support several useful control flows: + +- They can be completed immediately on the calling thread. +- They can be completed concurrently on a different thread. +- They can be stored in a data structure and completed later on when the system reaches a particular state. +- Most commonly, they can be passed on to other methods that themselves require a callback. +- They can be wrapped in another callback which modifies the behaviour of the original callback, perhaps adding some extra code to run + before or after completion, before passing them on. + +`ActionListener` is a general-purpose callback interface that is used extensively across the Elasticsearch codebase. `ActionListener` is +used pretty much everywhere that needs to perform some asynchronous and nonblocking computation. The uniformity makes it easier to compose +parts of the system together without needing to build adapters to convert back and forth between different kinds of callback. 
It also makes +it easier to develop the skills needed to read and understand all the asynchronous code, although this definitely takes practice and is +certainly not easy in an absolute sense. Finally, it has allowed us to build a rich library for working with `ActionListener` instances +themselves, creating new instances out of existing ones and completing them in interesting ways. See for instance: + +- all the static methods on [ActionListener](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/ActionListener.java) itself +- [`ThreadedActionListener`](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java) for forking work elsewhere +- [`RefCountingListener`](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java) for running work in parallel +- [`SubscribableListener`](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java) for constructing flexible workflows + +Callback-based asynchronous code can easily call regular synchronous code, but synchronous code cannot run callback-based asynchronous code +without blocking the calling thread until the callback is called back. This blocking is at best undesirable (threads are too expensive to +waste with unnecessary blocking) and at worst outright broken (the blocking can lead to deadlock). Unfortunately this means that most of our +code ends up having to be written with callbacks, simply because it's ultimately calling into some other code that takes a callback. The +entry points for all Elasticsearch APIs are callback-based (e.g. REST APIs all start at +[`org.elasticsearch.rest.BaseRestHandler#prepareRequest`](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java#L158-L171), +and transport APIs all start at +[`org.elasticsearch.action.support.TransportAction#doExecute`](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/support/TransportAction.java#L65)) +and the whole system fundamentally works in terms of an event loop (a `io.netty.channel.EventLoop`) which processes network events via +callbacks. + +`ActionListener` is not an _ad-hoc_ invention. Formally speaking, it is our implementation of the general concept of a continuation in the +sense of [_continuation-passing style_](https://en.wikipedia.org/wiki/Continuation-passing_style) (CPS): an extra argument to a function +which defines how to continue the computation when the result is available. This is in contrast to _direct style_ which is the more usual +style of calling methods that return values directly back to the caller so they can continue executing as normal. There's essentially two +ways that computation can continue in Java (it can return a value or it can throw an exception) which is why `ActionListener` has both an +`onResponse()` and an `onFailure()` method. 
+ +CPS is strictly more expressive than direct style: direct code can be mechanically translated into continuation-passing style, but CPS also +enables all sorts of other useful control structures such as forking work onto separate threads, possibly to be executed in parallel, +perhaps even across multiple nodes, or possibly collecting a list of continuations all waiting for the same condition to be satisfied before +proceeding (e.g. +[`SubscribableListener`](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java) +amongst many others). Some languages have first-class support for continuations (e.g. the `async` and `await` primitives in C#) allowing the +programmer to write code in direct style away from those exotic control structures, but Java does not. That's why we have to manipulate all +the callbacks ourselves. + +Strictly speaking, CPS requires that a computation _only_ continues by calling the continuation. In Elasticsearch, this means that +asynchronous methods must have `void` return type and may not throw any exceptions. This is mostly the case in our code as written today, +and is a good guiding principle, but we don't enforce void exceptionless methods and there are some deviations from this rule. In +particular, it's not uncommon to permit some methods to throw an exception, using things like +[`ActionListener#run`](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/ActionListener.java#L381-L390) +(or an equivalent `try ... catch ...` block) further up the stack to handle it. Some methods also take (and may complete) an +`ActionListener` parameter, but still return a value separately for other local synchronous work. This pattern is often used in the transport action layer with the use of the -[ChannelActionListener]([url](https://github.com/elastic/elasticsearch/blob/8.12/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java)) +[ChannelActionListener](https://github.com/elastic/elasticsearch/blob/v8.12.2/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java) class, which wraps a `TransportChannel` produced by the transport layer. `TransportChannel` implementations can hold a reference to a Netty -channel with which to pass the response back to the network caller. Netty has a many-to-one association of network callers to channels, so -a call taking a long time generally won't hog resources: it's cheap. A transport action can take hours to respond and that's alright, -barring caller timeouts. +channel with which to pass the response back to the network caller. Netty has a many-to-one association of network callers to channels, so a +call taking a long time generally won't hog resources: it's cheap. A transport action can take hours to respond and that's alright, barring +caller timeouts. (TODO: add useful starter references and explanations for a range of Listener classes. Reference the Netty section.) 
From 4647691809b56892a8ab6b6e93e6a5dcfae09a3e Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Fri, 22 Mar 2024 11:00:39 +0100 Subject: [PATCH 06/79] Clarify the difference between ImmutableEntry and Map.entry (#106588) --- server/src/main/java/org/elasticsearch/common/util/Maps.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/Maps.java b/server/src/main/java/org/elasticsearch/common/util/Maps.java index 1b46e71dadd12..fc911793711b7 100644 --- a/server/src/main/java/org/elasticsearch/common/util/Maps.java +++ b/server/src/main/java/org/elasticsearch/common/util/Maps.java @@ -331,8 +331,7 @@ public static Map transformValues(Map source, Function /** * An immutable implementation of {@link Map.Entry}. - * @param key key - * @param value value + * Unlike {@code Map.entry(...)} this implementation permits null key and value. */ public record ImmutableEntry(KType key, VType value) implements Map.Entry { From 69ecdc643b8d7b7c9482a8feeee6380cae7abb8d Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Fri, 22 Mar 2024 11:26:45 +0100 Subject: [PATCH 07/79] Expose workflow restriction parsing (#106648) Need this one for custom role parsing upstream. --- .../elasticsearch/xpack/core/security/authz/RoleDescriptor.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java index e9aa982a05d8b..ecbd12a7f4643 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java @@ -1623,7 +1623,7 @@ public String toString() { return sb.toString(); } - static Restriction parse(String roleName, XContentParser parser) throws IOException { + public static Restriction parse(String roleName, XContentParser parser) throws IOException { // advance to the START_OBJECT token if needed XContentParser.Token token = parser.currentToken() == null ? parser.nextToken() : parser.currentToken(); if (token != XContentParser.Token.START_OBJECT) { From 14ca58c9260e3d9bca86d561d254e84ed9266ed7 Mon Sep 17 00:00:00 2001 From: Dmitry Cherniachenko <2sabio@gmail.com> Date: Fri, 22 Mar 2024 11:27:45 +0100 Subject: [PATCH 08/79] `Sets` utility class code cleanup (#105350) * `Sets` utility class code cleanup - newHashSetWithExpectedSize() returns HashSet, same as newHashSet() - haveEmptyIntersection() iterates a smaller set, same as intersection() - sortedDifference() accepts sets with Comparable values - replace Set.copyOf() in addToCopy() with Set.of() to avoid 1 extra copy of values --- .../elasticsearch/common/util/set/Sets.java | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/set/Sets.java b/server/src/main/java/org/elasticsearch/common/util/set/Sets.java index adfa5023f2b35..75e5717d41b9f 100644 --- a/server/src/main/java/org/elasticsearch/common/util/set/Sets.java +++ b/server/src/main/java/org/elasticsearch/common/util/set/Sets.java @@ -40,7 +40,7 @@ public static HashSet newHashSet(T... 
elements) { return new HashSet<>(Arrays.asList(elements)); } - public static Set newHashSetWithExpectedSize(int expectedSize) { + public static HashSet newHashSetWithExpectedSize(int expectedSize) { return new HashSet<>(capacity(expectedSize)); } @@ -53,7 +53,17 @@ static int capacity(int expectedSize) { return expectedSize < 2 ? expectedSize + 1 : (int) (expectedSize / 0.75 + 1.0); } - public static boolean haveEmptyIntersection(Set left, Set right) { + public static boolean haveEmptyIntersection(Set set1, Set set2) { + final Set left; + final Set right; + if (set1.size() < set2.size()) { + left = set1; + right = set2; + } else { + left = set2; + right = set1; + } + for (T t : left) { if (right.contains(t)) { return false; @@ -95,7 +105,7 @@ public static Set difference(Set left, Set right) { * @param the type of the elements of the sets * @return the sorted relative complement of the left set with respect to the right set */ - public static SortedSet sortedDifference(final Set left, final Set right) { + public static > SortedSet sortedDifference(final Set left, final Set right) { final SortedSet set = new TreeSet<>(); for (T k : left) { if (right.contains(k) == false) { @@ -165,11 +175,12 @@ public static Set intersection(Set set1, Set set2) { * * @param set set to copy * @param elements elements to add + * @return the unmodifiable copy of the input set with the extra elements added */ @SuppressWarnings("unchecked") public static Set addToCopy(Set set, E... elements) { final var res = new HashSet<>(set); Collections.addAll(res, elements); - return Set.copyOf(res); + return (Set) Set.of(res.toArray()); } } From d8fc8779c469e688336a23bd34f91ce8af9f417c Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 22 Mar 2024 12:06:27 +0100 Subject: [PATCH 09/79] Fix S3RepositoryThirdPartyTests.testReadFromPositionLargerThanBlobLength (#106466) The test should use a random operation purpose that is not "Indices", otherwise S3RetryingInputStream retries up to Integer.MAX_VALUE times which causes the test suite to timeout. Also fixes the progress in the retries log messages. 
Closes #106457 --- .../s3/S3RepositoryThirdPartyTests.java | 28 ++++++++++++++---- .../s3/S3RetryingInputStream.java | 29 +++++++++++++++---- 2 files changed, 47 insertions(+), 10 deletions(-) diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index 583d1477fbaa9..085e357da5ae9 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.repositories.s3; -import com.amazonaws.AmazonClientException; import com.amazonaws.services.s3.model.AmazonS3Exception; import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; @@ -26,6 +25,7 @@ import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.recovery.RecoverySettings; @@ -44,12 +44,14 @@ import java.io.IOException; import java.util.Collection; import java.util.List; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; @@ -226,7 +228,6 @@ List listMultipartUploads() { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106457") public void testReadFromPositionLargerThanBlobLength() { final var blobName = randomIdentifier(); final var blobBytes = randomBytesReference(randomIntBetween(100, 2_000)); @@ -239,9 +240,26 @@ public void testReadFromPositionLargerThanBlobLength() { long position = randomLongBetween(blobBytes.length(), Long.MAX_VALUE - 1L); long length = randomLongBetween(1L, Long.MAX_VALUE - position); - var exception = expectThrows(AmazonClientException.class, () -> readBlob(repository, blobName, position, length)); - assertThat(exception, instanceOf(AmazonS3Exception.class)); - assertThat(((AmazonS3Exception) exception).getStatusCode(), equalTo(RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus())); + var exception = expectThrows(UncategorizedExecutionException.class, () -> readBlob(repository, blobName, position, length)); + assertThat(exception.getCause(), instanceOf(ExecutionException.class)); + assertThat(exception.getCause().getCause(), instanceOf(IOException.class)); + assertThat( + exception.getCause().getCause().getMessage(), + containsString( + "Requested range [start=" + + position + + ", end=" + + (position + length - 1L) + + ", currentOffset=0] cannot be satisfied for blob object [" + + repository.basePath().buildAsString() + + blobName + + ']' + ) + ); + assertThat( + asInstanceOf(AmazonS3Exception.class, exception.getRootCause()).getStatusCode(), + 
equalTo(RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus()) + ); } } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java index 998455a658406..d08ff5eefd20f 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.core.IOUtils; import org.elasticsearch.repositories.s3.S3BlobStore.Operation; +import org.elasticsearch.rest.RestStatus; import java.io.IOException; import java.io.InputStream; @@ -94,16 +95,34 @@ private void openStreamWithRetry() throws IOException { : "requesting beyond end, start = " + start + " offset=" + currentOffset + " end=" + end; getObjectRequest.setRange(Math.addExact(start, currentOffset), end); } - final S3Object s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(getObjectRequest)); this.currentStreamFirstOffset = Math.addExact(start, currentOffset); + final S3Object s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(getObjectRequest)); this.currentStreamLastOffset = Math.addExact(currentStreamFirstOffset, getStreamLength(s3Object)); this.currentStream = s3Object.getObjectContent(); return; } catch (AmazonClientException e) { - if (e instanceof AmazonS3Exception amazonS3Exception && 404 == amazonS3Exception.getStatusCode()) { - throw addSuppressedExceptions( - new NoSuchFileException("Blob object [" + blobKey + "] not found: " + amazonS3Exception.getMessage()) - ); + if (e instanceof AmazonS3Exception amazonS3Exception) { + if (amazonS3Exception.getStatusCode() == RestStatus.NOT_FOUND.getStatus()) { + throw addSuppressedExceptions( + new NoSuchFileException("Blob object [" + blobKey + "] not found: " + amazonS3Exception.getMessage()) + ); + } + if (amazonS3Exception.getStatusCode() == RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus()) { + throw addSuppressedExceptions( + new IOException( + "Requested range [start=" + + start + + ", end=" + + end + + ", currentOffset=" + + currentOffset + + "] cannot be satisfied for blob object [" + + blobKey + + ']', + amazonS3Exception + ) + ); + } } if (attempt == 1) { From a8188f8173caf07023aedc490d22fd9050cdab1e Mon Sep 17 00:00:00 2001 From: David Kyle Date: Fri, 22 Mar 2024 11:12:45 +0000 Subject: [PATCH 10/79] [ML] Fix Array out of bounds exception in the XLM Roberta tokenizer (#106655) Increases the buffer size for the normalised form of the input unicode character. 
Certain characters can have surprisingly long normalised forms --- docs/changelog/106655.yaml | 5 +++++ .../nlp/tokenizers/PrecompiledCharMapNormalizer.java | 5 +---- .../nlp/tokenizers/PrecompiledCharMapNormalizerTests.java | 5 +++++ 3 files changed, 11 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/106655.yaml diff --git a/docs/changelog/106655.yaml b/docs/changelog/106655.yaml new file mode 100644 index 0000000000000..98078595d5f0c --- /dev/null +++ b/docs/changelog/106655.yaml @@ -0,0 +1,5 @@ +pr: 106655 +summary: Fix Array out of bounds exception in the XLM Roberta tokenizer +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java index 836c9a78f19d9..93dc8077196d7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java @@ -73,10 +73,8 @@ static Config fromBase64EncodedResource(String resourcePath) throws IOException private final int[] offsets; // The entire normalized bytes representations delimited by NULL private final byte[] normalizedStrUtf8Bytes; - // Continually reused to copy a single char into utf8 bytes - private final byte[] reusableCharByteBuffer = new byte[4]; // reusable char buffer for decoding utf8 bytes to determine char offset corrections - private final char[] reusableCharDecodeBuffer = new char[8]; + private final char[] reusableCharDecodeBuffer = new char[64]; private Reader transformedInput; public PrecompiledCharMapNormalizer(int[] offsets, String normalizedStr, Reader in) { @@ -172,7 +170,6 @@ Reader normalize(CharSequence str) { ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(CharBuffer.wrap(str)); byte[] strBytes = new byte[byteBuffer.limit()]; byteBuffer.get(strBytes); - int[] strCp = str.codePoints().toArray(); BreakIterator b = BreakIterator.getCharacterInstance(Locale.ROOT); b.setText(str); // We iterate the whole string, so b.first() is always `0` diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizerTests.java index d542b97eee192..eef9902d35e59 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizerTests.java @@ -57,6 +57,11 @@ public void testEmoji() throws IOException { assertNormalization("😀", parsed, "😀"); } + public void testCharThatNormalizesToLongText() throws IOException { + PrecompiledCharMapNormalizer.Config parsed = loadTestCharMap(); + assertNormalization("ﷺ", parsed, "صلى الله عليه وسلم"); + } + private void assertNormalization(String input, PrecompiledCharMapNormalizer.Config config, String expected) throws IOException { PrecompiledCharMapNormalizer normalizer = new PrecompiledCharMapNormalizer( config.offsets(), From c41df745a6e0c954ee8827e7bac7fd0b71e2bb52 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 22 Mar 2024 12:31:16 +0100 Subject: [PATCH 11/79] Move more XContent parsers that are only used 
in tests to test codebase (#105801) Just like a couple times before, moving a couple more of the test only parsers to the test codebase to save code-size etc. --- .../geoip/stats/GeoIpDownloaderStats.java | 34 +---- .../GeoIpDownloaderStatsSerializingTests.java | 17 ++- .../action/DocWriteResponse.java | 2 +- .../node/tasks/list/ListTasksResponse.java | 41 +---- .../verify/VerifyRepositoryResponse.java | 22 --- .../ClusterUpdateSettingsResponse.java | 25 +-- .../status/SnapshotsStatusResponse.java | 22 --- .../GetScriptContextResponse.java | 29 +--- .../analyze/ReloadAnalyzersResponse.java | 57 +------ .../admin/indices/open/OpenIndexResponse.java | 16 -- .../validate/query/ValidateQueryResponse.java | 41 ----- .../action/delete/DeleteResponse.java | 20 --- .../action/explain/ExplainResponse.java | 54 +------ .../action/index/IndexResponse.java | 20 --- .../support/master/AcknowledgedResponse.java | 2 +- .../master/ShardsAcknowledgedResponse.java | 2 +- .../support/tasks/BaseTasksResponse.java | 4 +- .../action/update/UpdateResponse.java | 31 +--- .../cluster/health/ClusterIndexHealth.java | 11 -- .../index/reindex/BulkByScrollTask.java | 138 ----------------- .../search/profile/SearchProfileResults.java | 81 +--------- .../verify/VerifyRepositoryResponseTests.java | 23 ++- .../ClusterUpdateSettingsResponseTests.java | 17 ++- .../status/SnapshotsStatusResponseTests.java | 19 ++- .../GetScriptContextResponseTests.java | 26 +++- .../analyze/ReloadAnalyzersResponseTests.java | 48 +++++- .../indices/open/OpenIndexResponseTests.java | 15 +- .../query/ValidateQueryResponseTests.java | 36 ++++- .../action/bulk/BulkItemResponseTests.java | 7 +- .../action/delete/DeleteResponseTests.java | 14 +- .../action/explain/ExplainResponseTests.java | 41 ++++- .../action/index/IndexResponseTests.java | 12 +- .../action/update/UpdateResponseTests.java | 29 +++- .../health/ClusterIndexHealthTests.java | 9 +- .../reindex/BulkByScrollResponseTests.java | 2 +- ...ulkByScrollTaskStatusOrExceptionTests.java | 2 +- .../reindex/BulkByScrollTaskStatusTests.java | 144 +++++++++++++++++- .../profile/SearchProfileResultsTests.java | 3 +- .../tasks/ListTasksResponseTests.java | 40 ++++- .../search/SearchResponseUtils.java | 78 +++++++++- .../core/ilm/ExplainLifecycleResponse.java | 25 --- .../action/PreviewTransformAction.java | 26 ---- .../ilm/ExplainLifecycleResponseTests.java | 22 ++- .../PreviewTransformsActionResponseTests.java | 27 +++- .../action/PutAnalyticsCollectionAction.java | 15 -- ...CollectionResponseBWCSerializingTests.java | 3 +- 46 files changed, 639 insertions(+), 713 deletions(-) diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStats.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStats.java index acfda99ae42fc..64b704a484058 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStats.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStats.java @@ -13,10 +13,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.ingest.geoip.GeoIpDownloader; import org.elasticsearch.tasks.Task; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; @@ -25,26 +23,12 @@ public class 
GeoIpDownloaderStats implements Task.Status { public static final GeoIpDownloaderStats EMPTY = new GeoIpDownloaderStats(0, 0, 0, 0, 0, 0); - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "geoip_downloader_stats", - a -> new GeoIpDownloaderStats((int) a[0], (int) a[1], (long) a[2], (int) a[3], (int) a[4], a[5] == null ? 0 : (int) a[5]) - ); - - private static final ParseField SUCCESSFUL_DOWNLOADS = new ParseField("successful_downloads"); - private static final ParseField FAILED_DOWNLOADS = new ParseField("failed_downloads"); - private static final ParseField TOTAL_DOWNLOAD_TIME = new ParseField("total_download_time"); - private static final ParseField DATABASES_COUNT = new ParseField("databases_count"); - private static final ParseField SKIPPED_DOWNLOADS = new ParseField("skipped_updates"); - private static final ParseField EXPIRED_DATABASES = new ParseField("expired_databases"); - - static { - PARSER.declareInt(ConstructingObjectParser.constructorArg(), SUCCESSFUL_DOWNLOADS); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), FAILED_DOWNLOADS); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_DOWNLOAD_TIME); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), DATABASES_COUNT); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), SKIPPED_DOWNLOADS); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), EXPIRED_DATABASES); - } + static final ParseField SUCCESSFUL_DOWNLOADS = new ParseField("successful_downloads"); + static final ParseField FAILED_DOWNLOADS = new ParseField("failed_downloads"); + static final ParseField TOTAL_DOWNLOAD_TIME = new ParseField("total_download_time"); + static final ParseField DATABASES_COUNT = new ParseField("databases_count"); + static final ParseField SKIPPED_DOWNLOADS = new ParseField("skipped_updates"); + static final ParseField EXPIRED_DATABASES = new ParseField("expired_databases"); private final int successfulDownloads; private final int failedDownloads; @@ -62,7 +46,7 @@ public GeoIpDownloaderStats(StreamInput in) throws IOException { expiredDatabases = in.readVInt(); } - private GeoIpDownloaderStats( + GeoIpDownloaderStats( int successfulDownloads, int failedDownloads, long totalDownloadTime, @@ -170,10 +154,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static GeoIpDownloaderStats fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(successfulDownloads); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsSerializingTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsSerializingTests.java index 68b1ac4b28ff7..69e9cc9b5f5e5 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsSerializingTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsSerializingTests.java @@ -10,15 +10,30 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; public class GeoIpDownloaderStatsSerializingTests extends AbstractXContentSerializingTestCase { + private 
static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "geoip_downloader_stats", + a -> new GeoIpDownloaderStats((int) a[0], (int) a[1], (long) a[2], (int) a[3], (int) a[4], a[5] == null ? 0 : (int) a[5]) + ); + + static { + PARSER.declareInt(ConstructingObjectParser.constructorArg(), GeoIpDownloaderStats.SUCCESSFUL_DOWNLOADS); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), GeoIpDownloaderStats.FAILED_DOWNLOADS); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), GeoIpDownloaderStats.TOTAL_DOWNLOAD_TIME); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), GeoIpDownloaderStats.DATABASES_COUNT); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), GeoIpDownloaderStats.SKIPPED_DOWNLOADS); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), GeoIpDownloaderStats.EXPIRED_DATABASES); + } + @Override protected GeoIpDownloaderStats doParseInstance(XContentParser parser) throws IOException { - return GeoIpDownloaderStats.fromXContent(parser); + return PARSER.parse(parser, null); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 230a8154b64ce..fdef41acb16da 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -309,7 +309,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t * {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly * if needed and then immediately returns. */ - protected static void parseInnerToXContent(XContentParser parser, Builder context) throws IOException { + public static void parseInnerToXContent(XContentParser parser, Builder context) throws IOException { XContentParser.Token token = parser.currentToken(); ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index 7d883ad60b4e7..6d052c242c55c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -15,7 +15,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -23,9 +22,6 @@ import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; @@ -35,13 +31,11 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - /** * Returns the list of tasks currently running on the nodes */ public class ListTasksResponse extends 
BaseTasksResponse { - private static final String TASKS = "tasks"; + public static final String TASKS = "tasks"; private final List tasks; @@ -69,35 +63,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(tasks); } - protected static ConstructingObjectParser setupParser( - String name, - TriFunction, List, List, T> ctor - ) { - ConstructingObjectParser parser = new ConstructingObjectParser<>(name, true, constructingObjects -> { - int i = 0; - @SuppressWarnings("unchecked") - List tasks = (List) constructingObjects[i++]; - @SuppressWarnings("unchecked") - List tasksFailures = (List) constructingObjects[i++]; - @SuppressWarnings("unchecked") - List nodeFailures = (List) constructingObjects[i]; - return ctor.apply(tasks, tasksFailures, nodeFailures); - }); - parser.declareObjectArray(optionalConstructorArg(), TaskInfo.PARSER, new ParseField(TASKS)); - parser.declareObjectArray(optionalConstructorArg(), (p, c) -> TaskOperationFailure.fromXContent(p), new ParseField(TASK_FAILURES)); - parser.declareObjectArray( - optionalConstructorArg(), - (p, c) -> ElasticsearchException.fromXContent(p), - new ParseField(NODE_FAILURES) - ); - return parser; - } - - private static final ConstructingObjectParser PARSER = setupParser( - "list_tasks_response", - ListTasksResponse::new - ); - /** * Returns the list of tasks by node */ @@ -250,10 +215,6 @@ public ChunkedToXContentObject groupedByNone() { })); } - public static ListTasksResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - @Override public String toString() { return Strings.toString(ChunkedToXContent.wrapAsToXContent(groupedByNone()), true, true); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java index 354c67cfb416b..8d48141f9e268 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java @@ -14,11 +14,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Arrays; @@ -34,12 +31,6 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte static final String NAME = "name"; public static class NodeView implements Writeable, ToXContentObject { - private static final ObjectParser.NamedObjectParser PARSER; - static { - ObjectParser internalParser = new ObjectParser<>(NODES, true, null); - internalParser.declareString(NodeView::setName, new ParseField(NAME)); - PARSER = (p, v, name) -> internalParser.parse(p, new NodeView(name), null); - } final String nodeId; String name; @@ -104,15 +95,6 @@ public int hashCode() { private List nodes; - private static final ObjectParser PARSER = new ObjectParser<>( - VerifyRepositoryResponse.class.getName(), - true, - VerifyRepositoryResponse::new - ); - static { - PARSER.declareNamedObjects(VerifyRepositoryResponse::setNodes, NodeView.PARSER, new ParseField("nodes")); - 
} - public VerifyRepositoryResponse() {} public VerifyRepositoryResponse(StreamInput in) throws IOException { @@ -157,10 +139,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static VerifyRepositoryResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - @Override public String toString() { return Strings.toString(this); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java index a4ec5222e2392..d99cc0b0ef8df 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java @@ -12,36 +12,19 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; - /** * A response for a cluster update settings action. */ public class ClusterUpdateSettingsResponse extends AcknowledgedResponse { - private static final ParseField PERSISTENT = new ParseField("persistent"); - private static final ParseField TRANSIENT = new ParseField("transient"); - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "cluster_update_settings_response", - true, - args -> { - return new ClusterUpdateSettingsResponse((boolean) args[0], (Settings) args[1], (Settings) args[2]); - } - ); - static { - declareAcknowledgedField(PARSER); - PARSER.declareObject(constructorArg(), (p, c) -> Settings.fromXContent(p), TRANSIENT); - PARSER.declareObject(constructorArg(), (p, c) -> Settings.fromXContent(p), PERSISTENT); - } + static final ParseField PERSISTENT = new ParseField("persistent"); + static final ParseField TRANSIENT = new ParseField("transient"); final Settings transientSettings; final Settings persistentSettings; @@ -83,10 +66,6 @@ protected void addCustomFields(XContentBuilder builder, Params params) throws IO builder.endObject(); } - public static ClusterUpdateSettingsResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - @Override public boolean equals(Object o) { if (super.equals(o)) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java index 578800edfb691..941f1c8d30b2c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java @@ -13,18 +13,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import 
org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Iterator; import java.util.List; import java.util.Objects; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; - /** * Snapshot status response */ @@ -55,23 +50,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(snapshots); } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "snapshots_status_response", - true, - (Object[] parsedObjects) -> { - @SuppressWarnings("unchecked") - List snapshots = (List) parsedObjects[0]; - return new SnapshotsStatusResponse(snapshots); - } - ); - static { - PARSER.declareObjectArray(constructorArg(), SnapshotStatus.PARSER, new ParseField("snapshots")); - } - - public static SnapshotsStatusResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java index e7568a0c66a37..73cfeb48b96bc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java @@ -13,11 +13,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.Maps; import org.elasticsearch.script.ScriptContextInfo; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collections; @@ -31,28 +29,9 @@ public class GetScriptContextResponse extends ActionResponse implements ToXContentObject { - private static final ParseField CONTEXTS = new ParseField("contexts"); + static final ParseField CONTEXTS = new ParseField("contexts"); final Map contexts; - @SuppressWarnings("unchecked") - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "get_script_context", - true, - (a) -> { - Map contexts = ((List) a[0]).stream() - .collect(Collectors.toMap(ScriptContextInfo::getName, c -> c)); - return new GetScriptContextResponse(contexts); - } - ); - - static { - PARSER.declareObjectArray( - ConstructingObjectParser.constructorArg(), - (parser, ctx) -> ScriptContextInfo.PARSER.apply(parser, ctx), - CONTEXTS - ); - } - GetScriptContextResponse(StreamInput in) throws IOException { super(in); int size = in.readInt(); @@ -70,7 +49,7 @@ public class GetScriptContextResponse extends ActionResponse implements ToXConte } // Parser constructor - private GetScriptContextResponse(Map contexts) { + GetScriptContextResponse(Map contexts) { this.contexts = Map.copyOf(contexts); } @@ -96,10 +75,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static GetScriptContextResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.apply(parser, null); - } - @Override public boolean equals(Object o) { if (this == o) { diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponse.java index 4f388804f2340..98b11de5bffc9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponse.java @@ -9,19 +9,14 @@ package org.elasticsearch.action.admin.indices.analyze; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Arrays; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -29,8 +24,6 @@ import java.util.Objects; import java.util.Set; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; - /** * The response object that will be returned when reloading analyzers */ @@ -38,10 +31,10 @@ public class ReloadAnalyzersResponse extends BroadcastResponse { private final Map reloadDetails; - private static final ParseField RELOAD_DETAILS_FIELD = new ParseField("reload_details"); - private static final ParseField INDEX_FIELD = new ParseField("index"); - private static final ParseField RELOADED_ANALYZERS_FIELD = new ParseField("reloaded_analyzers"); - private static final ParseField RELOADED_NODE_IDS_FIELD = new ParseField("reloaded_node_ids"); + static final ParseField RELOAD_DETAILS_FIELD = new ParseField("reload_details"); + static final ParseField INDEX_FIELD = new ParseField("index"); + static final ParseField RELOADED_ANALYZERS_FIELD = new ParseField("reloaded_analyzers"); + static final ParseField RELOADED_NODE_IDS_FIELD = new ParseField("reloaded_node_ids"); public ReloadAnalyzersResponse(StreamInput in) throws IOException { super(in); @@ -80,48 +73,6 @@ protected void addCustomXContentFields(XContentBuilder builder, Params params) t builder.endArray(); } - @SuppressWarnings({ "unchecked" }) - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "reload_analyzer", - true, - arg -> { - BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; - List results = (List) arg[1]; - Map reloadedNodeIds = new HashMap<>(); - for (ReloadDetails result : results) { - reloadedNodeIds.put(result.getIndexName(), result); - } - return new ReloadAnalyzersResponse( - response.getTotalShards(), - response.getSuccessfulShards(), - response.getFailedShards(), - Arrays.asList(response.getShardFailures()), - reloadedNodeIds - ); - } - ); - - @SuppressWarnings({ "unchecked" }) - private static final ConstructingObjectParser ENTRY_PARSER = new ConstructingObjectParser<>( - "reload_analyzer.entry", - true, - arg -> { - return new ReloadDetails((String) arg[0], new HashSet<>((List) arg[1]), new HashSet<>((List) arg[2])); - } - ); - - static { - declareBroadcastFields(PARSER); - PARSER.declareObjectArray(constructorArg(), ENTRY_PARSER, RELOAD_DETAILS_FIELD); - 
ENTRY_PARSER.declareString(constructorArg(), INDEX_FIELD); - ENTRY_PARSER.declareStringArray(constructorArg(), RELOADED_ANALYZERS_FIELD); - ENTRY_PARSER.declareStringArray(constructorArg(), RELOADED_NODE_IDS_FIELD); - } - - public static ReloadAnalyzersResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java index 39d16e35e61a2..605aab5ab02d9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java @@ -11,8 +11,6 @@ import org.elasticsearch.action.support.master.ShardsAcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -21,16 +19,6 @@ */ public class OpenIndexResponse extends ShardsAcknowledgedResponse { - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "open_index", - true, - args -> new OpenIndexResponse((boolean) args[0], (boolean) args[1]) - ); - - static { - declareAcknowledgedAndShardsAcknowledgedFields(PARSER); - } - public OpenIndexResponse(StreamInput in) throws IOException { super(in, true); } @@ -44,8 +32,4 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeShardsAcknowledged(out); } - - public static OpenIndexResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java index 6ec0be33e3766..aaa06908f72f0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java @@ -9,23 +9,14 @@ package org.elasticsearch.action.admin.indices.validate.query; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Arrays; import java.util.Collections; import java.util.List; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - /** * The response of the validate action. 
* @@ -36,38 +27,10 @@ public class ValidateQueryResponse extends BroadcastResponse { public static final String VALID_FIELD = "valid"; public static final String EXPLANATIONS_FIELD = "explanations"; - @SuppressWarnings("unchecked") - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "validate_query", - true, - arg -> { - BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; - return new ValidateQueryResponse( - (boolean) arg[1], - (List) arg[2], - response.getTotalShards(), - response.getSuccessfulShards(), - response.getFailedShards(), - Arrays.asList(response.getShardFailures()) - ); - } - ); - static { - declareBroadcastFields(PARSER); - PARSER.declareBoolean(constructorArg(), new ParseField(VALID_FIELD)); - PARSER.declareObjectArray(optionalConstructorArg(), QueryExplanation.PARSER, new ParseField(EXPLANATIONS_FIELD)); - } - private final boolean valid; private final List queryExplanations; - ValidateQueryResponse(StreamInput in) throws IOException { - super(in); - valid = in.readBoolean(); - queryExplanations = in.readCollectionAsList(QueryExplanation::new); - } - ValidateQueryResponse( boolean valid, List queryExplanations, @@ -115,8 +78,4 @@ protected void addCustomXContentFields(XContentBuilder builder, Params params) t builder.endArray(); } } - - public static ValidateQueryResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } } diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java index 9fd9c5fcd791f..47202998d3193 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java @@ -12,12 +12,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; - /** * The response of the delete action. * @@ -64,23 +61,6 @@ public String toString() { return builder.append("]").toString(); } - public static DeleteResponse fromXContent(XContentParser parser) throws IOException { - ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); - - Builder context = new Builder(); - while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - parseXContentFields(parser, context); - } - return context.build(); - } - - /** - * Parse the current token and update the parsing context appropriately. - */ - public static void parseXContentFields(XContentParser parser, Builder context) throws IOException { - DocWriteResponse.parseInnerToXContent(parser, context); - } - /** * Builder class for {@link DeleteResponse}. 
This builder is usually used during xcontent parsing to * temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java index 58c50df47c3ce..9d8ba5f126fd5 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java @@ -17,14 +17,11 @@ import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Collection; import java.util.Objects; import static org.elasticsearch.common.lucene.Lucene.readExplanation; @@ -35,14 +32,14 @@ */ public class ExplainResponse extends ActionResponse implements ToXContentObject { - private static final ParseField _INDEX = new ParseField("_index"); - private static final ParseField _ID = new ParseField("_id"); + static final ParseField _INDEX = new ParseField("_index"); + static final ParseField _ID = new ParseField("_id"); private static final ParseField MATCHED = new ParseField("matched"); - private static final ParseField EXPLANATION = new ParseField("explanation"); - private static final ParseField VALUE = new ParseField("value"); - private static final ParseField DESCRIPTION = new ParseField("description"); - private static final ParseField DETAILS = new ParseField("details"); - private static final ParseField GET = new ParseField("get"); + static final ParseField EXPLANATION = new ParseField("explanation"); + static final ParseField VALUE = new ParseField("value"); + static final ParseField DESCRIPTION = new ParseField("description"); + static final ParseField DETAILS = new ParseField("details"); + static final ParseField GET = new ParseField("get"); private final String index; private final String id; @@ -136,43 +133,6 @@ public void writeTo(StreamOutput out) throws IOException { } } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "explain", - true, - (arg, exists) -> new ExplainResponse((String) arg[0], (String) arg[1], exists, (Explanation) arg[2], (GetResult) arg[3]) - ); - - static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), _INDEX); - PARSER.declareString(ConstructingObjectParser.constructorArg(), _ID); - final ConstructingObjectParser explanationParser = getExplanationsParser(); - PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), explanationParser, EXPLANATION); - PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> GetResult.fromXContentEmbedded(p), GET); - } - - @SuppressWarnings("unchecked") - private static ConstructingObjectParser getExplanationsParser() { - final ConstructingObjectParser explanationParser = new ConstructingObjectParser<>( - "explanation", - true, - arg -> { - if ((float) arg[0] > 0) { - return Explanation.match((float) arg[0], (String) arg[1], (Collection) arg[2]); - } else { - return Explanation.noMatch((String) arg[1], (Collection) arg[2]); - } - } - ); - 
explanationParser.declareFloat(ConstructingObjectParser.constructorArg(), VALUE); - explanationParser.declareString(ConstructingObjectParser.constructorArg(), DESCRIPTION); - explanationParser.declareObjectArray(ConstructingObjectParser.constructorArg(), explanationParser, DETAILS); - return explanationParser; - } - - public static ExplainResponse fromXContent(XContentParser parser, boolean exists) { - return PARSER.apply(parser, exists); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java index e3598c33d5951..c547eb7ba8f30 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -17,13 +17,10 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.List; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; - /** * A response of an index operation, * @@ -134,23 +131,6 @@ public String toString() { return builder.append("]").toString(); } - public static IndexResponse fromXContent(XContentParser parser) throws IOException { - ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); - - Builder context = new Builder(); - while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - parseXContentFields(parser, context); - } - return context.build(); - } - - /** - * Parse the current token and update the parsing context appropriately. - */ - public static void parseXContentFields(XContentParser parser, Builder context) throws IOException { - DocWriteResponse.parseInnerToXContent(parser, context); - } - /** * Builder class for {@link IndexResponse}. 
This builder is usually used during xcontent parsing to * temporarily store the parsed values, then the {@link Builder#build()} method is called to diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java index 107bab447e03e..892db8d4a6d04 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java @@ -40,7 +40,7 @@ public class AcknowledgedResponse extends ActionResponse implements IsAcknowledg public static final String ACKNOWLEDGED_KEY = "acknowledged"; private static final ParseField ACKNOWLEDGED = new ParseField(ACKNOWLEDGED_KEY); - protected static void declareAcknowledgedField(ConstructingObjectParser objectParser) { + public static void declareAcknowledgedField(ConstructingObjectParser objectParser) { objectParser.declareField( constructorArg(), (parser, context) -> parser.booleanValue(), diff --git a/server/src/main/java/org/elasticsearch/action/support/master/ShardsAcknowledgedResponse.java b/server/src/main/java/org/elasticsearch/action/support/master/ShardsAcknowledgedResponse.java index f897d8644d4bb..a00495605dbb5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/ShardsAcknowledgedResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/ShardsAcknowledgedResponse.java @@ -24,7 +24,7 @@ public class ShardsAcknowledgedResponse extends AcknowledgedResponse { protected static final ParseField SHARDS_ACKNOWLEDGED = new ParseField("shards_acknowledged"); - protected static void declareAcknowledgedAndShardsAcknowledgedFields( + public static void declareAcknowledgedAndShardsAcknowledgedFields( ConstructingObjectParser objectParser ) { declareAcknowledgedField(objectParser); diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java b/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java index fe6db0c0c4a4b..3e8290ad4fb4a 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java @@ -32,8 +32,8 @@ * Base class for responses of task-related operations */ public class BaseTasksResponse extends ActionResponse { - protected static final String TASK_FAILURES = "task_failures"; - protected static final String NODE_FAILURES = "node_failures"; + public static final String TASK_FAILURES = "task_failures"; + public static final String NODE_FAILURES = "node_failures"; private List taskFailures; private List nodeFailures; diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java b/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java index c6454dd6cedd8..8c9ae3f43d5c4 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java @@ -15,15 +15,12 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; - public class UpdateResponse extends DocWriteResponse { - private static final String GET = "get"; + 
static final String GET = "get"; private GetResult getResult; @@ -114,32 +111,6 @@ public String toString() { return builder.append("]").toString(); } - public static UpdateResponse fromXContent(XContentParser parser) throws IOException { - ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); - - Builder context = new Builder(); - while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - parseXContentFields(parser, context); - } - return context.build(); - } - - /** - * Parse the current token and update the parsing context appropriately. - */ - public static void parseXContentFields(XContentParser parser, Builder context) throws IOException { - XContentParser.Token token = parser.currentToken(); - String currentFieldName = parser.currentName(); - - if (GET.equals(currentFieldName)) { - if (token == XContentParser.Token.START_OBJECT) { - context.setGetResult(GetResult.fromXContentEmbedded(parser)); - } - } else { - DocWriteResponse.parseInnerToXContent(parser, context); - } - } - /** * Builder class for {@link UpdateResponse}. This builder is usually used during xcontent parsing to * temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java index 887a0d7d5a751..f236a9eff25a2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java +++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java @@ -31,7 +31,6 @@ import java.util.Objects; import static java.util.Collections.emptyMap; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -284,16 +283,6 @@ public static ClusterIndexHealth innerFromXContent(XContentParser parser, String return PARSER.apply(parser, index); } - public static ClusterIndexHealth fromXContent(XContentParser parser) throws IOException { - ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); - XContentParser.Token token = parser.nextToken(); - ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); - String index = parser.currentName(); - ClusterIndexHealth parsed = innerFromXContent(parser, index); - ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); - return parsed; - } - @Override public String toString() { return "ClusterIndexHealth{" diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java index 150948b4e5822..a483dd5cd48e9 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java @@ -20,14 +20,9 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParseException; -import 
org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParser.Token; import java.io.IOException; import java.util.ArrayList; @@ -42,9 +37,7 @@ import static java.lang.Math.min; import static java.util.Collections.emptyList; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.core.TimeValue.timeValueNanos; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; /** * Task storing information about a currently running BulkByScroll request. @@ -380,37 +373,6 @@ public static class Status implements Task.Status, SuccessfullyProcessed { FIELDS_SET.add(SLICES_FIELD); } - static final ConstructingObjectParser, Void> RETRIES_PARSER = new ConstructingObjectParser<>( - "bulk_by_scroll_task_status_retries", - true, - a -> new Tuple<>(((Long) a[0]), (Long) a[1]) - ); - static { - RETRIES_PARSER.declareLong(constructorArg(), new ParseField(RETRIES_BULK_FIELD)); - RETRIES_PARSER.declareLong(constructorArg(), new ParseField(RETRIES_SEARCH_FIELD)); - } - - public static void declareFields(ObjectParser parser) { - parser.declareInt(StatusBuilder::setSliceId, new ParseField(SLICE_ID_FIELD)); - parser.declareLong(StatusBuilder::setTotal, new ParseField(TOTAL_FIELD)); - parser.declareLong(StatusBuilder::setUpdated, new ParseField(UPDATED_FIELD)); - parser.declareLong(StatusBuilder::setCreated, new ParseField(CREATED_FIELD)); - parser.declareLong(StatusBuilder::setDeleted, new ParseField(DELETED_FIELD)); - parser.declareInt(StatusBuilder::setBatches, new ParseField(BATCHES_FIELD)); - parser.declareLong(StatusBuilder::setVersionConflicts, new ParseField(VERSION_CONFLICTS_FIELD)); - parser.declareLong(StatusBuilder::setNoops, new ParseField(NOOPS_FIELD)); - parser.declareObject(StatusBuilder::setRetries, RETRIES_PARSER, new ParseField(RETRIES_FIELD)); - parser.declareLong(StatusBuilder::setThrottled, new ParseField(THROTTLED_RAW_FIELD)); - parser.declareFloat(StatusBuilder::setRequestsPerSecond, new ParseField(REQUESTS_PER_SEC_FIELD)); - parser.declareString(StatusBuilder::setReasonCancelled, new ParseField(CANCELED_FIELD)); - parser.declareLong(StatusBuilder::setThrottledUntil, new ParseField(THROTTLED_UNTIL_RAW_FIELD)); - parser.declareObjectArray( - StatusBuilder::setSliceStatuses, - (p, c) -> StatusOrException.fromXContent(p), - new ParseField(SLICES_FIELD) - ); - } - private final Integer sliceId; private final long total; private final long updated; @@ -571,11 +533,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder.endObject(); } - /** - * We need to write a manual parser for this because of {@link StatusOrException}. Since - * {@link StatusOrException#fromXContent(XContentParser)} tries to peek at a field first before deciding - * what needs to be it cannot use an {@link ObjectParser}. 
- */ public XContentBuilder innerXContent(XContentBuilder builder, Params params) throws IOException { if (sliceId != null) { builder.field(SLICE_ID_FIELD, sliceId); @@ -617,61 +574,6 @@ public XContentBuilder innerXContent(XContentBuilder builder, Params params) thr return builder; } - public static Status fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token; - if (parser.currentToken() == Token.START_OBJECT) { - token = parser.nextToken(); - } else { - token = parser.nextToken(); - } - ensureExpectedToken(Token.START_OBJECT, token, parser); - token = parser.nextToken(); - ensureExpectedToken(Token.FIELD_NAME, token, parser); - return innerFromXContent(parser); - } - - public static Status innerFromXContent(XContentParser parser) throws IOException { - Token token = parser.currentToken(); - String fieldName = parser.currentName(); - ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); - StatusBuilder builder = new StatusBuilder(); - while ((token = parser.nextToken()) != Token.END_OBJECT) { - if (token == Token.FIELD_NAME) { - fieldName = parser.currentName(); - } else if (token == Token.START_OBJECT) { - if (fieldName.equals(Status.RETRIES_FIELD)) { - builder.setRetries(Status.RETRIES_PARSER.parse(parser, null)); - } else { - parser.skipChildren(); - } - } else if (token == Token.START_ARRAY) { - if (fieldName.equals(Status.SLICES_FIELD)) { - while ((token = parser.nextToken()) != Token.END_ARRAY) { - builder.addToSliceStatuses(StatusOrException.fromXContent(parser)); - } - } else { - parser.skipChildren(); - } - } else { // else if it is a value - switch (fieldName) { - case Status.SLICE_ID_FIELD -> builder.setSliceId(parser.intValue()); - case Status.TOTAL_FIELD -> builder.setTotal(parser.longValue()); - case Status.UPDATED_FIELD -> builder.setUpdated(parser.longValue()); - case Status.CREATED_FIELD -> builder.setCreated(parser.longValue()); - case Status.DELETED_FIELD -> builder.setDeleted(parser.longValue()); - case Status.BATCHES_FIELD -> builder.setBatches(parser.intValue()); - case Status.VERSION_CONFLICTS_FIELD -> builder.setVersionConflicts(parser.longValue()); - case Status.NOOPS_FIELD -> builder.setNoops(parser.longValue()); - case Status.THROTTLED_RAW_FIELD -> builder.setThrottled(parser.longValue()); - case Status.REQUESTS_PER_SEC_FIELD -> builder.setRequestsPerSecond(parser.floatValue()); - case Status.CANCELED_FIELD -> builder.setReasonCancelled(parser.text()); - case Status.THROTTLED_UNTIL_RAW_FIELD -> builder.setThrottledUntil(parser.longValue()); - } - } - } - return builder.buildStatus(); - } - @Override public String toString() { StringBuilder builder = new StringBuilder(); @@ -937,46 +839,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - /** - * Since {@link StatusOrException} can contain either an {@link Exception} or a {@link Status} we need to peek - * at a field first before deciding what needs to be parsed since the same object could contains either. - * The {@link #EXPECTED_EXCEPTION_FIELDS} contains the fields that are expected when the serialised object - * was an instance of exception and the {@link Status#FIELDS_SET} is the set of fields expected when the - * serialized object was an instance of Status. 
- */ - public static StatusOrException fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token = parser.currentToken(); - if (token == null) { - token = parser.nextToken(); - } - if (token == Token.VALUE_NULL) { - return null; - } else { - ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); - token = parser.nextToken(); - // This loop is present only to ignore unknown tokens. It breaks as soon as we find a field - // that is allowed. - while (token != Token.END_OBJECT) { - ensureExpectedToken(Token.FIELD_NAME, token, parser); - String fieldName = parser.currentName(); - // weird way to ignore unknown tokens - if (Status.FIELDS_SET.contains(fieldName)) { - return new StatusOrException(Status.innerFromXContent(parser)); - } else if (EXPECTED_EXCEPTION_FIELDS.contains(fieldName)) { - return new StatusOrException(ElasticsearchException.innerFromXContent(parser, false)); - } else { - // Ignore unknown tokens - token = parser.nextToken(); - if (token == Token.START_OBJECT || token == Token.START_ARRAY) { - parser.skipChildren(); - } - token = parser.nextToken(); - } - } - throw new XContentParseException("Unable to parse StatusFromException. Expected fields not found."); - } - } - @Override public String toString() { if (exception != null) { diff --git a/server/src/main/java/org/elasticsearch/search/profile/SearchProfileResults.java b/server/src/main/java/org/elasticsearch/search/profile/SearchProfileResults.java index 1cc6810f8e575..cb15e9af8956a 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/SearchProfileResults.java +++ b/server/src/main/java/org/elasticsearch/search/profile/SearchProfileResults.java @@ -15,36 +15,28 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; -import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult; -import org.elasticsearch.search.profile.query.QueryProfileShardResult; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.TreeSet; import java.util.regex.Matcher; import java.util.regex.Pattern; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; - /** * Profile results for all shards. 
*/ public final class SearchProfileResults implements Writeable, ToXContentFragment { private static final Logger logger = LogManager.getLogger(SearchProfileResults.class); - private static final String ID_FIELD = "id"; + public static final String ID_FIELD = "id"; private static final String NODE_ID_FIELD = "node_id"; private static final String CLUSTER_FIELD = "cluster"; private static final String INDEX_NAME_FIELD = "index"; private static final String SHARD_ID_FIELD = "shard_id"; - private static final String SHARDS_FIELD = "shards"; + public static final String SHARDS_FIELD = "shards"; public static final String PROFILE_FIELD = "profile"; // map key is the composite "id" of form [nodeId][(clusterName:)indexName][shardId] created from SearchShardTarget.toString @@ -117,75 +109,6 @@ public String toString() { return Strings.toString(this); } - public static SearchProfileResults fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token = parser.currentToken(); - ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); - Map profileResults = new HashMap<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.START_ARRAY) { - if (SHARDS_FIELD.equals(parser.currentName())) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - parseProfileResultsEntry(parser, profileResults); - } - } else { - parser.skipChildren(); - } - } else if (token == XContentParser.Token.START_OBJECT) { - parser.skipChildren(); - } - } - return new SearchProfileResults(profileResults); - } - - private static void parseProfileResultsEntry(XContentParser parser, Map searchProfileResults) - throws IOException { - XContentParser.Token token = parser.currentToken(); - ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); - SearchProfileDfsPhaseResult searchProfileDfsPhaseResult = null; - List queryProfileResults = new ArrayList<>(); - AggregationProfileShardResult aggProfileShardResult = null; - ProfileResult fetchResult = null; - String id = null; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if (ID_FIELD.equals(currentFieldName)) { - id = parser.text(); - } else { - parser.skipChildren(); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if ("searches".equals(currentFieldName)) { - while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) { - queryProfileResults.add(QueryProfileShardResult.fromXContent(parser)); - } - } else if (AggregationProfileShardResult.AGGREGATIONS.equals(currentFieldName)) { - aggProfileShardResult = AggregationProfileShardResult.fromXContent(parser); - } else { - parser.skipChildren(); - } - } else if (token == XContentParser.Token.START_OBJECT) { - if ("dfs".equals(currentFieldName)) { - searchProfileDfsPhaseResult = SearchProfileDfsPhaseResult.fromXContent(parser); - } else if ("fetch".equals(currentFieldName)) { - fetchResult = ProfileResult.fromXContent(parser); - } else { - parser.skipChildren(); - } - } else { - parser.skipChildren(); - } - } - SearchProfileShardResult result = new SearchProfileShardResult( - new SearchProfileQueryPhaseResult(queryProfileResults, aggProfileShardResult), - fetchResult - ); - result.getQueryPhase().setSearchProfileDfsPhaseResult(searchProfileDfsPhaseResult); - searchProfileResults.put(id, result); - } - /** * 
Parsed representation of a composite id used for shards in a profile. * The composite id format is specified/created via the {@code SearchShardTarget} method. diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponseTests.java index 8f5712d90487f..bf90d962912c5 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponseTests.java @@ -8,6 +8,8 @@ package org.elasticsearch.action.admin.cluster.repositories.verify; import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; import java.util.ArrayList; @@ -15,9 +17,28 @@ public class VerifyRepositoryResponseTests extends AbstractXContentTestCase { + private static final ObjectParser PARSER = new ObjectParser<>( + VerifyRepositoryResponse.class.getName(), + true, + VerifyRepositoryResponse::new + ); + static { + ObjectParser internalParser = new ObjectParser<>( + VerifyRepositoryResponse.NODES, + true, + null + ); + internalParser.declareString(VerifyRepositoryResponse.NodeView::setName, new ParseField(VerifyRepositoryResponse.NAME)); + PARSER.declareNamedObjects( + VerifyRepositoryResponse::setNodes, + (p, v, name) -> internalParser.parse(p, new VerifyRepositoryResponse.NodeView(name), null), + new ParseField("nodes") + ); + } + @Override protected VerifyRepositoryResponse doParseInstance(XContentParser parser) { - return VerifyRepositoryResponse.fromXContent(parser); + return PARSER.apply(parser, null); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java index 3f5692c30cfef..3d46994faacf7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java @@ -14,17 +14,32 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.XContentParser; import java.util.List; import java.util.Set; import java.util.function.Predicate; +import static org.elasticsearch.action.support.master.AcknowledgedResponse.declareAcknowledgedField; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + public class ClusterUpdateSettingsResponseTests extends AbstractXContentSerializingTestCase { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "cluster_update_settings_response", + true, + args -> new ClusterUpdateSettingsResponse((boolean) args[0], (Settings) args[1], (Settings) args[2]) + ); + static { + declareAcknowledgedField(PARSER); + PARSER.declareObject(constructorArg(), (p, c) -> Settings.fromXContent(p), ClusterUpdateSettingsResponse.TRANSIENT); + PARSER.declareObject(constructorArg(), (p, c) -> Settings.fromXContent(p), 
ClusterUpdateSettingsResponse.PERSISTENT); + } + @Override protected ClusterUpdateSettingsResponse doParseInstance(XContentParser parser) { - return ClusterUpdateSettingsResponse.fromXContent(parser); + return PARSER.apply(parser, null); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java index f90b37f75fa41..21cba892669d0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java @@ -10,6 +10,8 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -17,11 +19,26 @@ import java.util.List; import java.util.function.Predicate; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + public class SnapshotsStatusResponseTests extends AbstractChunkedSerializingTestCase { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "snapshots_status_response", + true, + (Object[] parsedObjects) -> { + @SuppressWarnings("unchecked") + List snapshots = (List) parsedObjects[0]; + return new SnapshotsStatusResponse(snapshots); + } + ); + static { + PARSER.declareObjectArray(constructorArg(), SnapshotStatus.PARSER, new ParseField("snapshots")); + } + @Override protected SnapshotsStatusResponse doParseInstance(XContentParser parser) throws IOException { - return SnapshotsStatusResponse.fromXContent(parser); + return PARSER.parse(parser, null); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptContextResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptContextResponseTests.java index efb1e61e19fa2..41faaf3517e76 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptContextResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptContextResponseTests.java @@ -8,14 +8,38 @@ package org.elasticsearch.action.admin.cluster.storedscripts; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.script.ScriptContextInfo; import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; public class GetScriptContextResponseTests extends AbstractXContentSerializingTestCase { + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_script_context", + true, + (a) -> { + Map contexts = ((List) a[0]).stream() + .collect(Collectors.toMap(ScriptContextInfo::getName, c -> c)); + return new GetScriptContextResponse(contexts); + } + ); + + static { + PARSER.declareObjectArray( + ConstructingObjectParser.constructorArg(), + ScriptContextInfo.PARSER::apply, + GetScriptContextResponse.CONTEXTS + ); + } + 
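+    // Test-local parser: rebuilds a GetScriptContextResponse from the GetScriptContextResponse.CONTEXTS object array,
+    // replacing the previous use of GetScriptContextResponse.fromXContent in doParseInstance below.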
@Override protected GetScriptContextResponse createTestInstance() { if (randomBoolean()) { @@ -31,7 +55,7 @@ protected Writeable.Reader instanceReader() { @Override protected GetScriptContextResponse doParseInstance(XContentParser parser) throws IOException { - return GetScriptContextResponse.fromXContent(parser); + return PARSER.apply(parser, null); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponseTests.java index a6524932dd775..f0802e471fc38 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponseTests.java @@ -8,10 +8,12 @@ package org.elasticsearch.action.admin.indices.analyze; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.AbstractBroadcastResponseTestCase; import org.elasticsearch.test.TransportVersionUtils; +import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -23,8 +25,52 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.action.support.broadcast.BaseBroadcastResponse.declareBroadcastFields; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + public class ReloadAnalyzersResponseTests extends AbstractBroadcastResponseTestCase { + @SuppressWarnings({ "unchecked" }) + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "reload_analyzer", + true, + arg -> { + BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; + List results = (List) arg[1]; + Map reloadedNodeIds = new HashMap<>(); + for (ReloadAnalyzersResponse.ReloadDetails result : results) { + reloadedNodeIds.put(result.getIndexName(), result); + } + return new ReloadAnalyzersResponse( + response.getTotalShards(), + response.getSuccessfulShards(), + response.getFailedShards(), + Arrays.asList(response.getShardFailures()), + reloadedNodeIds + ); + } + ); + + @SuppressWarnings({ "unchecked" }) + private static final ConstructingObjectParser ENTRY_PARSER = + new ConstructingObjectParser<>( + "reload_analyzer.entry", + true, + arg -> new ReloadAnalyzersResponse.ReloadDetails( + (String) arg[0], + new HashSet<>((List) arg[1]), + new HashSet<>((List) arg[2]) + ) + ); + + static { + declareBroadcastFields(PARSER); + PARSER.declareObjectArray(constructorArg(), ENTRY_PARSER, ReloadAnalyzersResponse.RELOAD_DETAILS_FIELD); + ENTRY_PARSER.declareString(constructorArg(), ReloadAnalyzersResponse.INDEX_FIELD); + ENTRY_PARSER.declareStringArray(constructorArg(), ReloadAnalyzersResponse.RELOADED_ANALYZERS_FIELD); + ENTRY_PARSER.declareStringArray(constructorArg(), ReloadAnalyzersResponse.RELOADED_NODE_IDS_FIELD); + } + @Override protected ReloadAnalyzersResponse createTestInstance( int totalShards, @@ -50,7 +96,7 @@ public static Map createRandomRel @Override protected ReloadAnalyzersResponse doParseInstance(XContentParser parser) throws IOException { - return ReloadAnalyzersResponse.fromXContent(parser); + return PARSER.apply(parser, null); } @Override diff --git 
a/server/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java index 962304ef8aadc..a8d8980e6358c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java @@ -10,13 +10,26 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.XContentParser; +import static org.elasticsearch.action.support.master.ShardsAcknowledgedResponse.declareAcknowledgedAndShardsAcknowledgedFields; + public class OpenIndexResponseTests extends AbstractXContentSerializingTestCase { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "open_index", + true, + args -> new OpenIndexResponse((boolean) args[0], (boolean) args[1]) + ); + + static { + declareAcknowledgedAndShardsAcknowledgedFields(PARSER); + } + @Override protected OpenIndexResponse doParseInstance(XContentParser parser) { - return OpenIndexResponse.fromXContent(parser); + return PARSER.apply(parser, null); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java index 6bbb2884f1bf3..9ec910e79918c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java @@ -10,19 +10,53 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.test.AbstractBroadcastResponseTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; +import static org.elasticsearch.action.support.broadcast.BaseBroadcastResponse.declareBroadcastFields; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + public class ValidateQueryResponseTests extends AbstractBroadcastResponseTestCase { + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "validate_query", + true, + arg -> { + BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; + return new ValidateQueryResponse( + (boolean) arg[1], + (List) arg[2], + response.getTotalShards(), + response.getSuccessfulShards(), + response.getFailedShards(), + Arrays.asList(response.getShardFailures()) + ); + } + ); + static { + declareBroadcastFields(PARSER); + PARSER.declareBoolean(constructorArg(), new ParseField(ValidateQueryResponse.VALID_FIELD)); + PARSER.declareObjectArray( + optionalConstructorArg(), + QueryExplanation.PARSER, + new 
ParseField(ValidateQueryResponse.EXPLANATIONS_FIELD) + ); + } + private static ValidateQueryResponse createRandomValidateQueryResponse( int totalShards, int successfulShards, @@ -60,7 +94,7 @@ private static ValidateQueryResponse createRandomValidateQueryResponse() { @Override protected ValidateQueryResponse doParseInstance(XContentParser parser) throws IOException { - return ValidateQueryResponse.fromXContent(parser); + return PARSER.apply(parser, null); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java index 76b1fa0011540..6c45367baf674 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java @@ -192,17 +192,16 @@ public static BulkItemResponse itemResponseFromXContent(XContentParser parser, i if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { final IndexResponse.Builder indexResponseBuilder = new IndexResponse.Builder(); builder = indexResponseBuilder; - itemParser = (indexParser) -> IndexResponse.parseXContentFields(indexParser, indexResponseBuilder); - + itemParser = indexParser -> DocWriteResponse.parseInnerToXContent(indexParser, indexResponseBuilder); } else if (opType == DocWriteRequest.OpType.UPDATE) { final UpdateResponse.Builder updateResponseBuilder = new UpdateResponse.Builder(); builder = updateResponseBuilder; - itemParser = (updateParser) -> UpdateResponse.parseXContentFields(updateParser, updateResponseBuilder); + itemParser = updateParser -> UpdateResponseTests.parseXContentFields(updateParser, updateResponseBuilder); } else if (opType == DocWriteRequest.OpType.DELETE) { final DeleteResponse.Builder deleteResponseBuilder = new DeleteResponse.Builder(); builder = deleteResponseBuilder; - itemParser = (deleteParser) -> DeleteResponse.parseXContentFields(deleteParser, deleteResponseBuilder); + itemParser = deleteParser -> DocWriteResponse.parseInnerToXContent(deleteParser, deleteResponseBuilder); } else { throwUnknownField(currentFieldName, parser); } diff --git a/server/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java b/server/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java index e7019a583b729..937ac2d26ebb9 100644 --- a/server/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.delete; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -26,6 +27,7 @@ import static org.elasticsearch.action.index.IndexResponseTests.assertDocWriteResponse; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_UUID_NA_VALUE; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; public class DeleteResponseTests extends ESTestCase { @@ -102,7 +104,7 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws } DeleteResponse parsedDeleteResponse; try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { - parsedDeleteResponse = DeleteResponse.fromXContent(parser); + 
parsedDeleteResponse = parseInstance(parser); assertNull(parser.nextToken()); } @@ -112,6 +114,16 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws assertDocWriteResponse(expectedDeleteResponse, parsedDeleteResponse); } + private static DeleteResponse parseInstance(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + + DeleteResponse.Builder context = new DeleteResponse.Builder(); + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + DocWriteResponse.parseInnerToXContent(parser, context); + } + return context.build(); + } + /** * Returns a tuple of {@link DeleteResponse}s. *
diff --git a/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java b/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java index 31fcfe342eb3a..2830d9408e494 100644 --- a/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.index.get.GetResult; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.test.RandomObjects; +import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -24,6 +25,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.function.Predicate; @@ -34,9 +36,46 @@ public class ExplainResponseTests extends AbstractXContentSerializingTestCase { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "explain", + true, + (arg, exists) -> new ExplainResponse((String) arg[0], (String) arg[1], exists, (Explanation) arg[2], (GetResult) arg[3]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ExplainResponse._INDEX); + PARSER.declareString(ConstructingObjectParser.constructorArg(), ExplainResponse._ID); + final ConstructingObjectParser explanationParser = getExplanationsParser(); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), explanationParser, ExplainResponse.EXPLANATION); + PARSER.declareObject( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> GetResult.fromXContentEmbedded(p), + ExplainResponse.GET + ); + } + + @SuppressWarnings("unchecked") + private static ConstructingObjectParser getExplanationsParser() { + final ConstructingObjectParser explanationParser = new ConstructingObjectParser<>( + "explanation", + true, + arg -> { + if ((float) arg[0] > 0) { + return Explanation.match((float) arg[0], (String) arg[1], (Collection) arg[2]); + } else { + return Explanation.noMatch((String) arg[1], (Collection) arg[2]); + } + } + ); + explanationParser.declareFloat(ConstructingObjectParser.constructorArg(), ExplainResponse.VALUE); + explanationParser.declareString(ConstructingObjectParser.constructorArg(), ExplainResponse.DESCRIPTION); + explanationParser.declareObjectArray(ConstructingObjectParser.constructorArg(), explanationParser, ExplainResponse.DETAILS); + return explanationParser; + } + @Override protected ExplainResponse doParseInstance(XContentParser parser) throws IOException { - return ExplainResponse.fromXContent(parser, randomBoolean()); + return PARSER.apply(parser, randomBoolean()); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java index ea9e83021e781..c8a8c3853601d 100644 --- a/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java @@ -29,6 +29,7 @@ import static org.elasticsearch.action.support.replication.ReplicationResponseTests.assertShardInfo; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_UUID_NA_VALUE; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static 
org.elasticsearch.test.XContentTestUtils.insertRandomFields; public class IndexResponseTests extends ESTestCase { @@ -111,7 +112,7 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws } IndexResponse parsedIndexResponse; try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { - parsedIndexResponse = IndexResponse.fromXContent(parser); + parsedIndexResponse = parseInstanceFromXContent(parser); assertNull(parser.nextToken()); } @@ -121,6 +122,15 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws assertDocWriteResponse(expectedIndexResponse, parsedIndexResponse); } + private static IndexResponse parseInstanceFromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + IndexResponse.Builder context = new IndexResponse.Builder(); + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + DocWriteResponse.parseInnerToXContent(parser, context); + } + return context.build(); + } + public static void assertDocWriteResponse(DocWriteResponse expected, DocWriteResponse actual) { assertEquals(expected.getIndex(), actual.getIndex()); assertEquals(expected.getId(), actual.getId()); diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java index 05c974ea9d4d3..d35162287e3ac 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java @@ -38,6 +38,7 @@ import static org.elasticsearch.action.DocWriteResponse.Result.UPDATED; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_UUID_NA_VALUE; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; @@ -174,7 +175,7 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws } UpdateResponse parsedUpdateResponse; try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { - parsedUpdateResponse = UpdateResponse.fromXContent(parser); + parsedUpdateResponse = parseInstanceFromXContent(parser); assertNull(parser.nextToken()); } @@ -191,6 +192,32 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws assertToXContentEquivalent(expectedBytes, parsedBytes, xContentType); } + private static UpdateResponse parseInstanceFromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + + UpdateResponse.Builder context = new UpdateResponse.Builder(); + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + parseXContentFields(parser, context); + } + return context.build(); + } + + /** + * Parse the current token and update the parsing context appropriately. 
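+     * The embedded {@code get} result is parsed via {@link GetResult#fromXContentEmbedded}, while all other fields
+     * are delegated to {@link DocWriteResponse#parseInnerToXContent}.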
+ */ + public static void parseXContentFields(XContentParser parser, UpdateResponse.Builder context) throws IOException { + XContentParser.Token token = parser.currentToken(); + String currentFieldName = parser.currentName(); + + if (UpdateResponse.GET.equals(currentFieldName)) { + if (token == XContentParser.Token.START_OBJECT) { + context.setGetResult(GetResult.fromXContentEmbedded(parser)); + } + } else { + DocWriteResponse.parseInnerToXContent(parser, context); + } + } + /** * Returns a tuple of {@link UpdateResponse}s. *
diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java index 9ee942df1c2b0..637a18547b1b2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java @@ -26,6 +26,7 @@ import java.util.function.Predicate; import java.util.regex.Pattern; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.hamcrest.CoreMatchers.equalTo; public class ClusterIndexHealthTests extends AbstractXContentSerializingTestCase { @@ -101,7 +102,13 @@ protected Writeable.Reader instanceReader() { @Override protected ClusterIndexHealth doParseInstance(XContentParser parser) throws IOException { - return ClusterIndexHealth.fromXContent(parser); + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + XContentParser.Token token = parser.nextToken(); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + String index = parser.currentName(); + ClusterIndexHealth parsed = ClusterIndexHealth.innerFromXContent(parser, index); + ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); + return parsed; } @Override diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java index f8162eb987226..48432a0ff4958 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java @@ -51,7 +51,7 @@ public class BulkByScrollResponseTests extends AbstractXContentTestCase { @@ -177,7 +184,142 @@ protected BulkByScrollTask.Status createTestInstance() { @Override protected BulkByScrollTask.Status doParseInstance(XContentParser parser) throws IOException { - return BulkByScrollTask.Status.fromXContent(parser); + XContentParser.Token token; + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + token = parser.nextToken(); + } else { + token = parser.nextToken(); + } + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + token = parser.nextToken(); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + return innerParseStatus(parser); + } + + private static final ConstructingObjectParser, Void> RETRIES_PARSER = new ConstructingObjectParser<>( + "bulk_by_scroll_task_status_retries", + true, + a -> new Tuple<>(((Long) a[0]), (Long) a[1]) + ); + static { + RETRIES_PARSER.declareLong(constructorArg(), new ParseField(BulkByScrollTask.Status.RETRIES_BULK_FIELD)); + RETRIES_PARSER.declareLong(constructorArg(), new ParseField(BulkByScrollTask.Status.RETRIES_SEARCH_FIELD)); + } + + public static void declareFields(ObjectParser parser) { + parser.declareInt(BulkByScrollTask.StatusBuilder::setSliceId, new ParseField(BulkByScrollTask.Status.SLICE_ID_FIELD)); + parser.declareLong(BulkByScrollTask.StatusBuilder::setTotal, new ParseField(BulkByScrollTask.Status.TOTAL_FIELD)); + parser.declareLong(BulkByScrollTask.StatusBuilder::setUpdated, new ParseField(BulkByScrollTask.Status.UPDATED_FIELD)); + parser.declareLong(BulkByScrollTask.StatusBuilder::setCreated, new ParseField(BulkByScrollTask.Status.CREATED_FIELD)); + parser.declareLong(BulkByScrollTask.StatusBuilder::setDeleted, new 
ParseField(BulkByScrollTask.Status.DELETED_FIELD)); + parser.declareInt(BulkByScrollTask.StatusBuilder::setBatches, new ParseField(BulkByScrollTask.Status.BATCHES_FIELD)); + parser.declareLong( + BulkByScrollTask.StatusBuilder::setVersionConflicts, + new ParseField(BulkByScrollTask.Status.VERSION_CONFLICTS_FIELD) + ); + parser.declareLong(BulkByScrollTask.StatusBuilder::setNoops, new ParseField(BulkByScrollTask.Status.NOOPS_FIELD)); + parser.declareObject( + BulkByScrollTask.StatusBuilder::setRetries, + RETRIES_PARSER, + new ParseField(BulkByScrollTask.Status.RETRIES_FIELD) + ); + parser.declareLong(BulkByScrollTask.StatusBuilder::setThrottled, new ParseField(BulkByScrollTask.Status.THROTTLED_RAW_FIELD)); + parser.declareFloat( + BulkByScrollTask.StatusBuilder::setRequestsPerSecond, + new ParseField(BulkByScrollTask.Status.REQUESTS_PER_SEC_FIELD) + ); + parser.declareString(BulkByScrollTask.StatusBuilder::setReasonCancelled, new ParseField(BulkByScrollTask.Status.CANCELED_FIELD)); + parser.declareLong( + BulkByScrollTask.StatusBuilder::setThrottledUntil, + new ParseField(BulkByScrollTask.Status.THROTTLED_UNTIL_RAW_FIELD) + ); + parser.declareObjectArray( + BulkByScrollTask.StatusBuilder::setSliceStatuses, + (p, c) -> parseStatusOrException(p), + new ParseField(BulkByScrollTask.Status.SLICES_FIELD) + ); + } + + private static Status innerParseStatus(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + String fieldName = parser.currentName(); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + BulkByScrollTask.StatusBuilder builder = new BulkByScrollTask.StatusBuilder(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if (fieldName.equals(Status.RETRIES_FIELD)) { + builder.setRetries(RETRIES_PARSER.parse(parser, null)); + } else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (fieldName.equals(Status.SLICES_FIELD)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + builder.addToSliceStatuses(parseStatusOrException(parser)); + } + } else { + parser.skipChildren(); + } + } else { // else if it is a value + switch (fieldName) { + case Status.SLICE_ID_FIELD -> builder.setSliceId(parser.intValue()); + case Status.TOTAL_FIELD -> builder.setTotal(parser.longValue()); + case Status.UPDATED_FIELD -> builder.setUpdated(parser.longValue()); + case Status.CREATED_FIELD -> builder.setCreated(parser.longValue()); + case Status.DELETED_FIELD -> builder.setDeleted(parser.longValue()); + case Status.BATCHES_FIELD -> builder.setBatches(parser.intValue()); + case Status.VERSION_CONFLICTS_FIELD -> builder.setVersionConflicts(parser.longValue()); + case Status.NOOPS_FIELD -> builder.setNoops(parser.longValue()); + case Status.THROTTLED_RAW_FIELD -> builder.setThrottled(parser.longValue()); + case Status.REQUESTS_PER_SEC_FIELD -> builder.setRequestsPerSecond(parser.floatValue()); + case Status.CANCELED_FIELD -> builder.setReasonCancelled(parser.text()); + case Status.THROTTLED_UNTIL_RAW_FIELD -> builder.setThrottledUntil(parser.longValue()); + } + } + } + return builder.buildStatus(); + } + + /** + * Since {@link BulkByScrollTask.StatusOrException} can contain either an {@link Exception} or a {@link Status} we need to peek + * at a field first before deciding what needs to be parsed since the same 
object could contains either. + * The {@link BulkByScrollTask.StatusOrException#EXPECTED_EXCEPTION_FIELDS} contains the fields that are expected when the serialised + * object was an instance of exception and the {@link Status#FIELDS_SET} is the set of fields expected when the + * serialized object was an instance of Status. + */ + public static BulkByScrollTask.StatusOrException parseStatusOrException(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + if (token == null) { + token = parser.nextToken(); + } + if (token == XContentParser.Token.VALUE_NULL) { + return null; + } else { + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + token = parser.nextToken(); + // This loop is present only to ignore unknown tokens. It breaks as soon as we find a field + // that is allowed. + while (token != XContentParser.Token.END_OBJECT) { + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + String fieldName = parser.currentName(); + // weird way to ignore unknown tokens + if (Status.FIELDS_SET.contains(fieldName)) { + return new BulkByScrollTask.StatusOrException(innerParseStatus(parser)); + } else if (BulkByScrollTask.StatusOrException.EXPECTED_EXCEPTION_FIELDS.contains(fieldName)) { + return new BulkByScrollTask.StatusOrException(ElasticsearchException.innerFromXContent(parser, false)); + } else { + // Ignore unknown tokens + token = parser.nextToken(); + if (token == XContentParser.Token.START_OBJECT || token == XContentParser.Token.START_ARRAY) { + parser.skipChildren(); + } + token = parser.nextToken(); + } + } + throw new XContentParseException("Unable to parse StatusFromException. Expected fields not found."); + } } @Override diff --git a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsTests.java b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsTests.java index f02114a48991c..bda74e75de88c 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.xcontent.XContentParser; @@ -114,7 +115,7 @@ protected SearchProfileResults doParseInstance(XContentParser parser) throws IOE ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); ensureFieldName(parser, parser.nextToken(), SearchProfileResults.PROFILE_FIELD); ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); - SearchProfileResults result = SearchProfileResults.fromXContent(parser); + SearchProfileResults result = SearchResponseUtils.parseSearchProfileResults(parser); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); return result; diff --git a/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java b/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java index f47aaee5ff145..169379441aadd 100644 --- a/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java +++ b/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java @@ 
-12,11 +12,15 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -31,12 +35,46 @@ import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; public class ListTasksResponseTests extends AbstractXContentTestCase { + private static ConstructingObjectParser setupParser( + String name, + TriFunction, List, List, T> ctor + ) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(name, true, constructingObjects -> { + int i = 0; + @SuppressWarnings("unchecked") + List tasks = (List) constructingObjects[i++]; + @SuppressWarnings("unchecked") + List tasksFailures = (List) constructingObjects[i++]; + @SuppressWarnings("unchecked") + List nodeFailures = (List) constructingObjects[i]; + return ctor.apply(tasks, tasksFailures, nodeFailures); + }); + parser.declareObjectArray(optionalConstructorArg(), TaskInfo.PARSER, new ParseField(ListTasksResponse.TASKS)); + parser.declareObjectArray( + optionalConstructorArg(), + (p, c) -> TaskOperationFailure.fromXContent(p), + new ParseField(BaseTasksResponse.TASK_FAILURES) + ); + parser.declareObjectArray( + optionalConstructorArg(), + (p, c) -> ElasticsearchException.fromXContent(p), + new ParseField(BaseTasksResponse.NODE_FAILURES) + ); + return parser; + } + + private static final ConstructingObjectParser PARSER = setupParser( + "list_tasks_response", + ListTasksResponse::new + ); + // ListTasksResponse doesn't directly implement ToXContent because it has multiple XContent representations, so we must wrap here public record ListTasksResponseWrapper(ListTasksResponse in) implements ToXContentObject { @Override @@ -108,7 +146,7 @@ private static List randomTasks() { @Override protected ListTasksResponseWrapper doParseInstance(XContentParser parser) { - return new ListTasksResponseWrapper(ListTasksResponse.fromXContent(parser)); + return new ListTasksResponseWrapper(PARSER.apply(parser, null)); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java index 77b25efd56b35..71837ccf14387 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java @@ -18,7 +18,13 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.action.RestActions; import org.elasticsearch.search.aggregations.InternalAggregations; +import 
org.elasticsearch.search.profile.ProfileResult; +import org.elasticsearch.search.profile.SearchProfileDfsPhaseResult; +import org.elasticsearch.search.profile.SearchProfileQueryPhaseResult; import org.elasticsearch.search.profile.SearchProfileResults; +import org.elasticsearch.search.profile.SearchProfileShardResult; +import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult; +import org.elasticsearch.search.profile.query.QueryProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -27,6 +33,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -186,7 +193,7 @@ public static SearchResponse parseInnerSearchResponse(XContentParser parser) thr } else if (Suggest.NAME.equals(currentFieldName)) { suggest = Suggest.fromXContent(parser); } else if (SearchProfileResults.PROFILE_FIELD.equals(currentFieldName)) { - profile = SearchProfileResults.fromXContent(parser); + profile = parseSearchProfileResults(parser); } else if (RestActions._SHARDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -389,4 +396,73 @@ private static SearchResponse.Cluster parseCluster(String clusterAlias, XContent timedOut ); } + + public static SearchProfileResults parseSearchProfileResults(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + Map profileResults = new HashMap<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.START_ARRAY) { + if (SearchProfileResults.SHARDS_FIELD.equals(parser.currentName())) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + parseProfileResultsEntry(parser, profileResults); + } + } else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_OBJECT) { + parser.skipChildren(); + } + } + return new SearchProfileResults(profileResults); + } + + private static void parseProfileResultsEntry(XContentParser parser, Map searchProfileResults) + throws IOException { + XContentParser.Token token = parser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + SearchProfileDfsPhaseResult searchProfileDfsPhaseResult = null; + List queryProfileResults = new ArrayList<>(); + AggregationProfileShardResult aggProfileShardResult = null; + ProfileResult fetchResult = null; + String id = null; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (SearchProfileResults.ID_FIELD.equals(currentFieldName)) { + id = parser.text(); + } else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if ("searches".equals(currentFieldName)) { + while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) { + queryProfileResults.add(QueryProfileShardResult.fromXContent(parser)); + } + } else if (AggregationProfileShardResult.AGGREGATIONS.equals(currentFieldName)) { + aggProfileShardResult = AggregationProfileShardResult.fromXContent(parser); + 
} else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if ("dfs".equals(currentFieldName)) { + searchProfileDfsPhaseResult = SearchProfileDfsPhaseResult.fromXContent(parser); + } else if ("fetch".equals(currentFieldName)) { + fetchResult = ProfileResult.fromXContent(parser); + } else { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + SearchProfileShardResult result = new SearchProfileShardResult( + new SearchProfileQueryPhaseResult(queryProfileResults, aggProfileShardResult), + fetchResult + ); + result.getQueryPhase().setSearchProfileDfsPhaseResult(searchProfileDfsPhaseResult); + searchProfileResults.put(id, result); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponse.java index 749304caf6e20..97c7d6d8cb60d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponse.java @@ -12,18 +12,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.function.Function; -import java.util.stream.Collectors; /** * The response object returned by the Explain Lifecycle API. 
@@ -37,26 +32,6 @@ public class ExplainLifecycleResponse extends ActionResponse implements ToXConte private Map indexResponses; - @SuppressWarnings("unchecked") - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "explain_lifecycle_response", - a -> new ExplainLifecycleResponse( - ((List) a[0]).stream() - .collect(Collectors.toMap(IndexLifecycleExplainResponse::getIndex, Function.identity())) - ) - ); - static { - PARSER.declareNamedObjects( - ConstructingObjectParser.constructorArg(), - (p, c, n) -> IndexLifecycleExplainResponse.PARSER.apply(p, c), - INDICES_FIELD - ); - } - - public static ExplainLifecycleResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - public ExplainLifecycleResponse(StreamInput in) throws IOException { super(in); int size = in.readVInt(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java index 67c5e22902cf2..f06ba16d9da78 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -42,7 +41,6 @@ import java.util.Objects; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; public class PreviewTransformAction extends ActionType { @@ -154,26 +152,6 @@ public static class Response extends ActionResponse implements ToXContentObject private final List> docs; private final TransformDestIndexSettings generatedDestIndexSettings; - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "data_frame_transform_preview", - true, - args -> { - @SuppressWarnings("unchecked") - List> docs = (List>) args[0]; - TransformDestIndexSettings generatedDestIndex = (TransformDestIndexSettings) args[1]; - - return new Response(docs, generatedDestIndex); - } - ); - static { - PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> p.mapOrdered(), PREVIEW); - PARSER.declareObject( - optionalConstructorArg(), - (p, c) -> TransformDestIndexSettings.fromXContent(p), - GENERATED_DEST_INDEX_SETTINGS - ); - } - public Response(List> docs, TransformDestIndexSettings generatedDestIndexSettings) { this.docs = docs; this.generatedDestIndexSettings = generatedDestIndexSettings; @@ -237,9 +215,5 @@ public int hashCode() { public String toString() { return Strings.toString(this, true, true); } - - public static Response fromXContent(final XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponseTests.java index ab084e66c3ad1..937502281b64d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponseTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponseTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; @@ -19,10 +20,29 @@ import java.io.IOException; import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; public class ExplainLifecycleResponseTests extends AbstractXContentSerializingTestCase { + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "explain_lifecycle_response", + a -> new ExplainLifecycleResponse( + ((List) a[0]).stream() + .collect(Collectors.toMap(IndexLifecycleExplainResponse::getIndex, Function.identity())) + ) + ); + static { + PARSER.declareNamedObjects( + ConstructingObjectParser.constructorArg(), + (p, c, n) -> IndexLifecycleExplainResponse.PARSER.apply(p, c), + ExplainLifecycleResponse.INDICES_FIELD + ); + } + @Override protected ExplainLifecycleResponse createTestInstance() { Map indexResponses = new HashMap<>(); @@ -51,7 +71,7 @@ protected ExplainLifecycleResponse mutateInstance(ExplainLifecycleResponse respo @Override protected ExplainLifecycleResponse doParseInstance(XContentParser parser) throws IOException { - return ExplainLifecycleResponse.fromXContent(parser); + return PARSER.apply(parser, null); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformsActionResponseTests.java index 9613fc83efd50..9a573818fb111 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformsActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformsActionResponseTests.java @@ -9,8 +9,10 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.transform.action.PreviewTransformAction.Response; +import org.elasticsearch.xpack.core.transform.transforms.TransformDestIndexSettings; import org.elasticsearch.xpack.core.transform.transforms.TransformDestIndexSettingsTests; import java.io.IOException; @@ -18,8 +20,31 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + public class PreviewTransformsActionResponseTests extends AbstractXContentSerializingTestCase { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "data_frame_transform_preview", + true, + args -> { + @SuppressWarnings("unchecked") + List> docs = (List>) args[0]; + TransformDestIndexSettings generatedDestIndex = (TransformDestIndexSettings) args[1]; + + return new Response(docs, generatedDestIndex); + } + ); + + static { + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> p.mapOrdered(), 
PreviewTransformAction.Response.PREVIEW); + PARSER.declareObject( + optionalConstructorArg(), + (p, c) -> TransformDestIndexSettings.fromXContent(p), + PreviewTransformAction.Response.GENERATED_DEST_INDEX_SETTINGS + ); + } + public static Response randomPreviewResponse() { int size = randomIntBetween(0, 10); List> data = new ArrayList<>(size); @@ -32,7 +57,7 @@ public static Response randomPreviewResponse() { @Override protected Response doParseInstance(XContentParser parser) throws IOException { - return Response.fromXContent(parser); + return PARSER.parse(parser, null); } @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/PutAnalyticsCollectionAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/PutAnalyticsCollectionAction.java index 8a544f735b570..659c58d2bd1b8 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/PutAnalyticsCollectionAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/PutAnalyticsCollectionAction.java @@ -146,20 +146,5 @@ protected void addCustomFields(XContentBuilder builder, Params params) throws IO builder.field(COLLECTION_NAME_FIELD.getPreferredName(), name); } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "put_analytics_collection_response", - false, - (p) -> { - return new Response((boolean) p[0], (String) p[1]); - } - ); - static { - PARSER.declareString(constructorArg(), COLLECTION_NAME_FIELD); - } - - public static Response fromXContent(String resourceName, XContentParser parser) throws IOException { - return new Response(AcknowledgedResponse.fromXContent(parser).isAcknowledged(), resourceName); - } - } } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/action/PutAnalyticsCollectionResponseBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/action/PutAnalyticsCollectionResponseBWCSerializingTests.java index ce6259b40765e..f6a13477acae7 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/action/PutAnalyticsCollectionResponseBWCSerializingTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/action/PutAnalyticsCollectionResponseBWCSerializingTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.application.analytics.action; import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; @@ -40,7 +41,7 @@ protected PutAnalyticsCollectionAction.Response mutateInstance(PutAnalyticsColle @Override protected PutAnalyticsCollectionAction.Response doParseInstance(XContentParser parser) throws IOException { - return PutAnalyticsCollectionAction.Response.fromXContent(this.name, parser); + return new PutAnalyticsCollectionAction.Response(AcknowledgedResponse.fromXContent(parser).isAcknowledged(), this.name); } @Override From d0c7da6bb7f9ff3373c71d8b9b89ac4d877f1660 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 22 Mar 2024 11:32:56 +0000 Subject: [PATCH 12/79] Improves Error Handling in Chunked Inference API (#106634) Previously, if one input failed within a batch call to 
the chunked inference API, the entire request was labeled as failed. This update alters this process, now allowing individual errors to be conveyed in the response. This empowers the caller to address failures specifically and understand the root cause, rather than receiving a vague error message like: 'Expected a chunked inference [chunked_text_embedding_service_results] received [error]'. --- .../results/ErrorChunkedInferenceResults.java | 97 +++++++++++++++++++ .../InferenceNamedWriteablesProvider.java | 8 ++ .../ElasticsearchInternalService.java | 4 + .../services/elser/ElserInternalService.java | 4 + .../ErrorChunkedInferenceResultsTests.java | 43 ++++++++ .../ElasticsearchInternalServiceTests.java | 9 +- .../elser/ElserInternalServiceTests.java | 8 +- 7 files changed, 171 insertions(+), 2 deletions(-) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ErrorChunkedInferenceResults.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/ErrorChunkedInferenceResultsTests.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ErrorChunkedInferenceResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ErrorChunkedInferenceResults.java new file mode 100644 index 0000000000000..eef864f2e8992 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ErrorChunkedInferenceResults.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.inference.results; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.inference.ChunkedInferenceServiceResults; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class ErrorChunkedInferenceResults implements ChunkedInferenceServiceResults { + + public static final String NAME = "error_chunked"; + + private final Exception exception; + + public ErrorChunkedInferenceResults(Exception exception) { + this.exception = Objects.requireNonNull(exception); + } + + public ErrorChunkedInferenceResults(StreamInput in) throws IOException { + this.exception = in.readException(); + } + + public Exception getException() { + return exception; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeException(exception); + } + + @Override + public boolean equals(Object object) { + if (object == this) { + return true; + } + if (object == null || getClass() != object.getClass()) { + return false; + } + ErrorChunkedInferenceResults that = (ErrorChunkedInferenceResults) object; + // Just compare the message for serialization test purposes + return Objects.equals(exception.getMessage(), that.exception.getMessage()); + } + + @Override + public int hashCode() { + // Just compare the message for serialization test purposes + return Objects.hash(exception.getMessage()); + } + + @Override + public List transformToCoordinationFormat() { + return null; + } + + @Override + public List transformToLegacyFormat() { + return null; + } + + @Override + public Map asMap() { + Map asMap = new LinkedHashMap<>(); + asMap.put(NAME, exception.getMessage()); + return asMap; + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(NAME, exception.getMessage()); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java index c38b427200744..80fd98c40516e 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java @@ -16,6 +16,7 @@ import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.xpack.core.inference.results.ChunkedSparseEmbeddingResults; import org.elasticsearch.xpack.core.inference.results.ChunkedTextEmbeddingResults; +import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; import org.elasticsearch.xpack.core.inference.results.LegacyTextEmbeddingResults; import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; import org.elasticsearch.xpack.core.inference.results.TextEmbeddingByteResults; @@ -62,6 +63,13 @@ public static List getNamedWriteables() { ); // Chunked inference results + namedWriteables.add( + new NamedWriteableRegistry.Entry( + 
InferenceServiceResults.class, + ErrorChunkedInferenceResults.NAME, + ErrorChunkedInferenceResults::new + ) + ); namedWriteables.add( new NamedWriteableRegistry.Entry( InferenceServiceResults.class, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index 1aafa340268f3..a07ebe56a9258 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -28,6 +28,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.inference.results.ChunkedTextEmbeddingResults; +import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; import org.elasticsearch.xpack.core.inference.results.TextEmbeddingResults; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.action.InferTrainedModelDeploymentAction; @@ -36,6 +37,7 @@ import org.elasticsearch.xpack.core.ml.action.StopTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; +import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TokenizationConfigUpdate; import org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings; @@ -382,6 +384,8 @@ private List translateChunkedResults(List translateChunkedResults(List { + + public static ErrorChunkedInferenceResults createRandomResults() { + return new ErrorChunkedInferenceResults( + randomBoolean() + ? 
new ElasticsearchTimeoutException(randomAlphaOfLengthBetween(10, 50)) + : new ElasticsearchStatusException(randomAlphaOfLengthBetween(10, 50), randomFrom(RestStatus.values())) + ); + } + + @Override + protected Writeable.Reader instanceReader() { + return ErrorChunkedInferenceResults::new; + } + + @Override + protected ErrorChunkedInferenceResults createTestInstance() { + return createRandomResults(); + } + + @Override + protected ErrorChunkedInferenceResults mutateInstance(ErrorChunkedInferenceResults instance) throws IOException { + return new ErrorChunkedInferenceResults(new RuntimeException(randomAlphaOfLengthBetween(10, 50))); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java index 4f0deaceb17da..0757012b234bd 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java @@ -24,8 +24,10 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.inference.results.ChunkedTextEmbeddingResults; +import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; import org.elasticsearch.xpack.core.ml.action.InferTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.inference.results.ChunkedTextEmbeddingResultsTests; +import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; import org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings; import java.util.ArrayList; @@ -37,6 +39,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.ArgumentMatchers.any; @@ -346,6 +349,7 @@ public void testChunkInfer() { var mlTrainedModelResults = new ArrayList(); mlTrainedModelResults.add(ChunkedTextEmbeddingResultsTests.createRandomResults()); mlTrainedModelResults.add(ChunkedTextEmbeddingResultsTests.createRandomResults()); + mlTrainedModelResults.add(new ErrorInferenceResults(new RuntimeException("boom"))); var response = new InferTrainedModelDeploymentAction.Response(mlTrainedModelResults); ThreadPool threadpool = new TestThreadPool("test"); @@ -372,7 +376,7 @@ public void testChunkInfer() { var gotResults = new AtomicBoolean(); var resultsListener = ActionListener.>wrap(chunkedResponse -> { - assertThat(chunkedResponse, hasSize(2)); + assertThat(chunkedResponse, hasSize(3)); assertThat(chunkedResponse.get(0), instanceOf(ChunkedTextEmbeddingResults.class)); var result1 = (ChunkedTextEmbeddingResults) chunkedResponse.get(0); assertEquals( @@ -385,6 +389,9 @@ public void testChunkInfer() { ((org.elasticsearch.xpack.core.ml.inference.results.ChunkedTextEmbeddingResults) mlTrainedModelResults.get(1)).getChunks(), result2.getChunks() ); + var result3 = (ErrorChunkedInferenceResults) chunkedResponse.get(2); + assertThat(result3.getException(), instanceOf(RuntimeException.class)); + assertThat(result3.getException().getMessage(), containsString("boom")); gotResults.set(true); }, ESTestCase::fail); diff --git 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceTests.java index 6da634afddeb0..f2fd195ab8c5a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceTests.java @@ -23,8 +23,10 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.inference.results.ChunkedSparseEmbeddingResults; +import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; import org.elasticsearch.xpack.core.ml.action.InferTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.inference.results.ChunkedTextExpansionResultsTests; +import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; import java.util.ArrayList; import java.util.Collections; @@ -330,6 +332,7 @@ public void testChunkInfer() { var mlTrainedModelResults = new ArrayList(); mlTrainedModelResults.add(ChunkedTextExpansionResultsTests.createRandomResults()); mlTrainedModelResults.add(ChunkedTextExpansionResultsTests.createRandomResults()); + mlTrainedModelResults.add(new ErrorInferenceResults(new RuntimeException("boom"))); var response = new InferTrainedModelDeploymentAction.Response(mlTrainedModelResults); ThreadPool threadpool = new TestThreadPool("test"); @@ -357,7 +360,7 @@ public void testChunkInfer() { var gotResults = new AtomicBoolean(); var resultsListener = ActionListener.>wrap(chunkedResponse -> { - assertThat(chunkedResponse, hasSize(2)); + assertThat(chunkedResponse, hasSize(3)); assertThat(chunkedResponse.get(0), instanceOf(ChunkedSparseEmbeddingResults.class)); var result1 = (ChunkedSparseEmbeddingResults) chunkedResponse.get(0); assertEquals( @@ -370,6 +373,9 @@ public void testChunkInfer() { ((org.elasticsearch.xpack.core.ml.inference.results.ChunkedTextExpansionResults) mlTrainedModelResults.get(1)).getChunks(), result2.getChunkedResults() ); + var result3 = (ErrorChunkedInferenceResults) chunkedResponse.get(2); + assertThat(result3.getException(), instanceOf(RuntimeException.class)); + assertThat(result3.getException().getMessage(), containsString("boom")); gotResults.set(true); }, ESTestCase::fail); From f3f117a3646cb5ca20c58ec0a7d077ebc664dbc7 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Fri, 22 Mar 2024 12:52:30 +0100 Subject: [PATCH 13/79] [DOCS] Update params for Update Connector Filtering API (#106662) --- .../apis/update-connector-filtering-api.asciidoc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/reference/connector/apis/update-connector-filtering-api.asciidoc b/docs/reference/connector/apis/update-connector-filtering-api.asciidoc index 3e81f0fda2ce7..04c40ebf9fa4e 100644 --- a/docs/reference/connector/apis/update-connector-filtering-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-filtering-api.asciidoc @@ -55,32 +55,32 @@ Contains the set of rules that are actively used for sync jobs. The `active` obj The value to be used in conjunction with the rule for matching the contents of the document's field. ** `order` (Required, number) + The order in which the rules are applied. The first rule to match has its policy applied. 
- ** `created_at` (Optional, datetime) + + ** `created_at` (Required, datetime) + The timestamp when the rule was added. - ** `updated_at` (Optional, datetime) + + ** `updated_at` (Required, datetime) + The timestamp when the rule was last edited. - * `advanced_snippet` (Optional, object) + + * `advanced_snippet` (Required, object) + Used for {enterprise-search-ref}/sync-rules.html#sync-rules-advanced[advanced filtering] at query time, with the following sub-attributes: ** `value` (Required, object) + A JSON object passed directly to the connector for advanced filtering. - ** `created_at` (Optional, datetime) + + ** `created_at` (Required, datetime) + The timestamp when this JSON object was created. - ** `updated_at` (Optional, datetime) + + ** `updated_at` (Required, datetime) + The timestamp when this JSON object was last edited. - * `validation` (Optional, object) + + * `validation` (Required, object) + Provides validation status for the rules, including: ** `state` (Required, string) + Indicates the validation state: "edited", "valid", or "invalid". - ** `errors` (Optional, object) + + ** `errors` (Required, object) + Contains details about any validation errors, with sub-attributes: *** `ids` (Required, string) + The ID(s) of any rules deemed invalid. *** `messages` (Required, string) + Messages explaining what is invalid about the rules. -- `draft` (Optional, object) + +- `draft` (Required, object) + An object identical in structure to the `active` object, but used for drafting and editing filtering rules before they become active. From 16787c2b021e67f2b8e60ddaf8802d45fc1a8459 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Fri, 22 Mar 2024 13:14:07 +0100 Subject: [PATCH 14/79] [Profiling] Fix spurious test failures (#106660) With this commit we force-merge indices to workaround elastic/elasticsearch#106657. We also revert a test mute that was applied to the wrong test case so all integration tests are executed again. 
Relates #106309 Relates #106657 Closes #106308 --- .../elasticsearch/xpack/profiling/GetStackTracesActionIT.java | 1 - .../org/elasticsearch/xpack/profiling/ProfilingTestCase.java | 3 +++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java index 501d564bbda0d..62b8242e7df86 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java @@ -42,7 +42,6 @@ public void testGetStackTracesUnfiltered() throws Exception { assertEquals("vmlinux", response.getExecutables().get("lHp5_WAgpLy2alrUVab6HA")); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106308") public void testGetStackTracesFromAPMWithMatchNoDownsampling() throws Exception { BoolQueryBuilder query = QueryBuilders.boolQuery(); query.must().add(QueryBuilders.termQuery("transaction.name", "encodeSha1")); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java index 383f46c97f02f..58b018a13e096 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java @@ -143,6 +143,9 @@ protected final void doSetupData() throws Exception { bulkIndex("data/apm-legacy-test.ndjson"); refresh(); + + // temporary workaround for #106657, see also #106308. + forceMerge(); } @After From 46b5596767d8f267e0e29cce50673d34993325e9 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 22 Mar 2024 12:47:47 +0000 Subject: [PATCH 15/79] Avoid double-sorting results in TransportGetSnapshotsAction (#106644) Today we call `sortSnapshots` twice, once for the per-repository results, and then again for the combined results. The first call has no limit or offset, and the sorting is made redundant by the second call, so only really serves to filter out snapshots which do not match the `?after` parameter. Meanwhile the `?after` parameter filtering is redundant in the second call. This commit separates these two steps to avoid the redundant sorting in the first step and the redundant filtering in the second. 
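For illustration, the separation can be sketched roughly as below — hypothetical names, not the actual `GetSnapshotsOperation` code: the per-repository step applies only the `?after` predicate, and the single combined step does the sorting plus offset/size pagination.

```java
import java.util.Comparator;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Stream;

// Illustrative sketch only; class, record and method names are hypothetical.
class TwoStepSnapshotListing {
    record Snap(String repository, String name, long startTime) {}

    // Step 1, per repository: apply only the ?after cursor filter, no sorting yet.
    static List<Snap> afterFilter(List<Snap> perRepoResults, Predicate<Snap> afterPredicate) {
        return perRepoResults.stream().filter(afterPredicate).toList();
    }

    // Step 2, once over the combined results: sort, then apply offset and size
    // (a negative size stands in for "no limit" in this sketch).
    static List<Snap> sortAndPaginate(Stream<Snap> combined, Comparator<Snap> order, int offset, int size) {
        Stream<Snap> sorted = combined.sorted(order).skip(offset);
        return (size < 0 ? sorted : sorted.limit(size)).toList();
    }
}
```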
--- .../get/TransportGetSnapshotsAction.java | 49 ++++++++++--------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 898adf721be33..6d29c36bdcda1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -164,8 +164,7 @@ private class GetSnapshotsOperation { @Nullable private final String fromSortValue; private final int offset; - @Nullable - private final SnapshotSortKey.After after; + private final Predicate afterPredicate; private final int size; // current state @@ -210,7 +209,6 @@ private class GetSnapshotsOperation { this.order = order; this.fromSortValue = fromSortValue; this.offset = offset; - this.after = after; this.size = size; this.snapshotsInProgress = snapshotsInProgress; this.verbose = verbose; @@ -219,6 +217,7 @@ private class GetSnapshotsOperation { this.snapshotNamePredicate = SnapshotNamePredicate.forSnapshots(ignoreUnavailable, snapshots); this.fromSortValuePredicates = SnapshotPredicates.forFromSortValue(fromSortValue, sortBy, order); this.slmPolicyPredicate = SlmPolicyPredicate.forPolicies(policies); + this.afterPredicate = sortBy.getAfterPredicate(after, order); this.getSnapshotInfoExecutor = new GetSnapshotInfoExecutor( threadPool.info(ThreadPool.Names.SNAPSHOT_META).getMax(), @@ -344,20 +343,15 @@ private void loadSnapshotInfos(String repo, @Nullable RepositoryData repositoryD } else { assert fromSortValuePredicates.isMatchAll() : "filtering is not supported in non-verbose mode"; assert slmPolicyPredicate == SlmPolicyPredicate.MATCH_ALL_POLICIES : "filtering is not supported in non-verbose mode"; - final var currentSnapshots = snapshotsInProgress.forRepo(repo) - .stream() - .map(entry -> SnapshotInfo.inProgress(entry).basic()) - .toList(); - - final SnapshotsInRepo snapshotInfos; - if (repositoryData != null) { - // want non-current snapshots as well, which are found in the repository data - snapshotInfos = buildSimpleSnapshotInfos(toResolve, repo, repositoryData, currentSnapshots); - } else { - // only want current snapshots - snapshotInfos = sortSnapshotsWithNoOffsetOrLimit(currentSnapshots); - } - listener.onResponse(snapshotInfos); + + listener.onResponse( + buildSimpleSnapshotInfos( + toResolve, + repo, + repositoryData, + snapshotsInProgress.forRepo(repo).stream().map(entry -> SnapshotInfo.inProgress(entry).basic()).toList() + ) + ); } } @@ -446,7 +440,7 @@ public void onFailure(Exception e) { .addListener(listener.safeMap(v -> // no need to synchronize access to snapshots: Repository#getSnapshotInfo fails fast but we're on the success path here - sortSnapshotsWithNoOffsetOrLimit(snapshots)), executor, threadPool.getThreadContext()); + applyAfterPredicate(snapshots)), executor, threadPool.getThreadContext()); } private SnapshotsInRepo buildSimpleSnapshotInfos( @@ -455,6 +449,11 @@ private SnapshotsInRepo buildSimpleSnapshotInfos( final RepositoryData repositoryData, final List currentSnapshots ) { + if (repositoryData == null) { + // only want current snapshots + return applyAfterPredicate(currentSnapshots); + } // else want non-current snapshots as well, which are found in the repository data + List snapshotInfos = new ArrayList<>(); for 
(SnapshotInfo snapshotInfo : currentSnapshots) { assert snapshotInfo.startTime() == 0L && snapshotInfo.endTime() == 0L && snapshotInfo.totalShards() == 0L : snapshotInfo; @@ -483,16 +482,16 @@ private SnapshotsInRepo buildSimpleSnapshotInfos( ) ); } - return sortSnapshotsWithNoOffsetOrLimit(snapshotInfos); + return applyAfterPredicate(snapshotInfos); } - private SnapshotsInRepo sortSnapshotsWithNoOffsetOrLimit(List snapshotInfos) { - return sortSnapshots(snapshotInfos.stream(), snapshotInfos.size(), 0, GetSnapshotsRequest.NO_LIMIT); + private SnapshotsInRepo applyAfterPredicate(List snapshotInfos) { + return new SnapshotsInRepo(snapshotInfos.stream().filter(afterPredicate).toList(), snapshotInfos.size(), 0); } private SnapshotsInRepo sortSnapshots(Stream snapshotInfoStream, int totalCount, int offset, int size) { assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.MANAGEMENT); - final var resultsStream = snapshotInfoStream.filter(sortBy.getAfterPredicate(after, order)) + final var resultsStream = snapshotInfoStream.peek(this::assertSatisfiesAllPredicates) .sorted(sortBy.getSnapshotInfoComparator(order)) .skip(offset); if (size == GetSnapshotsRequest.NO_LIMIT) { @@ -513,6 +512,12 @@ private SnapshotsInRepo sortSnapshots(Stream snapshotInfoStream, i } } + private void assertSatisfiesAllPredicates(SnapshotInfo snapshotInfo) { + assert matchesPredicates(snapshotInfo); + assert afterPredicate.test(snapshotInfo); + assert indices || snapshotInfo.indices().isEmpty(); + } + private boolean matchesPredicates(SnapshotId snapshotId, RepositoryData repositoryData) { if (fromSortValuePredicates.test(snapshotId, repositoryData) == false) { return false; From 391f010f9b5b111efbe649602733dedf8555a182 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Fri, 22 Mar 2024 14:01:31 +0100 Subject: [PATCH 16/79] ES|QL: Fix usage of IN operator with TEXT fields (#106654) --- docs/changelog/106654.yaml | 6 ++ .../predicate/operator/comparison/In.java | 4 +- .../xpack/esql/analysis/AnalyzerTests.java | 26 +++++++ .../rest-api-spec/test/esql/80_text.yml | 76 +++++++++++++++++++ 4 files changed, 110 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/106654.yaml diff --git a/docs/changelog/106654.yaml b/docs/changelog/106654.yaml new file mode 100644 index 0000000000000..3443b68482443 --- /dev/null +++ b/docs/changelog/106654.yaml @@ -0,0 +1,6 @@ +pr: 106654 +summary: "ES|QL: Fix usage of IN operator with TEXT fields" +area: ES|QL +type: bug +issues: + - 105379 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/In.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/In.java index b20160ac936d6..ab2f9079b610c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/In.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/In.java @@ -7,10 +7,10 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; +import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Expressions; -import org.elasticsearch.xpack.ql.expression.TypeResolutions; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.InProcessor; import org.elasticsearch.xpack.ql.tree.NodeInfo; 
import org.elasticsearch.xpack.ql.tree.Source; @@ -67,7 +67,7 @@ protected boolean areCompatible(DataType left, DataType right) { @Override protected TypeResolution resolveType() { // TODO: move the foldability check from QL's In to SQL's and remove this method - TypeResolution resolution = TypeResolutions.isExact(value(), functionName(), DEFAULT); + TypeResolution resolution = EsqlTypeResolutions.isExact(value(), functionName(), DEFAULT); if (resolution.unresolved()) { return resolution; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 975b31b967fe0..543e7c93526d2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -1775,6 +1775,32 @@ public void testUnsupportedTypesInStats() { ); } + public void testInOnText() { + assertProjectionWithMapping(""" + from a_index + | eval text in (\"a\", \"b\", \"c\") + | keep text + """, "mapping-multi-field-variation.json", "text"); + + assertProjectionWithMapping(""" + from a_index + | eval text in (\"a\", \"b\", \"c\", text) + | keep text + """, "mapping-multi-field-variation.json", "text"); + + assertProjectionWithMapping(""" + from a_index + | eval text not in (\"a\", \"b\", \"c\") + | keep text + """, "mapping-multi-field-variation.json", "text"); + + assertProjectionWithMapping(""" + from a_index + | eval text not in (\"a\", \"b\", \"c\", text) + | keep text + """, "mapping-multi-field-variation.json", "text"); + } + private void verifyUnsupported(String query, String errorMessage) { verifyUnsupported(query, errorMessage, "mapping-multi-field-variation.json"); } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml index 09462691688bf..d73efe1788ce3 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml @@ -121,6 +121,82 @@ setup: - length: { values: 1 } - match: { values.0: [ 20, "John", "Payroll Specialist", "baz"] } +--- +"IN on text": + - skip: + version: " - 8.13.99" + reason: "IN on text fixed in v 8.14" + features: allowed_warnings_regex + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | where tag IN ("abc", "baz") | keep emp_no, name, job, tag' + + - match: { columns.0.name: "emp_no" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "job" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "tag" } + - match: { columns.3.type: "text" } + + - length: { values: 1 } + - match: { values.0: [ 20, "John", "Payroll Specialist", "baz"] } + +--- +"IN on text and itself": + - skip: + version: " - 8.13.99" + reason: "IN on text fixed in v 8.14" + features: allowed_warnings_regex + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | where tag IN ("abc", tag) | keep emp_no, name, job, tag | sort emp_no' + + - match: { columns.0.name: "emp_no" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: 
"keyword" } + - match: { columns.2.name: "job" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "tag" } + - match: { columns.3.type: "text" } + + - length: { values: 2 } + - match: { values.0: [ 10, "Jenny", "IT Director", "foo bar"] } + - match: { values.1: [ 20, "John", "Payroll Specialist", "baz"] } + +--- +"NOT IN on text": + - skip: + version: " - 8.13.99" + reason: "IN on text fixed in v 8.14" + features: allowed_warnings_regex + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | where tag NOT IN ("abc", "baz") | keep emp_no, name, job, tag' + + - match: { columns.0.name: "emp_no" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "job" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "tag" } + - match: { columns.3.type: "text" } + + - length: { values: 1 } + - match: { values.0: [ 10, "Jenny", "IT Director", "foo bar"] } + --- "eval and filter text": - do: From bd6f3c7f78c7577a8fabd945f3cefb6f11ed28be Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 22 Mar 2024 09:18:43 -0400 Subject: [PATCH 17/79] Test mute for issue #106647 (#106670) --- .../search/query/PartialHitCountCollectorTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/search/query/PartialHitCountCollectorTests.java b/server/src/test/java/org/elasticsearch/search/query/PartialHitCountCollectorTests.java index c7967f0de5411..c63e2499147c1 100644 --- a/server/src/test/java/org/elasticsearch/search/query/PartialHitCountCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/PartialHitCountCollectorTests.java @@ -118,6 +118,7 @@ public void testHitCountFromWeightDoesNotEarlyTerminate() throws IOException { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106647") public void testCollectedHitCount() throws Exception { Query query = new NonCountingTermQuery(new Term("string", "a1")); int threshold = randomIntBetween(1, 10000); From f02adacceae8d44665f781b0c78331ac80140f62 Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Fri, 22 Mar 2024 13:38:21 +0000 Subject: [PATCH 18/79] Fix testDataStreamLifecycleDownsampleRollingRestart (#106664) This removes a redundant thread creation when triggering a rolling restart as the method is already async and drops the check for cluster health as that might hit a node that's being shut down (the master node in particular) The test assertions still hold i.e. 
the successful downsampling of the source index --- ...StreamLifecycleDownsampleDisruptionIT.java | 84 +------------------ 1 file changed, 4 insertions(+), 80 deletions(-) diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java index 65a4d84e921a2..afa2e95e1284c 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java @@ -31,9 +31,7 @@ import java.util.Collection; import java.util.List; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; import static org.elasticsearch.xpack.downsample.DataStreamLifecycleDriver.getBackingIndices; import static org.elasticsearch.xpack.downsample.DataStreamLifecycleDriver.putTSDBIndexTemplate; @@ -57,11 +55,10 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { return settings.build(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105577") @TestLogging(value = "org.elasticsearch.datastreams.lifecycle:TRACE", reason = "debugging") public void testDataStreamLifecycleDownsampleRollingRestart() throws Exception { final InternalTestCluster cluster = internalCluster(); - final List masterNodes = cluster.startMasterOnlyNodes(1); + cluster.startMasterOnlyNodes(1); cluster.startDataOnlyNodes(3); ensureStableCluster(cluster.size()); ensureGreen(); @@ -99,36 +96,12 @@ public void testDataStreamLifecycleDownsampleRollingRestart() throws Exception { long sleepTime = randomLongBetween(3000, 4500); logger.info("-> giving data stream lifecycle [{}] millis to make some progress before starting the disruption", sleepTime); Thread.sleep(sleepTime); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new CountDownLatch(1); List backingIndices = getBackingIndices(client(), dataStreamName); // first generation index String sourceIndex = backingIndices.get(0); - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (ignored) -> { - try { - cluster.rollingRestart(new InternalTestCluster.RestartCallback() { - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - })).start(); - - waitUntil(() -> getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty(), 60, TimeUnit.SECONDS); - ensureStableCluster(cluster.numDataAndMasterNodes()); + internalCluster().rollingRestart(new InternalTestCluster.RestartCallback() { + }); // if the source index has already been downsampled and moved into the data stream just use its name directly final String targetIndex = sourceIndex.startsWith("downsample-5m-") ? 
sourceIndex : "downsample-5m-" + sourceIndex; @@ -147,55 +120,6 @@ public boolean validateClusterForming() { throw new AssertionError(e); } }, 60, TimeUnit.SECONDS); - } - - interface DisruptionListener { - void disruptionStart(); - - void disruptionEnd(); - } - - private class Disruptor implements Runnable { - final InternalTestCluster cluster; - private final String sourceIndex; - private final DisruptionListener listener; - private final String clientNode; - private final Consumer disruption; - - private Disruptor( - final InternalTestCluster cluster, - final String sourceIndex, - final DisruptionListener listener, - final String clientNode, - final Consumer disruption - ) { - this.cluster = cluster; - this.sourceIndex = sourceIndex; - this.listener = listener; - this.clientNode = clientNode; - this.disruption = disruption; - } - - @Override - public void run() { - listener.disruptionStart(); - try { - final String candidateNode = cluster.client(clientNode) - .admin() - .cluster() - .prepareSearchShards(sourceIndex) - .get() - .getNodes()[0].getName(); - logger.info("Candidate node [" + candidateNode + "]"); - disruption.accept(candidateNode); - ensureGreen(sourceIndex); - ensureStableCluster(cluster.numDataAndMasterNodes(), clientNode); - - } catch (Exception e) { - logger.error("Ignoring Error while injecting disruption [" + e.getMessage() + "]"); - } finally { - listener.disruptionEnd(); - } - } + ensureGreen(targetIndex); } } From 9a907704b7300f3364d74047a92e79a265952c6c Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 22 Mar 2024 13:46:46 +0000 Subject: [PATCH 19/79] Move `XContent` -> `SnapshotInfo` parsing out of prod (#106669) The code to parse a `SnapshotInfo` object out of an `XContent` response body is only used in tests, so this commit moves it out of the production codebase and into the test framework. 
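As a rough usage sketch (test code only; assumes an `XContentParser` positioned on a snapshot response body), callers that previously went through `SnapshotInfo.SNAPSHOT_INFO_PARSER` now use the relocated test-framework helper:

```java
import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotInfoUtils;
import org.elasticsearch.xcontent.XContentParser;

class SnapshotInfoParsingSketch {
    // Parse a SnapshotInfo from a response body in a test; production code no longer needs this path.
    static SnapshotInfo parseSnapshot(XContentParser parser) {
        return SnapshotInfoUtils.snapshotInfoFromXContent(parser);
    }
}
```

This keeps the production `SnapshotInfo` class responsible only for writing XContent, with response parsing living alongside the tests that need it.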
--- .../http/snapshots/RestGetSnapshotsIT.java | 3 +- .../create/CreateSnapshotResponse.java | 30 +- .../create/TransportCreateSnapshotAction.java | 3 +- .../elasticsearch/snapshots/SnapshotInfo.java | 258 ++---------------- .../create/CreateSnapshotResponseTests.java | 3 +- .../snapshots/SnapshotInfoUtils.java | 244 +++++++++++++++++ .../xpack/slm/SnapshotLifecycleTaskTests.java | 5 +- 7 files changed, 285 insertions(+), 261 deletions(-) create mode 100644 test/framework/src/main/java/org/elasticsearch/snapshots/SnapshotInfoUtils.java diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java index 88d910b61fa52..b12a70ccb8425 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotInfoUtils; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -524,7 +525,7 @@ private static GetSnapshotsResponse sortedWithLimit( static { GET_SNAPSHOT_PARSER.declareObjectArray( ConstructingObjectParser.constructorArg(), - (p, c) -> SnapshotInfo.SNAPSHOT_INFO_PARSER.apply(p, c).build(), + (p, c) -> SnapshotInfoUtils.snapshotInfoFromXContent(p), new ParseField("snapshots") ); GET_SNAPSHOT_PARSER.declareObject( diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java index 7062efd301991..4e04506d03d6a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java @@ -14,12 +14,8 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.snapshots.SnapshotInfo; -import org.elasticsearch.snapshots.SnapshotInfo.SnapshotInfoBuilder; -import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; @@ -29,24 +25,8 @@ */ public class CreateSnapshotResponse extends ActionResponse implements ToXContentObject { - private static final ObjectParser PARSER = new ObjectParser<>( - CreateSnapshotResponse.class.getName(), - true, - CreateSnapshotResponse::new - ); - - static { - PARSER.declareObject( - CreateSnapshotResponse::setSnapshotInfoFromBuilder, - SnapshotInfo.SNAPSHOT_INFO_PARSER, - new ParseField("snapshot") - ); - } - @Nullable - private SnapshotInfo snapshotInfo; - - CreateSnapshotResponse() {} + private final SnapshotInfo snapshotInfo; public CreateSnapshotResponse(@Nullable SnapshotInfo snapshotInfo) { this.snapshotInfo = snapshotInfo; @@ -57,10 +37,6 @@ public CreateSnapshotResponse(StreamInput in) throws IOException { snapshotInfo = 
in.readOptionalWriteable(SnapshotInfo::readFrom); } - private void setSnapshotInfoFromBuilder(SnapshotInfoBuilder snapshotInfoBuilder) { - this.snapshotInfo = snapshotInfoBuilder.build(); - } - /** * Returns snapshot information if snapshot was completed by the time this method returned or null otherwise. * @@ -103,10 +79,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static CreateSnapshotResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - @Override public String toString() { return "CreateSnapshotResponse{" + "snapshotInfo=" + snapshotInfo + '}'; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index 8d776b7ae6ecb..02592b722c9e0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -68,7 +69,7 @@ protected void masterOperation( if (request.waitForCompletion()) { snapshotsService.executeSnapshot(request, listener.map(CreateSnapshotResponse::new)); } else { - snapshotsService.createSnapshot(request, listener.map(snapshot -> new CreateSnapshotResponse())); + snapshotsService.createSnapshot(request, listener.map(snapshot -> new CreateSnapshotResponse((SnapshotInfo) null))); } } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index 243df88cfab00..8a1f68c867943 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -23,7 +23,6 @@ import org.elasticsearch.repositories.RepositoryShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentFragment; @@ -53,237 +52,40 @@ public final class SnapshotInfo implements Comparable, ToXContentF public static final String INCLUDE_REPOSITORY_XCONTENT_PARAM = "include_repository"; private static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_optional_time"); - private static final String SNAPSHOT = "snapshot"; - private static final String UUID = "uuid"; - private static final String REPOSITORY = "repository"; - private static final String INDICES = "indices"; - private static final String DATA_STREAMS = "data_streams"; - private static final String STATE = "state"; - private static final String REASON = "reason"; - private static final String START_TIME = "start_time"; - private static final String START_TIME_IN_MILLIS = "start_time_in_millis"; - private static final String END_TIME = "end_time"; - private static final String END_TIME_IN_MILLIS = 
"end_time_in_millis"; - private static final String DURATION = "duration"; - private static final String DURATION_IN_MILLIS = "duration_in_millis"; - private static final String FAILURES = "failures"; - private static final String SHARDS = "shards"; - private static final String TOTAL = "total"; - private static final String FAILED = "failed"; - private static final String SUCCESSFUL = "successful"; - private static final String VERSION_ID = "version_id"; - private static final String VERSION = "version"; - private static final String NAME = "name"; - private static final String TOTAL_SHARDS = "total_shards"; - private static final String SUCCESSFUL_SHARDS = "successful_shards"; - private static final String INCLUDE_GLOBAL_STATE = "include_global_state"; - private static final String USER_METADATA = "metadata"; - private static final String FEATURE_STATES = "feature_states"; - private static final String INDEX_DETAILS = "index_details"; - - private static final String UNKNOWN_REPO_NAME = "_na_"; + + static final String SNAPSHOT = "snapshot"; + static final String UUID = "uuid"; + static final String REPOSITORY = "repository"; + static final String INDICES = "indices"; + static final String DATA_STREAMS = "data_streams"; + static final String STATE = "state"; + static final String REASON = "reason"; + static final String START_TIME = "start_time"; + static final String START_TIME_IN_MILLIS = "start_time_in_millis"; + static final String END_TIME = "end_time"; + static final String END_TIME_IN_MILLIS = "end_time_in_millis"; + static final String DURATION = "duration"; + static final String DURATION_IN_MILLIS = "duration_in_millis"; + static final String FAILURES = "failures"; + static final String SHARDS = "shards"; + static final String TOTAL = "total"; + static final String FAILED = "failed"; + static final String SUCCESSFUL = "successful"; + static final String VERSION_ID = "version_id"; + static final String VERSION = "version"; + static final String NAME = "name"; + static final String TOTAL_SHARDS = "total_shards"; + static final String SUCCESSFUL_SHARDS = "successful_shards"; + static final String INCLUDE_GLOBAL_STATE = "include_global_state"; + static final String USER_METADATA = "metadata"; + static final String FEATURE_STATES = "feature_states"; + static final String INDEX_DETAILS = "index_details"; + + static final String UNKNOWN_REPO_NAME = "_na_"; private static final Comparator COMPARATOR = Comparator.comparing(SnapshotInfo::startTime) .thenComparing(SnapshotInfo::snapshotId); - public static final class SnapshotInfoBuilder { - private String snapshotName = null; - private String snapshotUUID = null; - private String repository = UNKNOWN_REPO_NAME; - private String state = null; - private String reason = null; - private List indices = null; - private List dataStreams = null; - private List featureStates = null; - private Map indexSnapshotDetails = null; - private long startTime = 0L; - private long endTime = 0L; - private ShardStatsBuilder shardStatsBuilder = null; - private Boolean includeGlobalState = null; - private Map userMetadata = null; - private int version = -1; - private List shardFailures = null; - - private void setSnapshotName(String snapshotName) { - this.snapshotName = snapshotName; - } - - private void setSnapshotUUID(String snapshotUUID) { - this.snapshotUUID = snapshotUUID; - } - - private void setRepository(String repository) { - this.repository = repository; - } - - private void setState(String state) { - this.state = state; - } - - private void 
setReason(String reason) { - this.reason = reason; - } - - private void setIndices(List indices) { - this.indices = indices; - } - - private void setDataStreams(List dataStreams) { - this.dataStreams = dataStreams; - } - - private void setFeatureStates(List featureStates) { - this.featureStates = featureStates; - } - - private void setIndexSnapshotDetails(Map indexSnapshotDetails) { - this.indexSnapshotDetails = indexSnapshotDetails; - } - - private void setStartTime(long startTime) { - this.startTime = startTime; - } - - private void setEndTime(long endTime) { - this.endTime = endTime; - } - - private void setShardStatsBuilder(ShardStatsBuilder shardStatsBuilder) { - this.shardStatsBuilder = shardStatsBuilder; - } - - private void setIncludeGlobalState(Boolean includeGlobalState) { - this.includeGlobalState = includeGlobalState; - } - - private void setUserMetadata(Map userMetadata) { - this.userMetadata = userMetadata; - } - - private void setVersion(int version) { - this.version = version; - } - - private void setShardFailures(List shardFailures) { - this.shardFailures = shardFailures; - } - - public SnapshotInfo build() { - final Snapshot snapshot = new Snapshot(repository, new SnapshotId(snapshotName, snapshotUUID)); - - if (indices == null) { - indices = Collections.emptyList(); - } - - if (dataStreams == null) { - dataStreams = Collections.emptyList(); - } - - if (featureStates == null) { - featureStates = Collections.emptyList(); - } - - if (indexSnapshotDetails == null) { - indexSnapshotDetails = Collections.emptyMap(); - } - - SnapshotState snapshotState = state == null ? null : SnapshotState.valueOf(state); - IndexVersion version = this.version == -1 ? IndexVersion.current() : IndexVersion.fromId(this.version); - - int totalShards = shardStatsBuilder == null ? 0 : shardStatsBuilder.getTotalShards(); - int successfulShards = shardStatsBuilder == null ? 
0 : shardStatsBuilder.getSuccessfulShards(); - - if (shardFailures == null) { - shardFailures = new ArrayList<>(); - } - - return new SnapshotInfo( - snapshot, - indices, - dataStreams, - featureStates, - reason, - version, - startTime, - endTime, - totalShards, - successfulShards, - shardFailures, - includeGlobalState, - userMetadata, - snapshotState, - indexSnapshotDetails - ); - } - } - - private static final class ShardStatsBuilder { - private int totalShards; - private int successfulShards; - - private void setTotalShards(int totalShards) { - this.totalShards = totalShards; - } - - int getTotalShards() { - return totalShards; - } - - private void setSuccessfulShards(int successfulShards) { - this.successfulShards = successfulShards; - } - - int getSuccessfulShards() { - return successfulShards; - } - } - - public static final ObjectParser SNAPSHOT_INFO_PARSER = new ObjectParser<>( - SnapshotInfoBuilder.class.getName(), - true, - SnapshotInfoBuilder::new - ); - - private static final ObjectParser SHARD_STATS_PARSER = new ObjectParser<>( - ShardStatsBuilder.class.getName(), - true, - ShardStatsBuilder::new - ); - - static { - SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setSnapshotName, new ParseField(SNAPSHOT)); - SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setSnapshotUUID, new ParseField(UUID)); - SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setRepository, new ParseField(REPOSITORY)); - SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setState, new ParseField(STATE)); - SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setReason, new ParseField(REASON)); - SNAPSHOT_INFO_PARSER.declareStringArray(SnapshotInfoBuilder::setIndices, new ParseField(INDICES)); - SNAPSHOT_INFO_PARSER.declareStringArray(SnapshotInfoBuilder::setDataStreams, new ParseField(DATA_STREAMS)); - SNAPSHOT_INFO_PARSER.declareObjectArray( - SnapshotInfoBuilder::setFeatureStates, - SnapshotFeatureInfo.SNAPSHOT_FEATURE_INFO_PARSER, - new ParseField(FEATURE_STATES) - ); - SNAPSHOT_INFO_PARSER.declareObject( - SnapshotInfoBuilder::setIndexSnapshotDetails, - (p, c) -> p.map(HashMap::new, p2 -> IndexSnapshotDetails.PARSER.parse(p2, c)), - new ParseField(INDEX_DETAILS) - ); - SNAPSHOT_INFO_PARSER.declareLong(SnapshotInfoBuilder::setStartTime, new ParseField(START_TIME_IN_MILLIS)); - SNAPSHOT_INFO_PARSER.declareLong(SnapshotInfoBuilder::setEndTime, new ParseField(END_TIME_IN_MILLIS)); - SNAPSHOT_INFO_PARSER.declareObject(SnapshotInfoBuilder::setShardStatsBuilder, SHARD_STATS_PARSER, new ParseField(SHARDS)); - SNAPSHOT_INFO_PARSER.declareBoolean(SnapshotInfoBuilder::setIncludeGlobalState, new ParseField(INCLUDE_GLOBAL_STATE)); - SNAPSHOT_INFO_PARSER.declareObject(SnapshotInfoBuilder::setUserMetadata, (p, c) -> p.map(), new ParseField(USER_METADATA)); - SNAPSHOT_INFO_PARSER.declareInt(SnapshotInfoBuilder::setVersion, new ParseField(VERSION_ID)); - SNAPSHOT_INFO_PARSER.declareObjectArray( - SnapshotInfoBuilder::setShardFailures, - SnapshotShardFailure.SNAPSHOT_SHARD_FAILURE_PARSER, - new ParseField(FAILURES) - ); - - SHARD_STATS_PARSER.declareInt(ShardStatsBuilder::setTotalShards, new ParseField(TOTAL)); - SHARD_STATS_PARSER.declareInt(ShardStatsBuilder::setSuccessfulShards, new ParseField(SUCCESSFUL)); - } - private final Snapshot snapshot; @Nullable diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java 
index cb3c9a3557d61..87cb67a53fc37 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotInfoTestUtils; +import org.elasticsearch.snapshots.SnapshotInfoUtils; import org.elasticsearch.snapshots.SnapshotShardFailure; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.xcontent.XContentParser; @@ -30,7 +31,7 @@ public class CreateSnapshotResponseTests extends AbstractXContentTestCase CREATE_SNAPSHOT_RESPONSE_PARSER = new ConstructingObjectParser<>( + CreateSnapshotResponse.class.getName(), + true, + args -> new CreateSnapshotResponse(((SnapshotInfoBuilder) args[0]).build()) + ); + + static final ObjectParser SNAPSHOT_INFO_PARSER = new ObjectParser<>( + SnapshotInfoBuilder.class.getName(), + true, + SnapshotInfoBuilder::new + ); + + static final ConstructingObjectParser SHARD_STATS_PARSER = new ConstructingObjectParser<>( + ShardStatsBuilder.class.getName(), + true, + args -> new ShardStatsBuilder((int) Objects.requireNonNullElse(args[0], 0), (int) Objects.requireNonNullElse(args[1], 0)) + ); + + static { + SHARD_STATS_PARSER.declareInt(optionalConstructorArg(), new ParseField(TOTAL)); + SHARD_STATS_PARSER.declareInt(optionalConstructorArg(), new ParseField(SUCCESSFUL)); + + SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setSnapshotName, new ParseField(SNAPSHOT)); + SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setSnapshotUUID, new ParseField(UUID)); + SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setRepository, new ParseField(REPOSITORY)); + SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setState, new ParseField(STATE)); + SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setReason, new ParseField(REASON)); + SNAPSHOT_INFO_PARSER.declareStringArray(SnapshotInfoBuilder::setIndices, new ParseField(INDICES)); + SNAPSHOT_INFO_PARSER.declareStringArray(SnapshotInfoBuilder::setDataStreams, new ParseField(DATA_STREAMS)); + SNAPSHOT_INFO_PARSER.declareObjectArray( + SnapshotInfoBuilder::setFeatureStates, + SnapshotFeatureInfo.SNAPSHOT_FEATURE_INFO_PARSER, + new ParseField(FEATURE_STATES) + ); + SNAPSHOT_INFO_PARSER.declareObject( + SnapshotInfoBuilder::setIndexSnapshotDetails, + (p, c) -> p.map(HashMap::new, p2 -> SnapshotInfo.IndexSnapshotDetails.PARSER.parse(p2, c)), + new ParseField(INDEX_DETAILS) + ); + SNAPSHOT_INFO_PARSER.declareLong(SnapshotInfoBuilder::setStartTime, new ParseField(START_TIME_IN_MILLIS)); + SNAPSHOT_INFO_PARSER.declareLong(SnapshotInfoBuilder::setEndTime, new ParseField(END_TIME_IN_MILLIS)); + SNAPSHOT_INFO_PARSER.declareObject(SnapshotInfoBuilder::setShardStatsBuilder, SHARD_STATS_PARSER, new ParseField(SHARDS)); + SNAPSHOT_INFO_PARSER.declareBoolean(SnapshotInfoBuilder::setIncludeGlobalState, new ParseField(INCLUDE_GLOBAL_STATE)); + SNAPSHOT_INFO_PARSER.declareObject(SnapshotInfoBuilder::setUserMetadata, (p, c) -> p.map(), new ParseField(USER_METADATA)); + SNAPSHOT_INFO_PARSER.declareInt(SnapshotInfoBuilder::setVersion, new ParseField(VERSION_ID)); + SNAPSHOT_INFO_PARSER.declareObjectArray( + SnapshotInfoBuilder::setShardFailures, + SnapshotShardFailure.SNAPSHOT_SHARD_FAILURE_PARSER, + new ParseField(FAILURES) + ); + + 
CREATE_SNAPSHOT_RESPONSE_PARSER.declareObject(optionalConstructorArg(), SNAPSHOT_INFO_PARSER, new ParseField("snapshot")); + } + + private record ShardStatsBuilder(int totalShards, int successfulShards) {} + + public static final class SnapshotInfoBuilder { + private String snapshotName = null; + private String snapshotUUID = null; + private String repository = UNKNOWN_REPO_NAME; + private String state = null; + private String reason = null; + private List indices = null; + private List dataStreams = null; + private List featureStates = null; + private Map indexSnapshotDetails = null; + private long startTime = 0L; + private long endTime = 0L; + private ShardStatsBuilder shardStatsBuilder = null; + private Boolean includeGlobalState = null; + private Map userMetadata = null; + private int version = -1; + private List shardFailures = null; + + private void setSnapshotName(String snapshotName) { + this.snapshotName = snapshotName; + } + + private void setSnapshotUUID(String snapshotUUID) { + this.snapshotUUID = snapshotUUID; + } + + private void setRepository(String repository) { + this.repository = repository; + } + + private void setState(String state) { + this.state = state; + } + + private void setReason(String reason) { + this.reason = reason; + } + + private void setIndices(List indices) { + this.indices = indices; + } + + private void setDataStreams(List dataStreams) { + this.dataStreams = dataStreams; + } + + private void setFeatureStates(List featureStates) { + this.featureStates = featureStates; + } + + private void setIndexSnapshotDetails(Map indexSnapshotDetails) { + this.indexSnapshotDetails = indexSnapshotDetails; + } + + private void setStartTime(long startTime) { + this.startTime = startTime; + } + + private void setEndTime(long endTime) { + this.endTime = endTime; + } + + private void setShardStatsBuilder(ShardStatsBuilder shardStatsBuilder) { + this.shardStatsBuilder = shardStatsBuilder; + } + + private void setIncludeGlobalState(Boolean includeGlobalState) { + this.includeGlobalState = includeGlobalState; + } + + private void setUserMetadata(Map userMetadata) { + this.userMetadata = userMetadata; + } + + private void setVersion(int version) { + this.version = version; + } + + private void setShardFailures(List shardFailures) { + this.shardFailures = shardFailures; + } + + public SnapshotInfo build() { + final Snapshot snapshot = new Snapshot(repository, new SnapshotId(snapshotName, snapshotUUID)); + + if (indices == null) { + indices = Collections.emptyList(); + } + + if (dataStreams == null) { + dataStreams = Collections.emptyList(); + } + + if (featureStates == null) { + featureStates = Collections.emptyList(); + } + + if (indexSnapshotDetails == null) { + indexSnapshotDetails = Collections.emptyMap(); + } + + SnapshotState snapshotState = state == null ? null : SnapshotState.valueOf(state); + IndexVersion version = this.version == -1 ? IndexVersion.current() : IndexVersion.fromId(this.version); + + int totalShards = shardStatsBuilder == null ? 0 : shardStatsBuilder.totalShards(); + int successfulShards = shardStatsBuilder == null ? 
0 : shardStatsBuilder.successfulShards(); + + if (shardFailures == null) { + shardFailures = new ArrayList<>(); + } + + return new SnapshotInfo( + snapshot, + indices, + dataStreams, + featureStates, + reason, + version, + startTime, + endTime, + totalShards, + successfulShards, + shardFailures, + includeGlobalState, + userMetadata, + snapshotState, + indexSnapshotDetails + ); + } + } + + public static CreateSnapshotResponse createSnapshotResponseFromXContent(XContentParser parser) { + return CREATE_SNAPSHOT_RESPONSE_PARSER.apply(parser, null); + } + + public static SnapshotInfo snapshotInfoFromXContent(XContentParser parser) { + return SNAPSHOT_INFO_PARSER.apply(parser, null).build(); + } +} diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java index 729cb8ef47292..f54cd4d4977d7 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotInfoUtils; import org.elasticsearch.snapshots.SnapshotShardFailure; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; @@ -194,7 +195,9 @@ public void testCreateSnapshotOnTrigger() { assertThat(req.includeGlobalState(), equalTo(globalState)); try { - return CreateSnapshotResponse.fromXContent(createParser(JsonXContent.jsonXContent, createSnapResponse)); + return SnapshotInfoUtils.createSnapshotResponseFromXContent( + createParser(JsonXContent.jsonXContent, createSnapResponse) + ); } catch (IOException e) { fail("failed to parse snapshot response"); return null; From faf7b7fd382dbe207a69752436971ca59f32ea5d Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Fri, 22 Mar 2024 14:50:01 +0100 Subject: [PATCH 20/79] Add more debugging to 25_id_generation/generates a consistent id test (#106667) Include the _ts_routing_hash field in the search response for more debugging. In order to add support for docvalue_fields feature for this field type, a custom DOC_VALUE_FORMAT is needed in order to decode to valid utf8 string. 
Relates #106550 --- .../test/tsdb/25_id_generation.yml | 3 +++ .../TimeSeriesRoutingHashFieldMapper.java | 25 +++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml index 04fa2faca209f..621906820e4ad 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml @@ -110,10 +110,13 @@ generates a consistent id: query: match_all: {} sort: ["@timestamp"] + _source: true + docvalue_fields: [_ts_routing_hash] - match: {hits.total.value: 9} - match: { hits.hits.0._id: cn4excfoxSs_KdA5AAABeRnRFAY } + - match: { hits.hits.0.fields._ts_routing_hash: [ cn4exQ ] } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:50:03.142Z } - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java index 090fe7839b3e9..d5750600a25c9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java @@ -10,6 +10,8 @@ import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ByteUtils; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexVersions; @@ -20,8 +22,10 @@ import org.elasticsearch.index.fielddata.plain.SortedOrdinalsIndexFieldData; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.script.field.DelegateDocValuesField; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; +import java.time.ZoneId; import java.util.Base64; import java.util.Collections; @@ -43,6 +47,22 @@ public class TimeSeriesRoutingHashFieldMapper extends MetadataFieldMapper { static final class TimeSeriesRoutingHashFieldType extends MappedFieldType { private static final TimeSeriesRoutingHashFieldType INSTANCE = new TimeSeriesRoutingHashFieldType(); + private static final DocValueFormat DOC_VALUE_FORMAT = new DocValueFormat() { + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) {} + + @Override + public Object format(BytesRef value) { + return Uid.decodeId(value.bytes, value.offset, value.length); + } + + }; private TimeSeriesRoutingHashFieldType() { super(NAME, false, false, true, TextSearchInfo.NONE, Collections.emptyMap()); @@ -75,6 +95,11 @@ public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext public Query termQuery(Object value, SearchExecutionContext context) { throw new IllegalArgumentException("[" + NAME + "] is not searchable"); } + + @Override + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { + return DOC_VALUE_FORMAT; + } } private TimeSeriesRoutingHashFieldMapper() { From e92420dc865815995aee526b40dd60c4440a50a7 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Fri, 22 Mar 2024 15:28:30 +0100 Subject: 
[PATCH 21/79] [DOCS] Update cross cluster search compatability matrix (#106677) --- .../ccs-version-compat-matrix.asciidoc | 42 ++++++++++--------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc index bf51042d6adec..5af3c997251dd 100644 --- a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc +++ b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc @@ -1,22 +1,24 @@ -[cols="^,^,^,^,^,^,^,^,^,^,^,^,^,^,^,^,^"] +[cols="^,^,^,^,^,^,^,^,^,^,^,^,^,^,^,^,^,^"] |==== -| 16+^h| Remote cluster version +| 17+^h| Remote cluster version h| Local cluster version - | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 | 8.4 | 8.5 |8.6 |8.7 |8.8 |8.9 |8.10 |8.11 |8.12 -| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} -| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} -| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} -| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} -| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}|{yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} -| 8.4 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} |{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} -| 8.5 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} |{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} -| 8.6 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} -| 8.7 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} -| 8.8 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} -| 8.9 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} -| 8.10 | 
{no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} -| 8.11 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}| {yes-icon} -| 8.12 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon} -|==== \ No newline at end of file + | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 | 8.4 | 8.5 |8.6 |8.7 |8.8 |8.9 |8.10 |8.11 |8.12 |8.13 +| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} +| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} +| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} +| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} +| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}|{yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} +| 8.4 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} |{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} +| 8.5 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} +| 8.6 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} +| 8.7 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} +| 8.8 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} +| 8.9 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | 
{yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} +| 8.10 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} +| 8.11 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} +| 8.12 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} +| 8.13 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} +|==== + From cc96a8b7e9335682694eea91fb4bd4736a70ecc3 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Fri, 22 Mar 2024 16:31:51 +0200 Subject: [PATCH 22/79] Add DownsampleMetrics (#106632) A single instance gets created by the `Downsample` plugin and injected to related actions. --- .../xpack/downsample/Downsample.java | 6 ++ .../xpack/downsample/DownsampleMetrics.java | 74 +++++++++++++++++++ .../downsample/DownsampleShardIndexer.java | 9 ++- ...DownsampleShardPersistentTaskExecutor.java | 14 +++- .../TransportDownsampleIndexerAction.java | 7 +- .../DownsampleActionSingleNodeTests.java | 25 ++++++- 6 files changed, 130 insertions(+), 5 deletions(-) create mode 100644 x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleMetrics.java diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/Downsample.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/Downsample.java index 0d3a784e00e53..a6ba4346b1a25 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/Downsample.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/Downsample.java @@ -39,6 +39,7 @@ import org.elasticsearch.xpack.core.downsample.DownsampleShardPersistentTaskState; import org.elasticsearch.xpack.core.downsample.DownsampleShardTask; +import java.util.Collection; import java.util.List; import java.util.function.Predicate; import java.util.function.Supplier; @@ -133,4 +134,9 @@ public List getNamedWriteables() { new NamedWriteableRegistry.Entry(PersistentTaskParams.class, DownsampleShardTaskParams.NAME, DownsampleShardTaskParams::new) ); } + + @Override + public Collection createComponents(PluginServices services) { + return List.of(new DownsampleMetrics(services.telemetryProvider().getMeterRegistry())); + } } diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleMetrics.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleMetrics.java new file mode 100644 index 0000000000000..576f40a8190f3 --- /dev/null +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleMetrics.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.downsample; + +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +import java.io.IOException; +import java.util.Map; + +/** + * Contains metrics related to downsampling actions. + * It gets initialized as a component by the {@link Downsample} plugin, can be injected to its actions. + * + * In tests, use TestTelemetryPlugin to inject a MeterRegistry for testing purposes + * and check that metrics get recorded as expected. + * + * To add a new metric, you need to: + * - Add a constant for its name, following the naming conventions for metrics. + * - Register it in method {@link #doStart}. + * - Add a function for recording its value. + * - If needed, inject {@link DownsampleMetrics} to the action containing the logic + * that records the metric value. For reference, see {@link TransportDownsampleIndexerAction}. + */ +public class DownsampleMetrics extends AbstractLifecycleComponent { + + public static final String LATENCY_SHARD = "es.tsdb.downsample.latency.shard.histogram"; + + private final MeterRegistry meterRegistry; + + public DownsampleMetrics(MeterRegistry meterRegistry) { + this.meterRegistry = meterRegistry; + } + + @Override + protected void doStart() { + // Register all metrics to track. + meterRegistry.registerLongHistogram(LATENCY_SHARD, "Downsampling action latency per shard", "ms"); + } + + @Override + protected void doStop() {} + + @Override + protected void doClose() throws IOException {} + + enum ShardActionStatus { + + SUCCESS("success"), + MISSING_DOCS("missing_docs"), + FAILED("failed"); + + public static final String NAME = "status"; + + private final String message; + + ShardActionStatus(String message) { + this.message = message; + } + + String getMessage() { + return message; + } + } + + void recordLatencyShard(long durationInMilliSeconds, ShardActionStatus status) { + meterRegistry.getLongHistogram(LATENCY_SHARD).record(durationInMilliSeconds, Map.of(ShardActionStatus.NAME, status.getMessage())); + } +} diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java index 844c644ee9ea6..773dfbe897b50 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java @@ -85,6 +85,7 @@ class DownsampleShardIndexer { public static final ByteSizeValue DOWNSAMPLE_MAX_BYTES_IN_FLIGHT = new ByteSizeValue(50, ByteSizeUnit.MB); private final IndexShard indexShard; private final Client client; + private final DownsampleMetrics downsampleMetrics; private final String downsampleIndex; private final Engine.Searcher searcher; private final SearchExecutionContext searchExecutionContext; @@ -103,6 +104,7 @@ class DownsampleShardIndexer { final DownsampleShardTask task, final Client client, final IndexService indexService, + final DownsampleMetrics downsampleMetrics, final ShardId shardId, final String downsampleIndex, final DownsampleConfig config, @@ -113,6 +115,7 @@ class DownsampleShardIndexer { ) { this.task = task; this.client = client; + this.downsampleMetrics = downsampleMetrics; this.indexShard = 
indexService.getShard(shardId.id()); this.downsampleIndex = downsampleIndex; this.searcher = indexShard.acquireSearcher("downsampling"); @@ -164,6 +167,7 @@ public DownsampleIndexerAction.ShardDownsampleResponse execute() throws IOExcept timeSeriesSearcher.search(initialStateQuery, bucketCollector); } + TimeValue duration = TimeValue.timeValueMillis(client.threadPool().relativeTimeInMillis() - startTime); logger.info( "Shard [{}] successfully sent [{}], received source doc [{}], indexed downsampled doc [{}], failed [{}], took [{}]", indexShard.shardId(), @@ -171,7 +175,7 @@ public DownsampleIndexerAction.ShardDownsampleResponse execute() throws IOExcept task.getNumSent(), task.getNumIndexed(), task.getNumFailed(), - TimeValue.timeValueMillis(client.threadPool().relativeTimeInMillis() - startTime) + duration ); if (task.getNumIndexed() != task.getNumSent()) { @@ -187,6 +191,7 @@ public DownsampleIndexerAction.ShardDownsampleResponse execute() throws IOExcept + task.getNumSent() + "]"; logger.info(error); + downsampleMetrics.recordLatencyShard(duration.millis(), DownsampleMetrics.ShardActionStatus.MISSING_DOCS); throw new DownsampleShardIndexerException(error, false); } @@ -199,6 +204,7 @@ public DownsampleIndexerAction.ShardDownsampleResponse execute() throws IOExcept + task.getNumFailed() + "]"; logger.info(error); + downsampleMetrics.recordLatencyShard(duration.millis(), DownsampleMetrics.ShardActionStatus.FAILED); throw new DownsampleShardIndexerException(error, false); } @@ -208,6 +214,7 @@ public DownsampleIndexerAction.ShardDownsampleResponse execute() throws IOExcept ActionListener.noop() ); logger.info("Downsampling task [" + task.getPersistentTaskId() + " on shard " + indexShard.shardId() + " completed"); + downsampleMetrics.recordLatencyShard(duration.millis(), DownsampleMetrics.ShardActionStatus.SUCCESS); return new DownsampleIndexerAction.ShardDownsampleResponse(indexShard.shardId(), task.getNumIndexed()); } diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java index b4116d42d25ca..5e6f8b6b5b18e 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java @@ -188,6 +188,7 @@ private static IndexShardRoutingTable findShardRoutingTable(ShardId shardId, Clu static void realNodeOperation( Client client, IndicesService indicesService, + DownsampleMetrics downsampleMetrics, DownsampleShardTask task, DownsampleShardTaskParams params, BytesRef lastDownsampledTsid @@ -209,6 +210,7 @@ protected void doRun() throws Exception { task, client, indicesService.indexServiceSafe(params.shardId().getIndex()), + downsampleMetrics, params.shardId(), params.downsampleIndex(), params.downsampleConfig(), @@ -303,17 +305,25 @@ public static class TA extends TransportAction { private final Client client; private final IndicesService indicesService; + private final DownsampleMetrics downsampleMetrics; @Inject - public TA(TransportService transportService, ActionFilters actionFilters, Client client, IndicesService indicesService) { + public TA( + TransportService transportService, + ActionFilters actionFilters, + Client client, + IndicesService indicesService, + DownsampleMetrics downsampleMetrics + ) { super(NAME, 
actionFilters, transportService.getTaskManager()); this.client = client; this.indicesService = indicesService; + this.downsampleMetrics = downsampleMetrics; } @Override protected void doExecute(Task t, Request request, ActionListener listener) { - realNodeOperation(client, indicesService, request.task, request.params, request.lastDownsampleTsid); + realNodeOperation(client, indicesService, downsampleMetrics, request.task, request.params, request.lastDownsampleTsid); listener.onResponse(ActionResponse.Empty.INSTANCE); } } diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleIndexerAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleIndexerAction.java index 24d1df638f80b..f7cfe2d859583 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleIndexerAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleIndexerAction.java @@ -52,6 +52,8 @@ public class TransportDownsampleIndexerAction extends TransportBroadcastAction< private final ClusterService clusterService; private final IndicesService indicesService; + private final DownsampleMetrics downsampleMetrics; + @Inject public TransportDownsampleIndexerAction( Client client, @@ -59,7 +61,8 @@ public TransportDownsampleIndexerAction( TransportService transportService, IndicesService indicesService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver + IndexNameExpressionResolver indexNameExpressionResolver, + DownsampleMetrics downsampleMetrics ) { super( DownsampleIndexerAction.NAME, @@ -74,6 +77,7 @@ public TransportDownsampleIndexerAction( this.client = new OriginSettingClient(client, ClientHelper.ROLLUP_ORIGIN); this.clusterService = clusterService; this.indicesService = indicesService; + this.downsampleMetrics = downsampleMetrics; } @Override @@ -139,6 +143,7 @@ protected DownsampleIndexerAction.ShardDownsampleResponse shardOperation( (DownsampleShardTask) task, client, indexService, + downsampleMetrics, request.shardId(), request.getDownsampleIndex(), request.getRollupConfig(), diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java index a7b36bbd7dc9b..4c5fdc23e04f9 100644 --- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java @@ -57,6 +57,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchResponseUtils; @@ -80,6 +81,8 @@ import org.elasticsearch.tasks.TaskCancelHelper; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -93,6 +96,7 @@ import 
org.elasticsearch.xpack.core.ilm.RolloverAction; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.ilm.IndexLifecycle; +import org.hamcrest.Matchers; import org.junit.Before; import java.io.IOException; @@ -162,7 +166,8 @@ protected Collection> getPlugins() { Downsample.class, AggregateMetricMapperPlugin.class, DataStreamsPlugin.class, - IndexLifecycle.class + IndexLifecycle.class, + TestTelemetryPlugin.class ); } @@ -623,6 +628,7 @@ public void testCancelDownsampleIndexer() throws IOException { task, client(), indexService, + getInstanceFromNode(DownsampleMetrics.class), shard.shardId(), downsampleIndex, config, @@ -672,6 +678,7 @@ public void testDownsampleBulkFailed() throws IOException { task, client(), indexService, + getInstanceFromNode(DownsampleMetrics.class), shard.shardId(), downsampleIndex, config, @@ -739,6 +746,7 @@ public void testTooManyBytesInFlight() throws IOException { task, client(), indexService, + getInstanceFromNode(DownsampleMetrics.class), shard.shardId(), downsampleIndex, config, @@ -791,6 +799,7 @@ public void testDownsampleStats() throws IOException { task, client(), indexService, + getInstanceFromNode(DownsampleMetrics.class), shard.shardId(), downsampleIndex, config, @@ -810,6 +819,18 @@ public void testDownsampleStats() throws IOException { assertDownsampleIndexer(indexService, shardNum, task, executeResponse, task.getTotalShardDocCount()); } + + // Check that metrics get collected as expected. + final TestTelemetryPlugin plugin = getInstanceFromNode(PluginsService.class).filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + List measurements = plugin.getLongHistogramMeasurement(DownsampleMetrics.LATENCY_SHARD); + assertFalse(measurements.isEmpty()); + for (Measurement measurement : measurements) { + assertTrue(measurement.value().toString(), measurement.value().longValue() >= 0 && measurement.value().longValue() < 1000_000); + assertEquals(1, measurement.attributes().size()); + assertThat(measurement.attributes().get("status"), Matchers.in(List.of("success", "failed", "missing_docs"))); + } } public void testResumeDownsample() throws IOException { @@ -848,6 +869,7 @@ public void testResumeDownsample() throws IOException { task, client(), indexService, + getInstanceFromNode(DownsampleMetrics.class), shard.shardId(), downsampleIndex, config, @@ -923,6 +945,7 @@ public void testResumeDownsamplePartial() throws IOException { task, client(), indexService, + getInstanceFromNode(DownsampleMetrics.class), shard.shardId(), downsampleIndex, config, From bdce52ebf768658f355168c38fdf9e342389c94b Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 22 Mar 2024 15:33:32 +0100 Subject: [PATCH 23/79] Assert that FsBlobContainer.readBlob does not start reading after file length (#106668) This change adds an assertion in the FsBlobContainer.readBlob to ensure we are not reading after the last byte of the file. While this is legal and documented in the SeekableByteChannel.position() API, having this assertion in place would have caught on CI some regression introduced recently and only caught on deployments where S3 rejects reads starting after blob length. 
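The behaviour that lets such a regression go unnoticed on a local filesystem is easy to demonstrate: positioning a channel past the end of the file is accepted, and a subsequent read simply reports end-of-stream instead of failing. A small, self-contained sketch of that behaviour (class and variable names are illustrative only):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.SeekableByteChannel;
    import java.nio.file.Files;
    import java.nio.file.Path;

    final class ReadPastEofSketch {
        public static void main(String[] args) throws IOException {
            Path blob = Files.createTempFile("blob", ".bin");
            Files.write(blob, new byte[] { 1, 2, 3 });
            try (SeekableByteChannel channel = Files.newByteChannel(blob)) {
                channel.position(10);                          // beyond size() == 3, legal per the API
                int read = channel.read(ByteBuffer.allocate(8));
                System.out.println(read);                      // -1: the read silently reports EOF, no error
            } finally {
                Files.delete(blob);
            }
        }
    }

The new assertion turns this silent case into a failure on the filesystem-backed implementation, so the same mistake no longer has to reach an S3-backed deployment before it is noticed.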
--- .../org/elasticsearch/common/blobstore/fs/FsBlobContainer.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index e40ca70460b13..749773cd91eb8 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -197,6 +197,7 @@ public InputStream readBlob(OperationPurpose purpose, String blobName, long posi assert BlobContainer.assertPurposeConsistency(purpose, blobName); final SeekableByteChannel channel = Files.newByteChannel(path.resolve(blobName)); if (position > 0L) { + assert position < channel.size() : "reading from " + position + " exceeds file length " + channel.size(); channel.position(position); } assert channel.position() == position; From 0eca8dda92d0b680d9549b2f387677f5f09adf01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20R=C3=BChsen?= Date: Fri, 22 Mar 2024 15:53:39 +0100 Subject: [PATCH 24/79] [Profiling] Switch to OTEL cloud.provider, cloud.region, host.type (#106656) * [Profiling] Switch to OTEL cloud.provider, cloud.region, host.type * Remove wrong spaces from auto-format --- .../component-template/profiling-hosts.json | 25 ++++++-- .../resources/data/profiling-hosts.ndjson | 4 +- .../xpack/profiling/InstanceType.java | 64 ++++++++++++------- .../ProfilingIndexTemplateRegistry.java | 5 +- .../xpack/profiling/HostMetadataTests.java | 60 +++++++++++++++-- .../rest-api-spec/test/profiling/10_basic.yml | 2 +- 6 files changed, 123 insertions(+), 37 deletions(-) diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json index f633a8f0cbdb5..353411ed80b2e 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json @@ -33,11 +33,28 @@ "type": "date", "format": "epoch_second" }, - "host.id": { - "type": "keyword" + "host": { + "properties": { + "arch": { + "type": "keyword" + }, + "id": { + "type": "keyword" + }, + "type": { + "type": "keyword" + } + } }, - "host.arch": { - "type": "keyword" + "cloud": { + "properties": { + "provider": { + "type": "keyword" + }, + "region": { + "type": "keyword" + } + } }, "profiling": { "properties": { diff --git a/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson index e164f49c4f685..e12a670a79d18 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson +++ b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson @@ -1,4 +1,4 @@ {"create": {"_index": "profiling-hosts","_id":"eLH27YsBj2lLi3tJYlvr"}} 
-{"profiling.project.id":100,"host.id":"8457605156473051743","@timestamp":1700504426,"ecs.version":"1.12.0","profiling.agent.build_timestamp":1688111067,"profiling.instance.private_ipv4s":["192.168.1.2"],"ec2.instance_life_cycle":"on-demand","profiling.agent.config.map_scale_factor":0,"ec2.instance_type":"i3.2xlarge","profiling.host.ip":"192.168.1.2","profiling.agent.config.bpf_log_level":0,"profiling.host.sysctl.net.core.bpf_jit_enable":1,"profiling.agent.config.file":"/etc/prodfiler/prodfiler.conf","ec2.local_ipv4":"192.168.1.2","profiling.agent.config.no_kernel_version_check":false,"host.arch":"amd64","profiling.host.tags":["cloud_provider:aws","cloud_environment:qa","cloud_region:eu-west-1"],"profiling.agent.config.probabilistic_threshold":100,"profiling.agent.config.disable_tls":false,"profiling.agent.config.tracers":"all","profiling.agent.start_time":1700090045589,"profiling.agent.config.max_elements_per_interval":800,"ec2.placement.region":"eu-west-1","profiling.agent.config.present_cpu_cores":8,"profiling.host.kernel_version":"9.9.9-0-aws","profiling.agent.config.bpf_log_size":65536,"profiling.agent.config.known_traces_entries":65536,"profiling.host.sysctl.kernel.unprivileged_bpf_disabled":1,"profiling.agent.config.verbose":false,"profiling.agent.config.probabilistic_interval":"1m0s","ec2.placement.availability_zone_id":"euw1-az1","ec2.security_groups":"","ec2.local_hostname":"ip-192-168-1-2.eu-west-1.compute.internal","ec2.placement.availability_zone":"eu-west-1c","profiling.agent.config.upload_symbols":false,"profiling.host.sysctl.kernel.bpf_stats_enabled":0,"profiling.host.name":"ip-192-168-1-2","ec2.mac":"00:11:22:33:44:55","profiling.host.kernel_proc_version":"Linux version 9.9.9-0-aws","profiling.agent.config.cache_directory":"/var/cache/optimyze/","profiling.agent.version":"v8.12.0","ec2.hostname":"ip-192-168-1-2.eu-west-1.compute.internal","profiling.agent.config.elastic_mode":false,"ec2.ami_id":"ami-aaaaaaaaaaa","ec2.instance_id":"i-0b999999999999999"} 
+{"profiling.project.id":100,"host.id":"8457605156473051743","@timestamp":1700504426,"ecs.version":"1.12.0","profiling.agent.build_timestamp":1688111067,"profiling.instance.private_ipv4s":["192.168.1.2"],"ec2.instance_life_cycle":"on-demand","profiling.agent.config.map_scale_factor":0,"host.type":"i3.2xlarge","profiling.host.ip":"192.168.1.2","profiling.agent.config.bpf_log_level":0,"profiling.host.sysctl.net.core.bpf_jit_enable":1,"profiling.agent.config.file":"/etc/prodfiler/prodfiler.conf","ec2.local_ipv4":"192.168.1.2","profiling.agent.config.no_kernel_version_check":false,"host.arch":"amd64","profiling.host.tags":["cloud_provider:aws","cloud_environment:qa","cloud_region:eu-west-1"],"profiling.agent.config.probabilistic_threshold":100,"profiling.agent.config.disable_tls":false,"profiling.agent.config.tracers":"all","profiling.agent.start_time":1700090045589,"profiling.agent.config.max_elements_per_interval":800,"cloud.provider":"aws","cloud.region":"eu-west-1","profiling.agent.config.present_cpu_cores":8,"profiling.host.kernel_version":"9.9.9-0-aws","profiling.agent.config.bpf_log_size":65536,"profiling.agent.config.known_traces_entries":65536,"profiling.host.sysctl.kernel.unprivileged_bpf_disabled":1,"profiling.agent.config.verbose":false,"profiling.agent.config.probabilistic_interval":"1m0s","ec2.placement.availability_zone_id":"euw1-az1","ec2.security_groups":"","ec2.local_hostname":"ip-192-168-1-2.eu-west-1.compute.internal","ec2.placement.availability_zone":"eu-west-1c","profiling.agent.config.upload_symbols":false,"profiling.host.sysctl.kernel.bpf_stats_enabled":0,"profiling.host.name":"ip-192-168-1-2","ec2.mac":"00:11:22:33:44:55","profiling.host.kernel_proc_version":"Linux version 9.9.9-0-aws","profiling.agent.config.cache_directory":"/var/cache/optimyze/","profiling.agent.version":"v8.12.0","ec2.hostname":"ip-192-168-1-2.eu-west-1.compute.internal","profiling.agent.config.elastic_mode":false,"ec2.ami_id":"ami-aaaaaaaaaaa","ec2.instance_id":"i-0b999999999999999"} {"create": {"_index": "profiling-hosts", "_id": "u_fHlYwBkmZvQ6tVo1Lr"}} 
-{"profiling.project.id":100,"host.id":"7416508186220657211","@timestamp":1703319912,"ecs.version":"1.12.0","profiling.agent.version":"8.11.0","profiling.agent.config.map_scale_factor":0,"profiling.agent.config.probabilistic_threshold":100,"profiling.host.name":"ip-192-186-1-3","profiling.agent.config.no_kernel_version_check":false,"profiling.host.sysctl.net.core.bpf_jit_enable":1,"profiling.agent.config.elastic_mode":false,"azure.compute.vmsize":"Standard_D4s_v3","azure.compute.environment":"AzurePublicCloud","profiling.agent.config.bpf_log_level":0,"profiling.agent.config.known_traces_entries":65536,"profiling.agent.config.ca_address":"example.com:443","profiling.agent.config.tags":"cloud_provider:azure;cloud_environment:qa;cloud_region:eastus2","profiling.host.tags":["cloud_provider:azure","cloud_environment:qa","cloud_region:eastus2"],"profiling.host.kernel_version":"9.9.9-0-azure","profiling.agent.revision":"head-52cc2030","azure.compute.subscriptionid":"1-2-3-4-5","profiling.host.sysctl.kernel.bpf_stats_enabled":0,"host.arch":"amd64","azure.compute.zone":"3","profiling.agent.config.cache_directory":"/var/cache/Elastic/universal-profiling","azure.compute.name":"example-qa-eastus2-001-v1-zone3_6","profiling.agent.config.probabilistic_interval":"1m0s","azure.compute.location":"eastus2","azure.compute.version":"1234.20230510.233254","profiling.instance.private_ipv4s":["192.168.1.3"],"profiling.agent.build_timestamp":1699000836,"profiling.agent.config.file":"/etc/Elastic/universal-profiling/pf-host-agent.conf","profiling.agent.config.bpf_log_size":65536,"profiling.host.sysctl.kernel.unprivileged_bpf_disabled":1,"profiling.agent.config.tracers":"all","profiling.agent.config.present_cpu_cores":4,"profiling.agent.start_time":1702306987358,"profiling.agent.config.disable_tls":false,"azure.compute.ostype":"Linux","profiling.host.ip":"192.168.1.3","profiling.agent.config.max_elements_per_interval":400,"profiling.agent.config.upload_symbols":false,"azure.compute.tags":"bootstrap-version:v1;ece-id:001;environment:qa;identifier:v1;initial-config:;managed-by:terraform;monitored-by:core-infrastructure;owner:core-infrastructure;region_type:ess;role:blueprint;secondary_role:;vars-identifier:eastus2-001-v1","profiling.host.kernel_proc_version":"Linux version 9.9.9-0-azure","profiling.agent.config.verbose":false,"azure.compute.vmid":"1-2-3-4-5"} 
+{"profiling.project.id":100,"host.id":"7416508186220657211","@timestamp":1703319912,"ecs.version":"1.12.0","profiling.agent.version":"8.11.0","profiling.agent.config.map_scale_factor":0,"profiling.agent.config.probabilistic_threshold":100,"profiling.host.name":"ip-192-186-1-3","profiling.agent.config.no_kernel_version_check":false,"profiling.host.sysctl.net.core.bpf_jit_enable":1,"profiling.agent.config.elastic_mode":false,"host.type":"Standard_D4s_v3","azure.compute.environment":"AzurePublicCloud","profiling.agent.config.bpf_log_level":0,"profiling.agent.config.known_traces_entries":65536,"profiling.agent.config.ca_address":"example.com:443","profiling.agent.config.tags":"cloud_provider:azure;cloud_environment:qa;cloud_region:eastus2","profiling.host.tags":["cloud_provider:azure","cloud_environment:qa","cloud_region:eastus2"],"profiling.host.kernel_version":"9.9.9-0-azure","profiling.agent.revision":"head-52cc2030","azure.compute.subscriptionid":"1-2-3-4-5","profiling.host.sysctl.kernel.bpf_stats_enabled":0,"host.arch":"amd64","azure.compute.zone":"3","profiling.agent.config.cache_directory":"/var/cache/Elastic/universal-profiling","azure.compute.name":"example-qa-eastus2-001-v1-zone3_6","profiling.agent.config.probabilistic_interval":"1m0s","cloud.provider":"azure","cloud.region":"eastus2","azure.compute.version":"1234.20230510.233254","profiling.instance.private_ipv4s":["192.168.1.3"],"profiling.agent.build_timestamp":1699000836,"profiling.agent.config.file":"/etc/Elastic/universal-profiling/pf-host-agent.conf","profiling.agent.config.bpf_log_size":65536,"profiling.host.sysctl.kernel.unprivileged_bpf_disabled":1,"profiling.agent.config.tracers":"all","profiling.agent.config.present_cpu_cores":4,"profiling.agent.start_time":1702306987358,"profiling.agent.config.disable_tls":false,"azure.compute.ostype":"Linux","profiling.host.ip":"192.168.1.3","profiling.agent.config.max_elements_per_interval":400,"profiling.agent.config.upload_symbols":false,"azure.compute.tags":"bootstrap-version:v1;ece-id:001;environment:qa;identifier:v1;initial-config:;managed-by:terraform;monitored-by:core-infrastructure;owner:core-infrastructure;region_type:ess;role:blueprint;secondary_role:;vars-identifier:eastus2-001-v1","profiling.host.kernel_proc_version":"Linux version 9.9.9-0-azure","profiling.agent.config.verbose":false,"azure.compute.vmid":"1-2-3-4-5"} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java index ee649e381c85d..3aa0a79df13bc 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java @@ -35,6 +35,45 @@ final class InstanceType implements ToXContentObject { * @return the {@link InstanceType} */ public static InstanceType fromHostSource(Map source) { + String provider = (String) source.get("cloud.provider"); + if (provider != null) { + String region = (String) source.get("cloud.region"); + String instanceType = (String) source.get("host.type"); + return new InstanceType(provider, region, instanceType); + } + + // Check and handle pre-8.14.0 host sources for backwards-compatibility. + InstanceType instanceType = fromObsoleteHostSource(source); + if (instanceType != null) { + return instanceType; + } + + // Support for configured tags (ECS). 
+ // Example of tags: + // "profiling.host.tags": [ + // "cloud_provider:aws", + // "cloud_environment:qa", + // "cloud_region:eu-west-1", + // ], + String region = null; + List tags = listOf(source.get("profiling.host.tags")); + for (String tag : tags) { + String[] kv = tag.toLowerCase(Locale.ROOT).split(":", 2); + if (kv.length != 2) { + continue; + } + if ("cloud_provider".equals(kv[0])) { + provider = kv[1]; + } + if ("cloud_region".equals(kv[0])) { + region = kv[1]; + } + } + + return new InstanceType(provider, region, null); + } + + private static InstanceType fromObsoleteHostSource(Map source) { // Check and handle AWS. String region = (String) source.get("ec2.placement.region"); if (region != null) { @@ -67,30 +106,7 @@ public static InstanceType fromHostSource(Map source) { return new InstanceType("azure", region, instanceType); } - // Support for configured tags (ECS). - // Example of tags: - // "profiling.host.tags": [ - // "cloud_provider:aws", - // "cloud_environment:qa", - // "cloud_region:eu-west-1", - // ], - String provider = null; - region = null; - List tags = listOf(source.get("profiling.host.tags")); - for (String tag : tags) { - String[] kv = tag.toLowerCase(Locale.ROOT).split(":", 2); - if (kv.length != 2) { - continue; - } - if ("cloud_provider".equals(kv[0])) { - provider = kv[1]; - } - if ("cloud_region".equals(kv[0])) { - region = kv[1]; - } - } - - return new InstanceType(provider, region, null); + return null; } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java index 738c06fa310a9..e1698e71afab2 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java @@ -47,13 +47,14 @@ public class ProfilingIndexTemplateRegistry extends IndexTemplateRegistry { // version 4: Added 'service.name' keyword mapping to profiling-events // version 5: Add optional component template '@custom' to all index templates that reference component templates // version 6: Added 'host.arch' keyword mapping to profiling-hosts - public static final int INDEX_TEMPLATE_VERSION = 6; + // version 7: Added 'host.type', 'cloud.provider', 'cloud.region' keyword mappings to profiling-hosts + public static final int INDEX_TEMPLATE_VERSION = 7; // history for individual indices / index templates. 
Only bump these for breaking changes that require to create a new index public static final int PROFILING_EVENTS_VERSION = 2; public static final int PROFILING_EXECUTABLES_VERSION = 1; public static final int PROFILING_METRICS_VERSION = 1; - public static final int PROFILING_HOSTS_VERSION = 1; + public static final int PROFILING_HOSTS_VERSION = 2; public static final int PROFILING_STACKFRAMES_VERSION = 1; public static final int PROFILING_STACKTRACES_VERSION = 1; public static final int PROFILING_SYMBOLS_VERSION = 1; diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java index de32754ed69ff..5c24e295909bc 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java @@ -14,6 +14,32 @@ public class HostMetadataTests extends ESTestCase { public void testCreateFromSourceAWS() { + final String hostID = "1440256254710195396"; + final String arch = "amd64"; + final String provider = "aws"; + final String region = "eu-west-1"; + final String instanceType = "md5x.large"; + + // tag::noformat + HostMetadata host = HostMetadata.fromSource ( + Map.of ( + "host.id", hostID, + "host.arch", arch, + "host.type", instanceType, + "cloud.provider", provider, + "cloud.region", region + ) + ); + // end::noformat + + assertEquals(hostID, host.hostID); + assertEquals(arch, host.hostArchitecture); + assertEquals(provider, host.instanceType.provider); + assertEquals(region, host.instanceType.region); + assertEquals(instanceType, host.instanceType.name); + } + + public void testCreateFromSourceAWSCompat() { final String hostID = "1440256254710195396"; final String arch = "x86_64"; final String provider = "aws"; @@ -21,8 +47,8 @@ public void testCreateFromSourceAWS() { final String instanceType = "md5x.large"; // tag::noformat - HostMetadata host = HostMetadata.fromSource( - Map.of( + HostMetadata host = HostMetadata.fromSource ( + Map.of ( "host.id", hostID, "host.arch", arch, "ec2.instance_type", instanceType, @@ -39,6 +65,32 @@ public void testCreateFromSourceAWS() { } public void testCreateFromSourceGCP() { + final String hostID = "1440256254710195396"; + final String arch = "amd64"; + final String provider = "gcp"; + final String[] regions = { "", "", "europe-west1", "europewest", "europe-west1" }; + + for (String region : regions) { + // tag::noformat + HostMetadata host = HostMetadata.fromSource ( + Map.of ( + "host.id", hostID, + "host.arch", arch, + "cloud.provider", provider, + "cloud.region", region + ) + ); + // end::noformat + + assertEquals(hostID, host.hostID); + assertEquals(arch, host.hostArchitecture); + assertEquals(provider, host.instanceType.provider); + assertEquals(region, host.instanceType.region); + assertEquals("", host.instanceType.name); + } + } + + public void testCreateFromSourceGCPCompat() { final String hostID = "1440256254710195396"; final String arch = "x86_64"; final String provider = "gcp"; @@ -142,8 +194,8 @@ public void testCreateFromSourceECS() { Map.of( "host.id", hostID, "host.arch", arch, - "profiling.host.tags", Arrays.asList( - "cloud_provider:"+provider, "cloud_environment:qa", "cloud_region:"+region) + "profiling.host.tags", Arrays.asList ( + "cloud_provider:" + provider, "cloud_environment:qa", "cloud_region:" + region) ) ); // end::noformat diff --git 
a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml index 4697141bfc599..cc282d26ae418 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml @@ -116,7 +116,7 @@ setup: - {"create": {"_index": "profiling-executables", "_id": "lHp5_WAgpLy2alrUVab6HA"}} - {"@timestamp": "1698624000", "Executable": {"build": {"id": "c5f89ea1c68710d2a493bb604c343a92c4f8ddeb"}, "file": {"name": "vmlinux"}}, "Symbolization": {"next_time": "4852491791"}, "ecs": {"version": "1.12.0"}} - {"create": {"_index": "profiling-hosts", "_id": "eLH27YsBj2lLi3tJYlvr"}} - - {"profiling.project.id": 100, "host.id": "8457605156473051743", "@timestamp": 1700504426, "ecs.version": "1.12.0", "profiling.agent.build_timestamp": 1688111067, "profiling.instance.private_ipv4s": ["192.168.1.2"], "ec2.instance_life_cycle": "on-demand", "profiling.agent.config.map_scale_factor": 0, "ec2.instance_type": "i3.2xlarge", "profiling.host.ip": "192.168.1.2", "profiling.agent.config.bpf_log_level": 0, "profiling.host.sysctl.net.core.bpf_jit_enable": 1, "profiling.agent.config.file": "/etc/prodfiler/prodfiler.conf", "ec2.local_ipv4": "192.168.1.2", "profiling.agent.config.no_kernel_version_check": false, "host.arch": "amd64", "profiling.host.tags": ["cloud_provider:aws", "cloud_environment:qa", "cloud_region:eu-west-1"], "profiling.agent.config.probabilistic_threshold": 100, "profiling.agent.config.disable_tls": false, "profiling.agent.config.tracers": "all", "profiling.agent.start_time": 1700090045589, "profiling.agent.config.max_elements_per_interval": 800, "ec2.placement.region": "eu-west-1", "profiling.agent.config.present_cpu_cores": 8, "profiling.host.kernel_version": "9.9.9-0-aws", "profiling.agent.config.bpf_log_size": 65536, "profiling.agent.config.known_traces_entries": 65536, "profiling.host.sysctl.kernel.unprivileged_bpf_disabled": 1, "profiling.agent.config.verbose": false, "profiling.agent.config.probabilistic_interval": "1m0s", "ec2.placement.availability_zone_id": "euw1-az1", "ec2.security_groups": "", "ec2.local_hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "ec2.placement.availability_zone": "eu-west-1c", "profiling.agent.config.upload_symbols": false, "profiling.host.sysctl.kernel.bpf_stats_enabled": 0, "profiling.host.name": "ip-192-168-1-2", "ec2.mac": "00:11:22:33:44:55", "profiling.host.kernel_proc_version": "Linux version 9.9.9-0-aws", "profiling.agent.config.cache_directory": "/var/cache/optimyze/", "profiling.agent.version": "v8.12.0", "ec2.hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "profiling.agent.config.elastic_mode": false, "ec2.ami_id": "ami-aaaaaaaaaaa", "ec2.instance_id": "i-0b999999999999999" } + - {"profiling.project.id": 100, "host.id": "8457605156473051743", "@timestamp": 1700504426, "ecs.version": "1.12.0", "profiling.agent.build_timestamp": 1688111067, "profiling.instance.private_ipv4s": ["192.168.1.2"], "ec2.instance_life_cycle": "on-demand", "profiling.agent.config.map_scale_factor": 0, "host.type": "i3.2xlarge", "profiling.host.ip": "192.168.1.2", "profiling.agent.config.bpf_log_level": 0, "profiling.host.sysctl.net.core.bpf_jit_enable": 1, "profiling.agent.config.file": "/etc/prodfiler/prodfiler.conf", "ec2.local_ipv4": "192.168.1.2", "profiling.agent.config.no_kernel_version_check": false, "host.arch": "amd64", "profiling.host.tags": 
["cloud_provider:aws", "cloud_environment:qa", "cloud_region:eu-west-1"], "profiling.agent.config.probabilistic_threshold": 100, "profiling.agent.config.disable_tls": false, "profiling.agent.config.tracers": "all", "profiling.agent.start_time": 1700090045589, "profiling.agent.config.max_elements_per_interval": 800, "cloud.provider": "aws", "cloud.region": "eu-west-1", "profiling.agent.config.present_cpu_cores": 8, "profiling.host.kernel_version": "9.9.9-0-aws", "profiling.agent.config.bpf_log_size": 65536, "profiling.agent.config.known_traces_entries": 65536, "profiling.host.sysctl.kernel.unprivileged_bpf_disabled": 1, "profiling.agent.config.verbose": false, "profiling.agent.config.probabilistic_interval": "1m0s", "ec2.placement.availability_zone_id": "euw1-az1", "ec2.security_groups": "", "ec2.local_hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "ec2.placement.availability_zone": "eu-west-1c", "profiling.agent.config.upload_symbols": false, "profiling.host.sysctl.kernel.bpf_stats_enabled": 0, "profiling.host.name": "ip-192-168-1-2", "ec2.mac": "00:11:22:33:44:55", "profiling.host.kernel_proc_version": "Linux version 9.9.9-0-aws", "profiling.agent.config.cache_directory": "/var/cache/optimyze/", "profiling.agent.version": "v8.12.0", "ec2.hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "profiling.agent.config.elastic_mode": false, "ec2.ami_id": "ami-aaaaaaaaaaa", "ec2.instance_id": "i-0b999999999999999" } - {"index": {"_index": "test-events"}} - {"@timestamp": "1700504427", "events": ["S07KmaoGhvNte78xwwRbZQ"]} --- From fb17da0647b9dffe00449587dc80171eb14c9a35 Mon Sep 17 00:00:00 2001 From: yashdamani Date: Fri, 22 Mar 2024 20:36:59 +0530 Subject: [PATCH 25/79] Don't trim highlight snippet if number_of_fragments is 0 Don't trim highlight snippet if number_of_fragments is 0 Closes #101803 --- docs/changelog/106306.yaml | 6 ++ .../test/search/510_fragment_trimming_fix.yml | 62 +++++++++++++++++++ .../uhighlight/CustomPassageFormatter.java | 12 +++- .../highlight/DefaultHighlighter.java | 7 ++- .../CustomPassageFormatterTests.java | 4 +- .../CustomUnifiedHighlighterTests.java | 2 +- 6 files changed, 86 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/106306.yaml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/510_fragment_trimming_fix.yml diff --git a/docs/changelog/106306.yaml b/docs/changelog/106306.yaml new file mode 100644 index 0000000000000..571fe73c31a3e --- /dev/null +++ b/docs/changelog/106306.yaml @@ -0,0 +1,6 @@ +pr: 99961 +summary: "added fix for inconsistent text trimming in Unified Highlighter" +area: Highlighting +type: bug +issues: + - 101803 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/510_fragment_trimming_fix.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/510_fragment_trimming_fix.yml new file mode 100644 index 0000000000000..355ffeebfb1d3 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/510_fragment_trimming_fix.yml @@ -0,0 +1,62 @@ +setup: + - skip: + version: ' - 8.13.99' + reason: 'no trimming highlight snippets when number_of_fragments is 0 was introduced in 8.14' + - do: + indices.create: + index: test_trim + body: + mappings: + properties: + text: + type: text + analyzer: whitespace + + - do: + bulk: + refresh: true + body: + - index: + _index: test_trim + _id: 1 + - text: " a b c d " + +--- +"Test unified highlighter with custom passage formatter and number_of_fragments > 0": + - do: + search: + 
index: test_trim + body: + query: + match: + text: "c" + highlight: + type: unified + number_of_fragments: 1 + fields: + text: + pre_tags: ["("] + post_tags: [")"] + + - match: { hits.total.value: 1 } + - match: { hits.hits.0.highlight.text: ["a b (c) d"] } + +--- +"Test unified highlighter with custom passage formatter when number_of_fragments = 0": + - do: + search: + index: test_trim + body: + query: + match: + text: "c" + highlight: + type: unified + number_of_fragments: 0 + fields: + text: + pre_tags: ["("] + post_tags: [")"] + + - match: { hits.total.value: 1 } + - match: { hits.hits.0.highlight.text: [" a b (c) d "] } diff --git a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java index 6ae2f53a94ad8..41a68494e7cbb 100644 --- a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java +++ b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java @@ -23,11 +23,13 @@ public class CustomPassageFormatter extends PassageFormatter { private final String preTag; private final String postTag; private final Encoder encoder; + private final int numberOfFragments; - public CustomPassageFormatter(String preTag, String postTag, Encoder encoder) { + public CustomPassageFormatter(String preTag, String postTag, Encoder encoder, int numberOfFragments) { this.preTag = preTag; this.postTag = postTag; this.encoder = encoder; + this.numberOfFragments = numberOfFragments; } @Override @@ -66,8 +68,12 @@ public Snippet[] format(Passage[] passages, String content) { } else if (sb.charAt(sb.length() - 1) == HighlightUtils.NULL_SEPARATOR) { sb.deleteCharAt(sb.length() - 1); } - // and we trim the snippets too - snippets[j] = new Snippet(sb.toString().trim(), passage.getScore(), passage.getNumMatches() > 0); + // and we trim the snippets too, if the number of fragments > 0 + if (numberOfFragments == 0) { + snippets[j] = new Snippet(sb.toString(), passage.getScore(), passage.getNumMatches() > 0); + } else { + snippets[j] = new Snippet(sb.toString().trim(), passage.getScore(), passage.getNumMatches() > 0); + } } return snippets; } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java index e77436ba61423..da1be48e6b2c0 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java @@ -161,7 +161,12 @@ CustomUnifiedHighlighter buildHighlighter(FieldHighlightContext fieldContext) { } protected PassageFormatter getPassageFormatter(SearchHighlightContext.Field field, Encoder encoder) { - return new CustomPassageFormatter(field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0], encoder); + return new CustomPassageFormatter( + field.fieldOptions().preTags()[0], + field.fieldOptions().postTags()[0], + encoder, + field.fieldOptions().numberOfFragments() + ); } protected Analyzer wrapAnalyzer(Analyzer analyzer, Integer maxAnalyzedOffset) { diff --git a/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatterTests.java b/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatterTests.java index c1ecaf12828d3..10db924f25f4b 100644 --- 
a/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatterTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatterTests.java @@ -21,7 +21,7 @@ public class CustomPassageFormatterTests extends ESTestCase { public void testSimpleFormat() { String content = "This is a really cool highlighter. Unified highlighter gives nice snippets back. No matches here."; - CustomPassageFormatter passageFormatter = new CustomPassageFormatter("", "", new DefaultEncoder()); + CustomPassageFormatter passageFormatter = new CustomPassageFormatter("", "", new DefaultEncoder(), 3); Passage[] passages = new Passage[3]; String match = "highlighter"; @@ -62,7 +62,7 @@ public void testSimpleFormat() { public void testHtmlEncodeFormat() { String content = "This is a really cool highlighter. Unified highlighter gives nice snippets back."; - CustomPassageFormatter passageFormatter = new CustomPassageFormatter("", "", new SimpleHTMLEncoder()); + CustomPassageFormatter passageFormatter = new CustomPassageFormatter("", "", new SimpleHTMLEncoder(), 3); Passage[] passages = new Passage[2]; String match = "highlighter"; diff --git a/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java b/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java index bf249ba4409ab..8412cc241f51a 100644 --- a/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java @@ -145,7 +145,7 @@ private void assertHighlightOneDoc( UnifiedHighlighter.Builder builder = UnifiedHighlighter.builder(searcher, analyzer); builder.withBreakIterator(() -> breakIterator); builder.withFieldMatcher(name -> "text".equals(name)); - builder.withFormatter(new CustomPassageFormatter("", "", new DefaultEncoder())); + builder.withFormatter(new CustomPassageFormatter("", "", new DefaultEncoder(), 3)); CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter( builder, offsetSource, From cbc418235abc81c44b8809b971584f53a823629b Mon Sep 17 00:00:00 2001 From: Fernando Briano Date: Fri, 22 Mar 2024 15:25:38 +0000 Subject: [PATCH 26/79] Updates REST API test issue69009.yml (#106663) --- .../rest-api-spec/test/search.highlight/issue69009.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/issue69009.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/issue69009.yml index 8b2f2f90dd0ee..cd3751dbb9653 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/issue69009.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/issue69009.yml @@ -29,7 +29,7 @@ setup: search: index: test body: { - "query": { "match": { "fox" } }, + "query": { "match": { "text": "fox" } }, "highlight": { "type": "unified", "fields": { "*": { } }, @@ -45,7 +45,7 @@ setup: search: index: test body: { - "query": { "match": { "fox" } }, + "query": { "match": { "text": "fox" } }, "highlight": { "type": "plain", "fields": { "*": { } }, @@ -61,7 +61,7 @@ setup: search: index: test body: { - "query": { "match": { "fox" } }, + "query": { "match": { "text": "fox" } }, "highlight": { "type": "fvh", "fields": { "*": { } }, From ecb422380fddbb3ea024c64405d8bc529c982d4f Mon Sep 17 00:00:00 2001 From: Mark Tozzi 
Date: Fri, 22 Mar 2024 12:48:00 -0400 Subject: [PATCH 27/79] [ESQL] Migrate PropagateEquals optimization (#106627) Relates to #105217 This copies the PropagateEquals logical optimization into ESQL, following the pattern established in #106499. I've copied the optimization rule into the ESQL version of OptimizerRules, and the tests into OpitmizerRulesTests, and changed the imports &c to point to the appropriate ESQL classes instead of their QL counterparts. I expect to have several more PRs following this pattern, for the remaining logical optimizations that touch the binary comparison logic. I'm intending to make separate PRs for each, in the interest of making them easier to review. --- .../esql/optimizer/LogicalPlanOptimizer.java | 3 +- .../xpack/esql/optimizer/OptimizerRules.java | 342 ++++++++++++++++ .../esql/optimizer/OptimizerRulesTests.java | 383 +++++++++++++++++- 3 files changed, 715 insertions(+), 13 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index 2879173a6f5ad..af8ad7a1fc435 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -82,7 +82,6 @@ import static java.util.Collections.singleton; import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputExpressions; import static org.elasticsearch.xpack.ql.expression.Expressions.asAttributes; -import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.PropagateEquals; import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.TransformDirection; import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.TransformDirection.DOWN; @@ -126,7 +125,7 @@ protected static Batch operators() { new BooleanSimplification(), new LiteralsOnTheRight(), // needs to occur before BinaryComparison combinations (see class) - new PropagateEquals(), + new org.elasticsearch.xpack.esql.optimizer.OptimizerRules.PropagateEquals(), new PropagateNullable(), new BooleanFunctionEqualsElimination(), new org.elasticsearch.xpack.esql.optimizer.OptimizerRules.CombineDisjunctionsToIn(), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java index e375f11ab3ae7..3ae662580a200 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java @@ -8,7 +8,13 @@ package org.elasticsearch.xpack.esql.optimizer; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NullEquals; import 
org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; @@ -34,14 +40,23 @@ import org.elasticsearch.xpack.ql.expression.AttributeSet; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Expressions; +import org.elasticsearch.xpack.ql.expression.Literal; +import org.elasticsearch.xpack.ql.expression.predicate.Predicates; +import org.elasticsearch.xpack.ql.expression.predicate.Range; +import org.elasticsearch.xpack.ql.expression.predicate.logical.And; +import org.elasticsearch.xpack.ql.expression.predicate.logical.BinaryLogic; import org.elasticsearch.xpack.ql.expression.predicate.logical.Or; +import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.ql.plan.QueryPlan; import org.elasticsearch.xpack.ql.plan.logical.Aggregate; import org.elasticsearch.xpack.ql.plan.logical.EsRelation; import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.ql.type.DataTypes; +import org.elasticsearch.xpack.ql.util.CollectionUtils; import java.time.ZoneId; import java.util.ArrayList; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.LinkedList; @@ -50,6 +65,7 @@ import java.util.Set; import static org.elasticsearch.xpack.ql.common.Failure.fail; +import static org.elasticsearch.xpack.ql.expression.Literal.TRUE; import static org.elasticsearch.xpack.ql.expression.predicate.Predicates.combineOr; import static org.elasticsearch.xpack.ql.expression.predicate.Predicates.splitOr; @@ -240,4 +256,330 @@ protected Expression rule(Or or) { return e; } } + + /** + * Propagate Equals to eliminate conjuncted Ranges or BinaryComparisons. + * When encountering a different Equals, non-containing {@link Range} or {@link BinaryComparison}, the conjunction becomes false. + * When encountering a containing {@link Range}, {@link BinaryComparison} or {@link NotEquals}, these get eliminated by the equality. + * + * Since this rule can eliminate Ranges and BinaryComparisons, it should be applied before + * {@link org.elasticsearch.xpack.ql.optimizer.OptimizerRules.CombineBinaryComparisons}. + * + * This rule doesn't perform any promotion of {@link BinaryComparison}s, that is handled by + * {@link org.elasticsearch.xpack.ql.optimizer.OptimizerRules.CombineBinaryComparisons} on purpose as the resulting Range might be + * foldable (which is picked by the folding rule on the next run). + */ + public static final class PropagateEquals extends org.elasticsearch.xpack.ql.optimizer.OptimizerRules.OptimizerExpressionRule< + BinaryLogic> { + + PropagateEquals() { + super(org.elasticsearch.xpack.ql.optimizer.OptimizerRules.TransformDirection.DOWN); + } + + public Expression rule(BinaryLogic e) { + if (e instanceof And) { + return propagate((And) e); + } else if (e instanceof Or) { + return propagate((Or) e); + } + return e; + } + + // combine conjunction + private static Expression propagate(And and) { + List ranges = new ArrayList<>(); + // Only equalities, not-equalities and inequalities with a foldable .right are extracted separately; + // the others go into the general 'exps'. 
+ // TODO: In 105217, this should change to EsqlBinaryComparison, but it doesn't exist in this branch yet + List<BinaryComparison> equals = new ArrayList<>(); + List<NotEquals> notEquals = new ArrayList<>(); + List<BinaryComparison> inequalities = new ArrayList<>(); + List<Expression> exps = new ArrayList<>(); + + boolean changed = false; + + for (Expression ex : Predicates.splitAnd(and)) { + if (ex instanceof Range) { + ranges.add((Range) ex); + } else if (ex instanceof Equals || ex instanceof NullEquals) { + BinaryComparison otherEq = (BinaryComparison) ex; + // equals on different values evaluate to FALSE + // ignore date/time fields as equality comparison might actually be a range check + if (otherEq.right().foldable() && DataTypes.isDateTime(otherEq.left().dataType()) == false) { + for (BinaryComparison eq : equals) { + if (otherEq.left().semanticEquals(eq.left())) { + Integer comp = BinaryComparison.compare(eq.right().fold(), otherEq.right().fold()); + if (comp != null) { + // var cannot be equal to two different values at the same time + if (comp != 0) { + return new Literal(and.source(), Boolean.FALSE, DataTypes.BOOLEAN); + } + } + } + } + equals.add(otherEq); + } else { + exps.add(otherEq); + } + } else if (ex instanceof GreaterThan + || ex instanceof GreaterThanOrEqual + || ex instanceof LessThan + || ex instanceof LessThanOrEqual) { + BinaryComparison bc = (BinaryComparison) ex; + if (bc.right().foldable()) { + inequalities.add(bc); + } else { + exps.add(ex); + } + } else if (ex instanceof NotEquals otherNotEq) { + if (otherNotEq.right().foldable()) { + notEquals.add(otherNotEq); + } else { + exps.add(ex); + } + } else { + exps.add(ex); + } + } + + // check + for (BinaryComparison eq : equals) { + Object eqValue = eq.right().fold(); + + for (Iterator<Range> iterator = ranges.iterator(); iterator.hasNext();) { + Range range = iterator.next(); + + if (range.value().semanticEquals(eq.left())) { + // if equals is outside the interval, evaluate the whole expression to FALSE + if (range.lower().foldable()) { + Integer compare = BinaryComparison.compare(range.lower().fold(), eqValue); + if (compare != null && ( + // eq outside the lower boundary + compare > 0 || + // eq matches the boundary but should not be included + (compare == 0 && range.includeLower() == false))) { + return new Literal(and.source(), Boolean.FALSE, DataTypes.BOOLEAN); + } + } + if (range.upper().foldable()) { + Integer compare = BinaryComparison.compare(range.upper().fold(), eqValue); + if (compare != null && ( + // eq outside the upper boundary + compare < 0 || + // eq matches the boundary but should not be included + (compare == 0 && range.includeUpper() == false))) { + return new Literal(and.source(), Boolean.FALSE, DataTypes.BOOLEAN); + } + } + + // it's in the range and thus, remove it + iterator.remove(); + changed = true; + } + } + + // evaluate all NotEquals against the Equal + for (Iterator<NotEquals> iter = notEquals.iterator(); iter.hasNext();) { + NotEquals neq = iter.next(); + if (eq.left().semanticEquals(neq.left())) { + Integer comp = BinaryComparison.compare(eqValue, neq.right().fold()); + if (comp != null) { + if (comp == 0) { // clashing and conflicting: a = 1 AND a != 1 + return new Literal(and.source(), Boolean.FALSE, DataTypes.BOOLEAN); + } else { // clashing and redundant: a = 1 AND a != 2 + iter.remove(); + changed = true; + } + } + } + } + + // evaluate all inequalities against the Equal + for (Iterator<BinaryComparison> iter = inequalities.iterator(); iter.hasNext();) { + BinaryComparison bc = iter.next(); + if (eq.left().semanticEquals(bc.left())) { + Integer compare =
BinaryComparison.compare(eqValue, bc.right().fold()); + if (compare != null) { + if (bc instanceof LessThan || bc instanceof LessThanOrEqual) { // a = 2 AND a </<= ? + if ((compare == 0 && bc instanceof LessThan) || // a = 2 AND a < 2 + 0 < compare) { // a = 2 AND a </<= 1 + return new Literal(and.source(), Boolean.FALSE, DataTypes.BOOLEAN); + } + } else if (bc instanceof GreaterThan || bc instanceof GreaterThanOrEqual) { // a = 2 AND a >/>= ? + if ((compare == 0 && bc instanceof GreaterThan) || // a = 2 AND a > 2 + compare < 0) { // a = 2 AND a >/>= 3 + return new Literal(and.source(), Boolean.FALSE, DataTypes.BOOLEAN); + } + } + + iter.remove(); + changed = true; + } + } + } + } + + return changed ? Predicates.combineAnd(CollectionUtils.combine(exps, equals, notEquals, inequalities, ranges)) : and; + } + + // combine disjunction: + // a = 2 OR a > 3 -> nop; a = 2 OR a > 1 -> a > 1 + // a = 2 OR a < 3 -> a < 3; a = 2 OR a < 1 -> nop + // a = 2 OR 3 < a < 5 -> nop; a = 2 OR 1 < a < 3 -> 1 < a < 3; a = 2 OR 0 < a < 1 -> nop + // a = 2 OR a != 2 -> TRUE; a = 2 OR a = 5 -> nop; a = 2 OR a != 5 -> a != 5 + private static Expression propagate(Or or) { + List<Expression> exps = new ArrayList<>(); + List<Equals> equals = new ArrayList<>(); // foldable right term Equals + List<NotEquals> notEquals = new ArrayList<>(); // foldable right term NotEquals + List<Range> ranges = new ArrayList<>(); + List<BinaryComparison> inequalities = new ArrayList<>(); // foldable right term (=limit) BinaryComparision + + // split expressions by type + for (Expression ex : Predicates.splitOr(or)) { + if (ex instanceof Equals eq) { + if (eq.right().foldable()) { + equals.add(eq); + } else { + exps.add(ex); + } + } else if (ex instanceof NotEquals neq) { + if (neq.right().foldable()) { + notEquals.add(neq); + } else { + exps.add(ex); + } + } else if (ex instanceof Range) { + ranges.add((Range) ex); + } else if (ex instanceof BinaryComparison bc) { + if (bc.right().foldable()) { + inequalities.add(bc); + } else { + exps.add(ex); + } + } else { + exps.add(ex); + } + } + + boolean updated = false; // has the expression been modified? + + // evaluate the impact of each Equal over the different types of Expressions + for (Iterator<Equals> iterEq = equals.iterator(); iterEq.hasNext();) { + Equals eq = iterEq.next(); + Object eqValue = eq.right().fold(); + boolean removeEquals = false; + + // Equals OR NotEquals + for (NotEquals neq : notEquals) { + if (eq.left().semanticEquals(neq.left())) { // a = 2 OR a != ? -> ... + Integer comp = BinaryComparison.compare(eqValue, neq.right().fold()); + if (comp != null) { + if (comp == 0) { // a = 2 OR a != 2 -> TRUE + return TRUE; + } else { // a = 2 OR a != 5 -> a != 5 + removeEquals = true; + break; + } + } + } + } + if (removeEquals) { + iterEq.remove(); + updated = true; + continue; + } + + // Equals OR Range + for (int i = 0; i < ranges.size(); i++) { // might modify list, so use index loop + Range range = ranges.get(i); + if (eq.left().semanticEquals(range.value())) { + Integer lowerComp = range.lower().foldable() ? BinaryComparison.compare(eqValue, range.lower().fold()) : null; + Integer upperComp = range.upper().foldable() ? BinaryComparison.compare(eqValue, range.upper().fold()) : null; + + if (lowerComp != null && lowerComp == 0) { + if (range.includeLower() == false) { // a = 2 OR 2 < a < ? -> 2 <= a < ? + ranges.set( + i, + new Range( + range.source(), + range.value(), + range.lower(), + true, + range.upper(), + range.includeUpper(), + range.zoneId() + ) + ); + } // else : a = 2 OR 2 <= a < ? -> 2 <= a < ? + removeEquals = true; // update range with lower equality instead or simply superfluous + break; + } else if (upperComp != null && upperComp == 0) { + if (range.includeUpper() == false) { // a = 2 OR ? < a < 2 -> ?
< a <= 2 + ranges.set( + i, + new Range( + range.source(), + range.value(), + range.lower(), + range.includeLower(), + range.upper(), + true, + range.zoneId() + ) + ); + } // else : a = 2 OR ? < a <= 2 -> ? < a <= 2 + removeEquals = true; // update range with upper equality instead + break; + } else if (lowerComp != null && upperComp != null) { + if (0 < lowerComp && upperComp < 0) { // a = 2 OR 1 < a < 3 + removeEquals = true; // equality is superfluous + break; + } + } + } + } + if (removeEquals) { + iterEq.remove(); + updated = true; + continue; + } + + // Equals OR Inequality + for (int i = 0; i < inequalities.size(); i++) { + BinaryComparison bc = inequalities.get(i); + if (eq.left().semanticEquals(bc.left())) { + Integer comp = BinaryComparison.compare(eqValue, bc.right().fold()); + if (comp != null) { + if (bc instanceof GreaterThan || bc instanceof GreaterThanOrEqual) { + if (comp < 0) { // a = 1 OR a > 2 -> nop + continue; + } else if (comp == 0 && bc instanceof GreaterThan) { // a = 2 OR a > 2 -> a >= 2 + inequalities.set(i, new GreaterThanOrEqual(bc.source(), bc.left(), bc.right(), bc.zoneId())); + } // else (0 < comp || bc instanceof GreaterThanOrEqual) : + // a = 3 OR a > 2 -> a > 2; a = 2 OR a => 2 -> a => 2 + + removeEquals = true; // update range with equality instead or simply superfluous + break; + } else if (bc instanceof LessThan || bc instanceof LessThanOrEqual) { + if (comp > 0) { // a = 2 OR a < 1 -> nop + continue; + } + if (comp == 0 && bc instanceof LessThan) { // a = 2 OR a < 2 -> a <= 2 + inequalities.set(i, new LessThanOrEqual(bc.source(), bc.left(), bc.right(), bc.zoneId())); + } // else (comp < 0 || bc instanceof LessThanOrEqual) : a = 2 OR a < 3 -> a < 3; a = 2 OR a <= 2 -> a <= 2 + removeEquals = true; // update range with equality instead or simply superfluous + break; + } + } + } + } + if (removeEquals) { + iterEq.remove(); + updated = true; + } + } + + return updated ? 
Predicates.combineOr(CollectionUtils.combine(exps, equals, notEquals, inequalities, ranges)) : or; + } + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java index dd9704d57b12a..1aac8efbe6f65 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java @@ -9,11 +9,19 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NullEquals; +import org.elasticsearch.xpack.ql.TestUtils; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.FieldAttribute; import org.elasticsearch.xpack.ql.expression.Literal; +import org.elasticsearch.xpack.ql.expression.predicate.Predicates; +import org.elasticsearch.xpack.ql.expression.predicate.Range; import org.elasticsearch.xpack.ql.expression.predicate.logical.And; import org.elasticsearch.xpack.ql.expression.predicate.logical.Or; import org.elasticsearch.xpack.ql.plan.logical.Filter; @@ -24,8 +32,12 @@ import java.util.List; import static java.util.Arrays.asList; -import static org.elasticsearch.xpack.ql.TestUtils.getFieldAttribute; +import static org.elasticsearch.xpack.ql.TestUtils.equalsOf; +import static org.elasticsearch.xpack.ql.TestUtils.nullEqualsOf; +import static org.elasticsearch.xpack.ql.TestUtils.rangeOf; import static org.elasticsearch.xpack.ql.TestUtils.relation; +import static org.elasticsearch.xpack.ql.expression.Literal.FALSE; +import static org.elasticsearch.xpack.ql.expression.Literal.TRUE; import static org.elasticsearch.xpack.ql.tree.Source.EMPTY; import static org.hamcrest.Matchers.contains; @@ -33,6 +45,8 @@ public class OptimizerRulesTests extends ESTestCase { private static final Literal ONE = new Literal(Source.EMPTY, 1, DataTypes.INTEGER); private static final Literal TWO = new Literal(Source.EMPTY, 2, DataTypes.INTEGER); private static final Literal THREE = new Literal(Source.EMPTY, 3, DataTypes.INTEGER); + private static final Literal FOUR = new Literal(Source.EMPTY, 4, DataTypes.INTEGER); + private static final Literal FIVE = new Literal(Source.EMPTY, 5, DataTypes.INTEGER); private static Equals equalsOf(Expression left, Expression right) { return new Equals(EMPTY, left, right, null); @@ -42,11 +56,35 @@ private static LessThan lessThanOf(Expression left, Expression right) { return new LessThan(EMPTY, left, right, null); } + public static GreaterThan greaterThanOf(Expression left, Expression right) { + return new GreaterThan(EMPTY, left, right, randomZone()); + } + + public static NotEquals notEqualsOf(Expression left, Expression right) { + return new NotEquals(EMPTY, left, right, randomZone()); + } 
+ + public static NullEquals nullEqualsOf(Expression left, Expression right) { + return new NullEquals(EMPTY, left, right, randomZone()); + } + + public static LessThanOrEqual lessThanOrEqualOf(Expression left, Expression right) { + return new LessThanOrEqual(EMPTY, left, right, randomZone()); + } + + public static GreaterThanOrEqual greaterThanOrEqualOf(Expression left, Expression right) { + return new GreaterThanOrEqual(EMPTY, left, right, randomZone()); + } + + private static FieldAttribute getFieldAttribute() { + return TestUtils.getFieldAttribute("a"); + } + // // CombineDisjunction in Equals // public void testTwoEqualsWithOr() { - FieldAttribute fa = getFieldAttribute("a"); + FieldAttribute fa = getFieldAttribute(); Or or = new Or(EMPTY, equalsOf(fa, ONE), equalsOf(fa, TWO)); Expression e = new OptimizerRules.CombineDisjunctionsToIn().rule(or); @@ -57,7 +95,7 @@ public void testTwoEqualsWithOr() { } public void testTwoEqualsWithSameValue() { - FieldAttribute fa = getFieldAttribute("a"); + FieldAttribute fa = getFieldAttribute(); Or or = new Or(EMPTY, equalsOf(fa, ONE), equalsOf(fa, ONE)); Expression e = new OptimizerRules.CombineDisjunctionsToIn().rule(or); @@ -68,7 +106,7 @@ public void testTwoEqualsWithSameValue() { } public void testOneEqualsOneIn() { - FieldAttribute fa = getFieldAttribute("a"); + FieldAttribute fa = getFieldAttribute(); Or or = new Or(EMPTY, equalsOf(fa, ONE), new In(EMPTY, fa, List.of(TWO))); Expression e = new OptimizerRules.CombineDisjunctionsToIn().rule(or); @@ -79,7 +117,7 @@ public void testOneEqualsOneIn() { } public void testOneEqualsOneInWithSameValue() { - FieldAttribute fa = getFieldAttribute("a"); + FieldAttribute fa = getFieldAttribute(); Or or = new Or(EMPTY, equalsOf(fa, ONE), new In(EMPTY, fa, asList(ONE, TWO))); Expression e = new OptimizerRules.CombineDisjunctionsToIn().rule(or); @@ -90,7 +128,7 @@ public void testOneEqualsOneInWithSameValue() { } public void testSingleValueInToEquals() { - FieldAttribute fa = getFieldAttribute("a"); + FieldAttribute fa = getFieldAttribute(); Equals equals = equalsOf(fa, ONE); Or or = new Or(EMPTY, equals, new In(EMPTY, fa, List.of(ONE))); @@ -99,7 +137,7 @@ public void testSingleValueInToEquals() { } public void testEqualsBehindAnd() { - FieldAttribute fa = getFieldAttribute("a"); + FieldAttribute fa = getFieldAttribute(); And and = new And(EMPTY, equalsOf(fa, ONE), equalsOf(fa, TWO)); Filter dummy = new Filter(EMPTY, relation(), and); @@ -109,8 +147,8 @@ public void testEqualsBehindAnd() { } public void testTwoEqualsDifferentFields() { - FieldAttribute fieldOne = getFieldAttribute("ONE"); - FieldAttribute fieldTwo = getFieldAttribute("TWO"); + FieldAttribute fieldOne = TestUtils.getFieldAttribute("ONE"); + FieldAttribute fieldTwo = TestUtils.getFieldAttribute("TWO"); Or or = new Or(EMPTY, equalsOf(fieldOne, ONE), equalsOf(fieldTwo, TWO)); Expression e = new OptimizerRules.CombineDisjunctionsToIn().rule(or); @@ -118,7 +156,7 @@ public void testTwoEqualsDifferentFields() { } public void testMultipleIn() { - FieldAttribute fa = getFieldAttribute("a"); + FieldAttribute fa = getFieldAttribute(); Or firstOr = new Or(EMPTY, new In(EMPTY, fa, List.of(ONE)), new In(EMPTY, fa, List.of(TWO))); Or secondOr = new Or(EMPTY, firstOr, new In(EMPTY, fa, List.of(THREE))); @@ -130,7 +168,7 @@ public void testMultipleIn() { } public void testOrWithNonCombinableExpressions() { - FieldAttribute fa = getFieldAttribute("a"); + FieldAttribute fa = getFieldAttribute(); Or firstOr = new Or(EMPTY, new In(EMPTY, fa, List.of(ONE)), 
lessThanOf(fa, TWO)); Or secondOr = new Or(EMPTY, firstOr, new In(EMPTY, fa, List.of(THREE))); @@ -143,4 +181,327 @@ public void testOrWithNonCombinableExpressions() { assertEquals(fa, in.value()); assertThat(in.list(), contains(ONE, THREE)); } + + // a == 1 AND a == 2 -> FALSE + public void testDualEqualsConjunction() { + FieldAttribute fa = getFieldAttribute(); + Equals eq1 = equalsOf(fa, ONE); + Equals eq2 = equalsOf(fa, TWO); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq1, eq2)); + assertEquals(FALSE, exp); + } + + // a <=> 1 AND a <=> 2 -> FALSE + public void testDualNullEqualsConjunction() { + FieldAttribute fa = getFieldAttribute(); + NullEquals eq1 = nullEqualsOf(fa, ONE); + NullEquals eq2 = nullEqualsOf(fa, TWO); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq1, eq2)); + assertEquals(FALSE, exp); + } + + // 1 < a < 10 AND a == 10 -> FALSE + public void testEliminateRangeByEqualsOutsideInterval() { + FieldAttribute fa = getFieldAttribute(); + Equals eq1 = equalsOf(fa, new Literal(EMPTY, 10, DataTypes.INTEGER)); + Range r = rangeOf(fa, ONE, false, new Literal(EMPTY, 10, DataTypes.INTEGER), false); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq1, r)); + assertEquals(FALSE, exp); + } + + // 1 < a < 10 AND a <=> 10 -> FALSE + public void testEliminateRangeByNullEqualsOutsideInterval() { + FieldAttribute fa = getFieldAttribute(); + NullEquals eq1 = nullEqualsOf(fa, new Literal(EMPTY, 10, DataTypes.INTEGER)); + Range r = rangeOf(fa, ONE, false, new Literal(EMPTY, 10, DataTypes.INTEGER), false); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq1, r)); + assertEquals(FALSE, exp); + } + + // a != 3 AND a = 3 -> FALSE + public void testPropagateEquals_VarNeq3AndVarEq3() { + FieldAttribute fa = getFieldAttribute(); + NotEquals neq = notEqualsOf(fa, THREE); + Equals eq = equalsOf(fa, THREE); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, neq, eq)); + assertEquals(FALSE, exp); + } + + // a != 4 AND a = 3 -> a = 3 + public void testPropagateEquals_VarNeq4AndVarEq3() { + FieldAttribute fa = getFieldAttribute(); + NotEquals neq = notEqualsOf(fa, FOUR); + Equals eq = equalsOf(fa, THREE); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, neq, eq)); + assertEquals(Equals.class, exp.getClass()); + assertEquals(eq, exp); + } + + // a = 2 AND a < 2 -> FALSE + public void testPropagateEquals_VarEq2AndVarLt2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + LessThan lt = lessThanOf(fa, TWO); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, lt)); + assertEquals(FALSE, exp); + } + + // a = 2 AND a <= 2 -> a = 2 + public void testPropagateEquals_VarEq2AndVarLte2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + LessThanOrEqual lt = lessThanOrEqualOf(fa, TWO); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, lt)); + assertEquals(eq, exp); + } + + // a = 2 AND a <= 1 -> FALSE + public void testPropagateEquals_VarEq2AndVarLte1() { + 
FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + LessThanOrEqual lt = lessThanOrEqualOf(fa, ONE); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, lt)); + assertEquals(FALSE, exp); + } + + // a = 2 AND a > 2 -> FALSE + public void testPropagateEquals_VarEq2AndVarGt2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + GreaterThan gt = greaterThanOf(fa, TWO); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, gt)); + assertEquals(FALSE, exp); + } + + // a = 2 AND a >= 2 -> a = 2 + public void testPropagateEquals_VarEq2AndVarGte2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + GreaterThanOrEqual gte = greaterThanOrEqualOf(fa, TWO); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, gte)); + assertEquals(eq, exp); + } + + // a = 2 AND a > 3 -> FALSE + public void testPropagateEquals_VarEq2AndVarLt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + GreaterThan gt = greaterThanOf(fa, THREE); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, gt)); + assertEquals(FALSE, exp); + } + + // a = 2 AND a < 3 AND a > 1 AND a != 4 -> a = 2 + public void testPropagateEquals_VarEq2AndVarLt3AndVarGt1AndVarNeq4() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + LessThan lt = lessThanOf(fa, THREE); + GreaterThan gt = greaterThanOf(fa, ONE); + NotEquals neq = notEqualsOf(fa, FOUR); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression and = Predicates.combineAnd(asList(eq, lt, gt, neq)); + Expression exp = rule.rule((And) and); + assertEquals(eq, exp); + } + + // a = 2 AND 1 < a < 3 AND a > 0 AND a != 4 -> a = 2 + public void testPropagateEquals_VarEq2AndVarRangeGt1Lt3AndVarGt0AndVarNeq4() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + Range range = rangeOf(fa, ONE, false, THREE, false); + GreaterThan gt = greaterThanOf(fa, new Literal(EMPTY, 0, DataTypes.INTEGER)); + NotEquals neq = notEqualsOf(fa, FOUR); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression and = Predicates.combineAnd(asList(eq, range, gt, neq)); + Expression exp = rule.rule((And) and); + assertEquals(eq, exp); + } + + // a = 2 OR a > 1 -> a > 1 + public void testPropagateEquals_VarEq2OrVarGt1() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + GreaterThan gt = greaterThanOf(fa, ONE); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, gt)); + assertEquals(gt, exp); + } + + // a = 2 OR a > 2 -> a >= 2 + public void testPropagateEquals_VarEq2OrVarGte2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + GreaterThan gt = greaterThanOf(fa, TWO); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, gt)); + assertEquals(GreaterThanOrEqual.class, exp.getClass()); + GreaterThanOrEqual gte = (GreaterThanOrEqual) exp; + assertEquals(TWO, gte.right()); + } + + // a = 2 OR a < 3 -> a < 3 + public void testPropagateEquals_VarEq2OrVarLt3() { + FieldAttribute fa = getFieldAttribute(); + 
Equals eq = equalsOf(fa, TWO); + LessThan lt = lessThanOf(fa, THREE); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, lt)); + assertEquals(lt, exp); + } + + // a = 3 OR a < 3 -> a <= 3 + public void testPropagateEquals_VarEq3OrVarLt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, THREE); + LessThan lt = lessThanOf(fa, THREE); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, lt)); + assertEquals(LessThanOrEqual.class, exp.getClass()); + LessThanOrEqual lte = (LessThanOrEqual) exp; + assertEquals(THREE, lte.right()); + } + + // a = 2 OR 1 < a < 3 -> 1 < a < 3 + public void testPropagateEquals_VarEq2OrVarRangeGt1Lt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + Range range = rangeOf(fa, ONE, false, THREE, false); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, range)); + assertEquals(range, exp); + } + + // a = 2 OR 2 < a < 3 -> 2 <= a < 3 + public void testPropagateEquals_VarEq2OrVarRangeGt2Lt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + Range range = rangeOf(fa, TWO, false, THREE, false); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, range)); + assertEquals(Range.class, exp.getClass()); + Range r = (Range) exp; + assertEquals(TWO, r.lower()); + assertTrue(r.includeLower()); + assertEquals(THREE, r.upper()); + assertFalse(r.includeUpper()); + } + + // a = 3 OR 2 < a < 3 -> 2 < a <= 3 + public void testPropagateEquals_VarEq3OrVarRangeGt2Lt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, THREE); + Range range = rangeOf(fa, TWO, false, THREE, false); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, range)); + assertEquals(Range.class, exp.getClass()); + Range r = (Range) exp; + assertEquals(TWO, r.lower()); + assertFalse(r.includeLower()); + assertEquals(THREE, r.upper()); + assertTrue(r.includeUpper()); + } + + // a = 2 OR a != 2 -> TRUE + public void testPropagateEquals_VarEq2OrVarNeq2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + NotEquals neq = notEqualsOf(fa, TWO); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, neq)); + assertEquals(TRUE, exp); + } + + // a = 2 OR a != 5 -> a != 5 + public void testPropagateEquals_VarEq2OrVarNeq5() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + NotEquals neq = notEqualsOf(fa, FIVE); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, neq)); + assertEquals(NotEquals.class, exp.getClass()); + NotEquals ne = (NotEquals) exp; + assertEquals(FIVE, ne.right()); + } + + // a = 2 OR 3 < a < 4 OR a > 2 OR a!= 2 -> TRUE + public void testPropagateEquals_VarEq2OrVarRangeGt3Lt4OrVarGt2OrVarNe2() { + FieldAttribute fa = getFieldAttribute(); + org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals eq = equalsOf(fa, TWO); + Range range = rangeOf(fa, THREE, false, FOUR, false); + GreaterThan gt = greaterThanOf(fa, TWO); + NotEquals neq = notEqualsOf(fa, TWO); + + OptimizerRules.PropagateEquals rule = new 
OptimizerRules.PropagateEquals(); + Expression exp = rule.rule((Or) Predicates.combineOr(asList(eq, range, neq, gt))); + assertEquals(TRUE, exp); + } + + // a == 1 AND a == 2 -> nop for date/time fields + public void testPropagateEquals_ignoreDateTimeFields() { + FieldAttribute fa = TestUtils.getFieldAttribute("a", DataTypes.DATETIME); + Equals eq1 = equalsOf(fa, ONE); + Equals eq2 = equalsOf(fa, TWO); + And and = new And(EMPTY, eq1, eq2); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(and); + assertEquals(and, exp); + } + + // 1 <= a < 10 AND a == 1 -> a == 1 + public void testEliminateRangeByEqualsInInterval() { + FieldAttribute fa = getFieldAttribute(); + Equals eq1 = equalsOf(fa, ONE); + Range r = rangeOf(fa, ONE, true, new Literal(EMPTY, 10, DataTypes.INTEGER), false); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq1, r)); + assertEquals(eq1, exp); + } + + // 1 <= a < 10 AND a <=> 1 -> a <=> 1 + public void testEliminateRangeByNullEqualsInInterval() { + FieldAttribute fa = getFieldAttribute(); + NullEquals eq1 = nullEqualsOf(fa, ONE); + Range r = rangeOf(fa, ONE, true, new Literal(EMPTY, 10, DataTypes.INTEGER), false); + + OptimizerRules.PropagateEquals rule = new OptimizerRules.PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq1, r)); + assertEquals(eq1, exp); + } } From 1259aeea9b2eb1f1b29f43203b74c7a73d2fc4e8 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Mar 2024 13:44:10 -0400 Subject: [PATCH 28/79] ESQL: Switch more tests from version checks (#106332) This switches another pile of ESQL's tests from checking the release version to checking for feature markers. --- .../src/main/resources/boolean.csv-spec | 16 ++++-- .../src/main/resources/conditional.csv-spec | 20 +++++-- .../src/main/resources/date.csv-spec | 4 +- .../src/main/resources/eval.csv-spec | 4 +- .../src/main/resources/floats.csv-spec | 12 +++- .../src/main/resources/ints.csv-spec | 56 ++++++++++++++----- .../src/main/resources/ip.csv-spec | 16 ++++-- .../src/main/resources/spatial.csv-spec | 16 ++++-- .../src/main/resources/string.csv-spec | 20 +++++-- .../xpack/esql/plugin/EsqlFeatures.java | 30 ++++++++-- 10 files changed, 148 insertions(+), 46 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec index 1406028b2c81f..2713660cd47d8 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec @@ -235,14 +235,18 @@ emp_no:integer |languages:integer |byte2bool:boolean |short2bool:boolean 10030 |3 |true |true ; -mvSort#[skip:-8.13.99, reason:newly added in 8.14] +mvSort +required_feature: esql.mv_sort + row a = [true, false, true, false] | eval sa = mv_sort(a), sb = mv_sort(a, "DESC"); a:boolean | sa:boolean | sb:boolean [true, false, true, false] | [false, false, true, true] | [true, true, false, false] ; -mvSortEmp#[skip:-8.13.99, reason:newly added in 8.14] +mvSortEmp +required_feature: esql.mv_sort + FROM employees | eval sd = mv_sort(is_rehired, "DESC"), sa = mv_sort(is_rehired) | sort emp_no @@ -258,7 +262,9 @@ emp_no:integer | is_rehired:boolean | sa:boolean | sd:boolea 10005 | [false,false,false,true] | [false,false,false,true] | [true,false,false,false] ; -mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +mvSlice +required_feature: 
esql.mv_sort + row a = [true, false, false, true] | eval a1 = mv_slice(a, 1), a2 = mv_slice(a, 2, 3); @@ -266,7 +272,9 @@ a:boolean | a1:boolean | a2:boolean [true, false, false, true] | false | [false, true] ; -mvSliceEmp#[skip:-8.13.99, reason:newly added in 8.14] +mvSliceEmp +required_feature: esql.mv_sort + from employees | eval a1 = mv_slice(is_rehired, 0) | keep emp_no, is_rehired, a1 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec index 64c5a7358ce22..f574722f691e5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec @@ -129,7 +129,9 @@ error_rate:double | hour:date ; -nullOnMultivaluesMathOperation#[skip:-8.13.99,reason:fixed in 8.14+] +nullOnMultivaluesMathOperation +required_feature: esql.disable_nullable_opts + ROW a = 5, b = [ 1, 2 ]| EVAL sum = a + b| LIMIT 1 | WHERE sum IS NULL; warning:Line 1:37: evaluation of [a + b] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:37: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -139,7 +141,9 @@ a:integer | b:integer | sum:integer ; -notNullOnMultivaluesMathOperation#[skip:-8.13.99,reason:fixed in 8.14+] +notNullOnMultivaluesMathOperation +required_feature: esql.disable_nullable_opts + ROW a = 5, b = [ 1, 2 ]| EVAL sum = a + b| LIMIT 1 | WHERE sum IS NOT NULL; warning:Line 1:37: evaluation of [a + b] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:37: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -148,7 +152,9 @@ a:integer | b:integer | sum:integer ; -nullOnMultivaluesComparisonOperation#[skip:-8.13.99,reason:fixed in 8.14+] +nullOnMultivaluesComparisonOperation +required_feature: esql.disable_nullable_opts + ROW a = 5, b = [ 1, 2 ]| EVAL same = a == b| LIMIT 1 | WHERE same IS NULL; a:integer | b:integer | same:boolean @@ -156,14 +162,18 @@ a:integer | b:integer | same:boolean ; -notNullOnMultivaluesComparisonOperation#[skip:-8.13.99,reason:fixed in 8.14+] +notNullOnMultivaluesComparisonOperation +required_feature: esql.disable_nullable_opts + ROW a = 5, b = [ 1, 2 ]| EVAL same = a == b| LIMIT 1 | WHERE same IS NOT NULL; a:integer | b:integer | same:boolean ; -notNullOnMultivaluesComparisonOperationWithPartialMatch#[skip:-8.13.99,reason:fixed in 8.14+] +notNullOnMultivaluesComparisonOperationWithPartialMatch +required_feature: esql.disable_nullable_opts + ROW a = 5, b = [ 5, 2 ]| EVAL same = a == b| LIMIT 1 | WHERE same IS NOT NULL; a:integer | b:integer | same:boolean diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec index 09128eca5c18e..de7a48bcf6834 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec @@ -1154,7 +1154,9 @@ FROM sample_data // end::docsNowWhere-result[] ; -mvSort#[skip:-8.13.99, reason:newly added in 8.14] +mvSort +required_feature: esql.mv_sort + row a = ["1985-01-01T00:00:00.000Z", "1986-01-01T00:00:00.000Z", "1987-01-01T00:00:00.000Z"] | eval datetime = TO_DATETIME(a) | eval sa = mv_sort(datetime), sd = mv_sort(datetime, "DESC") diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec index 7d18d2616e376..9b06e9a0a8b23 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec @@ -200,7 +200,9 @@ Chirstian. |Chirstian.Koblick|Chirstian.KoblickChirstian.|Chirstian Kyoichi. |Kyoichi.Maliniak |Kyoichi.MaliniakKyoichi. |Kyoichi ; -roundArrays#[skip:-8.13.99, reason:Alert order changed in 8.14] +roundArrays +required_feature: esql.disable_nullable_opts + row a = [1.2], b = [2.4, 7.9] | eval c = round(a), d = round(b), e = round([1.2]), f = round([1.2, 4.6]), g = round([1.14], 1), h = round([1.14], [1, 2]); warning:Line 1:56: evaluation of [round(b)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:56: java.lang.IllegalArgumentException: single-value function encountered multi-value diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec index 7e52864d0e379..75011388a9f5a 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec @@ -240,7 +240,9 @@ row a = [1.1, 2.1, 2.1] | eval da = mv_dedupe(a); [1.1, 2.1, 2.1] | [1.1, 2.1] ; -mvSliceEmp#[skip:-8.13.99, reason:newly added in 8.14] +mvSliceEmp +required_feature: esql.mv_sort + from employees | eval a1 = mv_slice(salary_change, 0, 1) | keep emp_no, salary_change, a1 @@ -455,14 +457,18 @@ ROW deg = [90.0, 180.0, 270.0] // end::to_radians-result[] ; -mvSort#[skip:-8.13.99, reason:newly added in 8.14] +mvSort +required_feature: esql.mv_sort + row a = [4.0, 2.0, -3.0, 2.0] | eval sa = mv_sort(a), sd = mv_sort(a, "DESC"); a:double | sa:double | sd:double [4.0, 2.0, -3.0, 2.0] | [-3.0, 2.0, 2.0, 4.0] | [4.0, 2.0, 2.0, -3.0] ; -mvSortEmp#[skip:-8.13.99, reason:newly added in 8.14] +mvSortEmp +required_feature: esql.mv_sort + FROM employees | eval sd = mv_sort(salary_change, "DESC"), sa = mv_sort(salary_change) | sort emp_no diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index 20f2e579643f2..7a64c9a87e0c9 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -416,7 +416,9 @@ row a = [1, 2, 2, 3] | eval da = mv_dedupe(a); [1, 2, 2, 3] | [1, 2, 3] ; -mvSort#[skip:-8.13.99, reason:newly added in 8.14] +mvSort +required_feature: esql.mv_sort + // tag::mv_sort[] ROW a = [4, 2, -3, 2] | EVAL sa = mv_sort(a), sd = mv_sort(a, "DESC") @@ -429,7 +431,9 @@ a:integer | sa:integer | sd:integer // end::mv_sort-result[] ; -mvSortEmpInt#[skip:-8.13.99, reason:newly added in 8.14] +mvSortEmpInt +required_feature: esql.mv_sort + FROM employees | eval sd = mv_sort(salary_change.int, "DESC"), sa = mv_sort(salary_change.int) | sort emp_no @@ -449,7 +453,9 @@ emp_no:integer | salary_change.int:integer | sa:integer | sd:integer 10009 | null | null | null ; -mvSortEmpLong#[skip:-8.13.99, reason:newly added in 8.14] +mvSortEmpLong +required_feature: esql.mv_sort + FROM employees | eval sd = mv_sort(salary_change.long, "DESC"), sa = mv_sort(salary_change.long) | sort emp_no @@ -469,7 +475,9 @@ emp_no:integer | salary_change.long:long | sa:long | sd:long 10009 | null | null | null ; -mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +mvSlice +required_feature: 
esql.mv_sort + // tag::mv_slice_positive[] row a = [1, 2, 2, 3] | eval a1 = mv_slice(a, 1), a2 = mv_slice(a, 2, 3) @@ -481,7 +489,9 @@ a:integer | a1:integer | a2:integer // end::mv_slice_positive-result[] ; -mvSliceNegativeOffset#[skip:-8.13.99, reason:newly added in 8.14] +mvSliceNegativeOffset +required_feature: esql.mv_sort + // tag::mv_slice_negative[] row a = [1, 2, 2, 3] | eval a1 = mv_slice(a, -2), a2 = mv_slice(a, -3, -1) @@ -493,7 +503,9 @@ a:integer | a1:integer | a2:integer // end::mv_slice_negative-result[] ; -mvSliceSingle#[skip:-8.13.99, reason:newly added in 8.14] +mvSliceSingle +required_feature: esql.mv_sort + row a = 1 | eval a1 = mv_slice(a, 0); @@ -501,7 +513,9 @@ a:integer | a1:integer 1 | 1 ; -mvSliceOutOfBound#[skip:-8.13.99, reason:newly added in 8.14] +mvSliceOutOfBound +required_feature: esql.mv_sort + row a = [1, 2, 2, 3] | eval a1 = mv_slice(a, 4), a2 = mv_slice(a, 2, 6), a3 = mv_slice(a, 4, 6); @@ -509,7 +523,9 @@ a:integer | a1:integer | a2:integer | a3:integer [1, 2, 2, 3] | null | [2, 3] | null ; -mvSliceEmpInt#[skip:-8.13.99, reason:newly added in 8.14] +mvSliceEmpInt +required_feature: esql.mv_sort + from employees | eval a1 = mv_slice(salary_change.int, 0, 1) | keep emp_no, salary_change.int, a1 @@ -524,7 +540,9 @@ emp_no:integer | salary_change.int:integer | a1:integer 10005 | [-2, 13] | [-2, 13] ; -mvSliceEmpIntSingle#[skip:-8.13.99, reason:newly added in 8.14] +mvSliceEmpIntSingle +required_feature: esql.mv_sort + from employees | eval a1 = mv_slice(salary_change.int, 1) | keep emp_no, salary_change.int, a1 @@ -539,7 +557,9 @@ emp_no:integer | salary_change.int:integer | a1:integer 10005 | [-2, 13] | 13 ; -mvSliceEmpIntEndOutOfBound#[skip:-8.13.99, reason:newly added in 8.14] +mvSliceEmpIntEndOutOfBound +required_feature: esql.mv_sort + from employees | eval a1 = mv_slice(salary_change.int, 1, 4) | keep emp_no, salary_change.int, a1 @@ -554,7 +574,9 @@ emp_no:integer | salary_change.int:integer | a1:integer 10005 | [-2, 13] | 13 ; -mvSliceEmpIntOutOfBound#[skip:-8.13.99, reason:newly added in 8.14] +mvSliceEmpIntOutOfBound +required_feature: esql.mv_sort + from employees | eval a1 = mv_slice(salary_change.int, 2, 4) | keep emp_no, salary_change.int, a1 @@ -569,7 +591,9 @@ emp_no:integer | salary_change.int:integer | a1:integer 10005 | [-2, 13] | null ; -mvSliceEmpIntStartOutOfBoundNegative#[skip:-8.13.99, reason:newly added in 8.14] +mvSliceEmpIntStartOutOfBoundNegative +required_feature: esql.mv_sort + from employees | eval a1 = mv_slice(salary_change.int, -5, -2) | keep emp_no, salary_change.int, a1 @@ -584,7 +608,9 @@ emp_no:integer | salary_change.int:integer | a1:integer 10005 | [-2, 13] | -2 ; -mvSliceEmpIntOutOfBoundNegative#[skip:-8.13.99, reason:newly added in 8.14] +mvSliceEmpIntOutOfBoundNegative +required_feature: esql.mv_sort + from employees | eval a1 = mv_slice(salary_change.int, -5, -3) | keep emp_no, salary_change.int, a1 @@ -599,7 +625,9 @@ emp_no:integer | salary_change.int:integer | a1:integer 10005 | [-2, 13] | null ; -mvSliceEmpLong#[skip:-8.13.99, reason:newly added in 8.14] +mvSliceEmpLong +required_feature: esql.mv_sort + from employees | eval a1 = mv_slice(salary_change.long, 0, 1) | keep emp_no, salary_change.long, a1 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec index b83dda1376ac5..09b17ed4112c9 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec +++ 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec @@ -302,7 +302,9 @@ eth0 |127.0.0.3 eth0 |fe80::cae2:65ff:fece:fec1 ; -mvSort#[skip:-8.13.99, reason:newly added in 8.14] +mvSort +required_feature: esql.mv_sort + FROM hosts | eval sd = mv_sort(ip1, "DESC"), sa = mv_sort(ip1) | sort host desc, ip1 @@ -318,7 +320,9 @@ epsilon | fe80::cae2:65ff:fece:fec1 | fe80::ca epsilon | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [fe82::cae2:65ff:fece:fec0, fe81::cae2:65ff:fece:feb9] ; -mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +mvSlice +required_feature: esql.mv_sort + from hosts | where host == "epsilon" | eval a1 = mv_slice(ip1, 0, 1) @@ -332,7 +336,9 @@ epsilon | fe80::cae2:65ff:fece:fec1 | fe80::ca epsilon | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] ; -mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +mvSlice +required_feature: esql.mv_sort + from hosts | where host == "epsilon" | eval a1 = mv_slice(ip1, 0, 1) @@ -346,7 +352,9 @@ epsilon | fe80::cae2:65ff:fece:fec1 | fe80::ca epsilon | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] ; -mvZip#[skip:-8.13.99, reason:newly added in 8.14] +mvZip +required_feature: esql.mv_sort + from hosts | eval zip = mv_zip(to_string(description), to_string(ip0), "@@") | keep host, description, ip0, zip diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec index 88155301a06bc..495d0cbb8d7f0 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec @@ -71,7 +71,9 @@ c:geo_point POINT(39.58327988510707 20.619513023697994) ; -centroidFromString4#[skip:-8.13.99, reason:st_x and st_y added in 8.14] +centroidFromString4 +required_feature: esql.st_x_y + ROW wkt = ["POINT(42.97109629958868 14.7552534006536)", "POINT(75.80929149873555 22.72774917539209)", "POINT(-0.030548143003023033 24.37553649504829)"] | MV_EXPAND wkt | EVAL pt = TO_GEOPOINT(wkt) @@ -82,7 +84,9 @@ c:geo_point | x:double | y:double POINT(39.58327988510707 20.619513023697994) | 39.58327988510707 | 20.619513023697994 ; -stXFromString#[skip:-8.13.99, reason:st_x and st_y added in 8.14] +stXFromString +required_feature: esql.st_x_y + // tag::st_x_y[] ROW point = TO_GEOPOINT("POINT(42.97109629958868 14.7552534006536)") | EVAL x = ST_X(point), y = ST_Y(point) @@ -113,7 +117,9 @@ WIIT | Bandar Lampung | POINT(105.2667 -5.45) | Indonesia ZAH | Zāhedān | POINT(60.8628 29.4964) | Iran | POINT(60.900708564915 29.4752941956573) | Zahedan Int'l | 9 | mid ; -stXFromAirportsSupportsNull#[skip:-8.13.99, reason:st_x and st_y added in 8.14] +stXFromAirportsSupportsNull +required_feature: esql.st_x_y + FROM airports | EVAL x = FLOOR(ABS(ST_X(city_location))/200), y = FLOOR(ABS(ST_Y(city_location))/100) | STATS c = count(*) BY x, y @@ -604,7 +610,9 @@ c:cartesian_point POINT(3949.163965353159 1078.2645465797348) ; -stXFromCartesianString#[skip:-8.13.99, reason:st_x and st_y added in 8.14] +stXFromCartesianString +required_feature: esql.st_x_y + ROW point = TO_CARTESIANPOINT("POINT(4297.10986328125 -1475.530029296875)") | EVAL x = ST_X(point), y = ST_Y(point) ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index 1bd7860af1018..06fca2682bbb9 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -714,14 +714,18 @@ ROW a=[10, 9, 8] // end::mv_concat-to_string-result[] ; -mvSort#[skip:-8.13.99, reason:newly added in 8.14] +mvSort +required_feature: esql.mv_sort + row a = ["Mon", "Tues", "Wed", "Thu", "Fri"] | eval sa = mv_sort(a), sd = mv_sort(a, "DESC"); a:keyword | sa:keyword | sd:keyword ["Mon", "Tues", "Wed", "Thu", "Fri"] | [Fri, Mon, Thu, Tues, Wed] | [Wed, Tues, Thu, Mon, Fri] ; -mvSortEmp#[skip:-8.13.99, reason:newly added in 8.14] +mvSortEmp +required_feature: esql.mv_sort + FROM employees | eval sd = mv_sort(job_positions, "DESC"), sa = mv_sort(job_positions) | sort emp_no @@ -737,7 +741,9 @@ emp_no:integer | job_positions:keyword 10005 | null | null | null ; -mvSliceEmp#[skip:-8.13.99, reason:newly added in 8.14] +mvSliceEmp +required_feature: esql.mv_sort + from employees | eval a1 = mv_slice(salary_change.keyword, 0, 1) | keep emp_no, salary_change.keyword, a1 @@ -752,7 +758,9 @@ emp_no:integer | salary_change.keyword:keyword | a1:keyword 10005 | [-2.14,13.07] | [-2.14,13.07] ; -mvZip#[skip:-8.13.99, reason:newly added in 8.14] +mvZip +required_feature: esql.mv_sort + // tag::mv_zip[] ROW a = ["x", "y", "z"], b = ["1", "2"] | EVAL c = mv_zip(a, b, "-") @@ -766,7 +774,9 @@ a:keyword | b:keyword | c:keyword // end::mv_zip-result[] ; -mvZipEmp#[skip:-8.13.99, reason:newly added in 8.14] +mvZipEmp +required_feature: esql.mv_sort + from employees | eval full_name = mv_zip(first_name, last_name, " "), full_name_2 = mv_zip(last_name, first_name), jobs = mv_zip(job_positions, salary_change.keyword, "#") | keep emp_no, full_name, full_name_2, job_positions, salary_change.keyword, jobs diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java index 3b2c1e9d9a486..17f262143f57a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java @@ -15,6 +15,23 @@ import java.util.Set; public class EsqlFeatures implements FeatureSpecification { + /** + * Introduction of {@code MV_SORT}, {@code MV_SLICE}, and {@code MV_ZIP}. + * Added in #106095. + */ + private static final NodeFeature MV_SORT = new NodeFeature("esql.mv_sort"); + + /** + * When we disabled some broken optimizations around {@code nullable}. + * Fixed in #105691. + */ + private static final NodeFeature DISABLE_NULLABLE_OPTS = new NodeFeature("esql.disable_nullable_opts"); + + /** + * Introduction of {@code ST_X} and {@code ST_Y}. Added in #105768. + */ + private static final NodeFeature ST_X_Y = new NodeFeature("esql.st_x_y"); + /** * When we added the warnings for multivalued fields emitting {@code null} * when they touched multivalued fields. Added in #102417. @@ -48,8 +65,16 @@ public class EsqlFeatures implements FeatureSpecification { */ private static final NodeFeature AGG_VALUES = new NodeFeature("esql.agg_values"); + /** + * Does ESQL support async queries. 
+ */ public static final NodeFeature ASYNC_QUERY = new NodeFeature("esql.async_query"); + @Override + public Set getFeatures() { + return Set.of(ASYNC_QUERY, AGG_VALUES, MV_SORT, DISABLE_NULLABLE_OPTS, ST_X_Y); + } + @Override public Map getHistoricalFeatures() { return Map.ofEntries( @@ -61,9 +86,4 @@ public Map getHistoricalFeatures() { // Map.entry(GEO_SHAPE_SUPPORT, Version.V_8_13_0) ); } - - @Override - public Set getFeatures() { - return Set.of(ASYNC_QUERY, AGG_VALUES); - } } From 82d7e4ec937a59620c535b18ae9e403ddfcc7525 Mon Sep 17 00:00:00 2001 From: shainaraskas <58563081+shainaraskas@users.noreply.github.com> Date: Fri, 22 Mar 2024 14:06:19 -0400 Subject: [PATCH 29/79] [DOCS] Clarify behavior of the generic `data` node role (#106375) --- docs/reference/modules/node.asciidoc | 41 +++++++++++++++------------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index ec60b2bca37e4..8a42d11f6367a 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -68,8 +68,8 @@ A node that has the `master` role, which makes it eligible to be <>:: -A node that has the `data` role. Data nodes hold data and perform data -related operations such as CRUD, search, and aggregations. A node with the `data` role can fill any of the specialised data node roles. +A node that has one of several data roles. Data nodes hold data and perform data +related operations such as CRUD, search, and aggregations. A node with a generic `data` role can fill any of the specialized data node roles. <>:: @@ -220,7 +220,7 @@ therefore ensure that the storage and networking available to the nodes in your cluster are good enough to meet your performance goals. [[data-node]] -==== Data node +==== Data nodes Data nodes hold the shards that contain the documents you have indexed. Data nodes handle data related operations like CRUD, search, and aggregations. @@ -230,20 +230,27 @@ monitor these resources and to add more data nodes if they are overloaded. The main benefit of having dedicated data nodes is the separation of the master and data roles. -To create a dedicated data node, set: +In a multi-tier deployment architecture, you use specialized data roles to +assign data nodes to specific tiers: `data_content`,`data_hot`, `data_warm`, +`data_cold`, or `data_frozen`. A node can belong to multiple tiers. + +If you want to include a node in all tiers, or if your cluster does not use multiple tiers, then you can use the generic `data` role. + +WARNING: If you assign a node to a specific tier using a specialized data role, then you shouldn't also assign it the generic `data` role. The generic `data` role takes precedence over specialized data roles. + +[[generic-data-node]] +===== Generic data node + +Generic data nodes are included in all content tiers. + +To create a dedicated generic data node, set: [source,yaml] ---- node.roles: [ data ] ---- -In a multi-tier deployment architecture, you use specialized data roles to -assign data nodes to specific tiers: `data_content`,`data_hot`, `data_warm`, -`data_cold`, or `data_frozen`. A node can belong to multiple tiers, but a node -that has one of the specialized data roles cannot have the generic `data` role. - -[role="xpack"] [[data-content-node]] -==== Content data node +===== Content data node Content data nodes are part of the content tier. 
include::{es-repo-dir}/datatiers.asciidoc[tag=content-tier] @@ -254,9 +261,8 @@ To create a dedicated content node, set: node.roles: [ data_content ] ---- -[role="xpack"] [[data-hot-node]] -==== Hot data node +===== Hot data node Hot data nodes are part of the hot tier. include::{es-repo-dir}/datatiers.asciidoc[tag=hot-tier] @@ -267,9 +273,8 @@ To create a dedicated hot node, set: node.roles: [ data_hot ] ---- -[role="xpack"] [[data-warm-node]] -==== Warm data node +===== Warm data node Warm data nodes are part of the warm tier. include::{es-repo-dir}/datatiers.asciidoc[tag=warm-tier] @@ -280,9 +285,8 @@ To create a dedicated warm node, set: node.roles: [ data_warm ] ---- -[role="xpack"] [[data-cold-node]] -==== Cold data node +===== Cold data node Cold data nodes are part of the cold tier. include::{es-repo-dir}/datatiers.asciidoc[tag=cold-tier] @@ -293,9 +297,8 @@ To create a dedicated cold node, set: node.roles: [ data_cold ] ---- -[role="xpack"] [[data-frozen-node]] -==== Frozen data node +===== Frozen data node Frozen data nodes are part of the frozen tier. include::{es-repo-dir}/datatiers.asciidoc[tag=frozen-tier] From bb9566a57e0e65b601036fbceab5fac7c056e794 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Fri, 22 Mar 2024 14:43:48 -0500 Subject: [PATCH 30/79] Update discovery.asciidoc (#106541) (#106695) Fix typo (cherry picked from commit 96a46b9c5b41145626477dc2fa062456d3e46a75) Co-authored-by: Boen <13752080613@163.com> --- docs/reference/modules/discovery/discovery.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/modules/discovery/discovery.asciidoc b/docs/reference/modules/discovery/discovery.asciidoc index a665a401ebab2..2311fa738fc1a 100644 --- a/docs/reference/modules/discovery/discovery.asciidoc +++ b/docs/reference/modules/discovery/discovery.asciidoc @@ -115,7 +115,7 @@ supplied in `unicast_hosts.txt`. The `unicast_hosts.txt` file contains one node entry per line. Each node entry consists of the host (host name or IP address) and an optional transport port -number. If the port number is specified, is must come immediately after the +number. If the port number is specified, it must come immediately after the host (on the same line) separated by a `:`. If the port number is not specified, {es} will implicitly use the first port in the port range given by `transport.profiles.default.port`, or by `transport.port` if From 35fcc9a29d88bad59adf6a50e83754629df73cb5 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 22 Mar 2024 16:30:59 -0400 Subject: [PATCH 31/79] ESQL: Add README.md to docs (#106698) This explains how to run the tests that build the docs. I tried to add it in #106577 but the sync code deleted it. So I fixed that too. 
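
As a quick reference, the Gradle invocations documented in the added README are
the ones that regenerate the doc files (quoted from the README in this patch;
`*SinTests` is only an example of selecting a single function's test class):

    # regenerate the doc files for a single function
    ./gradlew :x-pack:plugin:esql:tests -Dtests.class='*SinTests'

    # regenerate the doc files for all functions
    ./gradlew :x-pack:plugin:esql:tests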
--- docs/reference/esql/functions/README.md | 21 +++++++++++++++++++ x-pack/plugin/esql/build.gradle | 2 +- .../function/AbstractFunctionTestCase.java | 3 ++- 3 files changed, 24 insertions(+), 2 deletions(-) create mode 100644 docs/reference/esql/functions/README.md diff --git a/docs/reference/esql/functions/README.md b/docs/reference/esql/functions/README.md new file mode 100644 index 0000000000000..fd310ebacfe7e --- /dev/null +++ b/docs/reference/esql/functions/README.md @@ -0,0 +1,21 @@ +The files in these subdirectories and generated by ESQL's test suite: +* `description` - description of each function scraped from `@FunctionInfo#description` +* `examples` - examples of each function scraped from `@FunctionInfo#examples` +* `parameters` - description of each function's parameters scraped from `@Param` +* `signature` - railroad diagram of the syntax to invoke each function +* `types` - a table of each combination of support type for each parameter. These are generated from tests. +* `layout` - a fully generated description for each function + +Most functions can use the generated docs generated in the `layout` directory. +If we need something more custom for the function we can make a file in this +directory that can `include::` any parts of the files above. + +To regenerate the files for a function run its tests using gradle: +``` +./gradlew :x-pack:plugin:esql:tests -Dtests.class='*SinTests' +``` + +To regenerate the files for all functions run all of ESQL's tests using gradle: +``` +./gradlew :x-pack:plugin:esql:tests +``` diff --git a/x-pack/plugin/esql/build.gradle b/x-pack/plugin/esql/build.gradle index 8f8d2774a5020..3fdfa7835b036 100644 --- a/x-pack/plugin/esql/build.gradle +++ b/x-pack/plugin/esql/build.gradle @@ -88,7 +88,7 @@ tasks.named("test").configure { into "${rootDir}/docs/reference/esql/functions" include '**/*.asciidoc', '**/*.svg' preserve { - include '/*.asciidoc' + include '/*.asciidoc', 'README.md' } } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 61d62d6f54344..755e5fcf25b9b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.bytes.BytesReference; @@ -1161,7 +1162,7 @@ private static void renderDescription(String description, String note) throws IO *Description* """ + description + "\n"; - if (note != null) { + if (Strings.isNullOrEmpty(note) == false) { rendered += "\nNOTE: " + note + "\n"; } LogManager.getLogger(getTestClass()).info("Writing description for [{}]:\n{}", functionName(), rendered); From e9fcb0a0f0f45773248c5258982d6bf2d5517299 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Sun, 24 Mar 2024 13:19:49 +0200 Subject: [PATCH 32/79] Retrieve routing hash from id when missing (#106682) * Retrieve routing hash from id when missing * add test * add test 2 --- 
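Note: the gist of the mapper change below is that, when a time-series document is
indexed without an explicit routing value, the routing hash is now recovered from
the `_id`, whose leading bytes carry it for indices created on or after
IndexVersions.TIME_SERIES_ROUTING_HASH_IN_ID. A minimal sketch of that recovery,
simplified from the hunk in TimeSeriesRoutingHashFieldMapper.java (the `id`
variable here is shorthand for `context.sourceToParse().id()`):

    // Sketch only; uses java.util.Base64 and java.util.Arrays.
    // Decode the base64url _id and re-encode its first four bytes: that prefix
    // is the routing hash when no routing value was supplied with the request.
    String routingHash = Base64.getUrlEncoder()
        .withoutPadding()
        .encodeToString(Arrays.copyOf(Base64.getUrlDecoder().decode(id), 4));
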
.../action/index/IndexRequest.java | 2 +- .../TimeSeriesRoutingHashFieldMapper.java | 9 ++++- .../mapper/TimeSeriesIdFieldMapperTests.java | 34 ------------------- ...TimeSeriesRoutingHashFieldMapperTests.java | 20 +++++++++++ 4 files changed, 29 insertions(+), 36 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index a8d6220415a43..d142db2d5a1ab 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -329,7 +329,7 @@ public IndexRequest id(String id) { */ @Override public IndexRequest routing(String routing) { - if (routing != null && routing.length() == 0) { + if (routing != null && routing.isEmpty()) { this.routing = null; } else { this.routing = routing; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java index d5750600a25c9..b9629d7561982 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import java.time.ZoneId; +import java.util.Arrays; import java.util.Base64; import java.util.Collections; @@ -111,7 +112,13 @@ public void postParse(DocumentParserContext context) { if (context.indexSettings().getMode() == IndexMode.TIME_SERIES && context.indexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.TIME_SERIES_ROUTING_HASH_IN_ID)) { String routingHash = context.sourceToParse().routing(); - var field = new SortedDocValuesField(NAME, Uid.encodeId(routingHash != null ? 
routingHash : encode(0))); + if (routingHash == null) { + assert context.sourceToParse().id() != null; + routingHash = Base64.getUrlEncoder() + .withoutPadding() + .encodeToString(Arrays.copyOf(Base64.getUrlDecoder().decode(context.sourceToParse().id()), 4)); + } + var field = new SortedDocValuesField(NAME, Uid.encodeId(routingHash)); context.rootDoc().add(field); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java index 50abb47e51125..87b107d5bd139 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java @@ -720,38 +720,4 @@ public void testParseWithDynamicMappingInvalidRoutingHash() { }); assertThat(failure.getMessage(), equalTo("[5:1] failed to parse: Illegal base64 character 20")); } - - public void testParseWithDynamicMappingNullId() { - Settings indexSettings = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "dim") - .build(); - MapperService mapper = createMapperService(IndexVersion.current(), indexSettings, () -> false); - SourceToParse source = new SourceToParse(null, new BytesArray(""" - { - "@timestamp": 1609459200000, - "dim": "6a841a21", - "value": 100 - }"""), XContentType.JSON); - var failure = expectThrows(DocumentParsingException.class, () -> { - IndexShard.prepareIndex( - mapper, - source, - UNASSIGNED_SEQ_NO, - randomNonNegativeLong(), - Versions.MATCH_ANY, - VersionType.INTERNAL, - Engine.Operation.Origin.PRIMARY, - -1, - false, - UNASSIGNED_SEQ_NO, - 0, - System.nanoTime() - ); - }); - assertThat( - failure.getMessage(), - equalTo("[5:1] failed to parse: _ts_routing_hash was null but must be set because index [index] is in time_series mode") - ); - } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapperTests.java index df5ff9a8fe7e5..5352bd446a80b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapperTests.java @@ -59,6 +59,15 @@ private static ParsedDocument parseDocument(int hash, DocumentMapper docMapper, }, TimeSeriesRoutingHashFieldMapper.encode(hash))); } + private static ParsedDocument parseDocument(String id, DocumentMapper docMapper, CheckedConsumer f) + throws IOException { + // Add the @timestamp field required by DataStreamTimestampFieldMapper for all time series indices + return docMapper.parse(source(id, b -> { + f.accept(b); + b.field("@timestamp", "2021-10-01"); + }, null)); + } + private static int getRoutingHash(ParsedDocument document) { BytesRef value = document.rootDoc().getBinaryValue(TimeSeriesRoutingHashFieldMapper.NAME); return TimeSeriesRoutingHashFieldMapper.decode(Uid.decodeId(value.bytes)); @@ -76,6 +85,17 @@ public void testEnabledInTimeSeriesMode() throws Exception { assertEquals(hash, getRoutingHash(doc)); } + public void testRetrievedFromIdInTimeSeriesMode() throws Exception { + DocumentMapper docMapper = createMapper(mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + + int hash = randomInt(); + ParsedDocument doc = 
parseDocument(TimeSeriesRoutingHashFieldMapper.DUMMY_ENCODED_VALUE, docMapper, b -> b.field("a", "value")); + assertThat(doc.rootDoc().getField("a").binaryValue(), equalTo(new BytesRef("value"))); + assertEquals(0, getRoutingHash(doc)); + } + public void testDisabledInStandardMode() throws Exception { DocumentMapper docMapper = createMapperService( getIndexSettingsBuilder().put(IndexSettings.MODE.getKey(), IndexMode.STANDARD.name()).build(), From 12e04e12dad49c9a17bb66ac2bfcf1868f15dfcd Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 25 Mar 2024 19:13:35 +1100 Subject: [PATCH 33/79] Explicitly set number_of_shards to 1 in tests (#106707) Some tests rely on the default number_of_shards to be 1. This may not hold if the default number_of_shards changes. This PR removes that assumption in the tests by explicitly configuring the number_of_shards to 1 at index creation time. Relates: #100171 Relates: ES-7911 --- .../resources/rest-api-spec/test/aggregations/time_series.yml | 1 + .../resources/rest-api-spec/test/count/30_min_score.yml | 3 +++ .../test/search.vectors/160_knn_query_missing_params.yml | 2 ++ 3 files changed, 6 insertions(+) diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/time_series.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/time_series.yml index 9e8ec6b3f6768..421c0c5800949 100644 --- a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/time_series.yml +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/time_series.yml @@ -9,6 +9,7 @@ setup: index: tsdb body: settings: + number_of_shards: 1 mode: time_series routing_path: [key] time_series: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/count/30_min_score.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/count/30_min_score.yml index 278a7095add5e..8bbfb5cff7ed3 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/count/30_min_score.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/count/30_min_score.yml @@ -3,6 +3,9 @@ - do: indices.create: index: test_count_min_score + body: + settings: + number_of_shards: 1 - do: index: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/160_knn_query_missing_params.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/160_knn_query_missing_params.yml index 5194c95151eda..9ff6319a01af4 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/160_knn_query_missing_params.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/160_knn_query_missing_params.yml @@ -6,6 +6,8 @@ setup: indices.create: index: knn_query_test_index body: + settings: + number_of_shards: 1 mappings: properties: vector: From 1b2ed4c7a2064178788bfb541b878f56cb34b085 Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Mon, 25 Mar 2024 10:36:18 +0100 Subject: [PATCH 34/79] ESQL: Fix count pushdown for unmapped fields (#106690) Assume that unmapped fields are multi-valued per default. 
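
Concretely, SearchStats#isSingleValue now starts from `false` (multi-valued)
instead of `true` for fields that exist, so a COUNT on such a field can no longer
be pushed down on the assumption that every document contributes exactly one
value. The csv-spec test added below exercises this with a multi-valued field and
expects 183 values, not one per row:

    FROM employees
    | STATS vals = COUNT(salary_change.int)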
--- .../qa/testFixtures/src/main/resources/stats.csv-spec | 9 +++++++++ .../org/elasticsearch/xpack/esql/stats/SearchStats.java | 3 ++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 4aff4c689c077..917735040c61d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -1217,3 +1217,12 @@ FROM airports c:l 891 ; + +countMV#[skip:-8.13.99,reason:supported in 8.14] +FROM employees +| STATS vals = COUNT(salary_change.int) +; + +vals:l +183 +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/SearchStats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/SearchStats.java index c813308ea0443..679781a40c869 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/SearchStats.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/SearchStats.java @@ -195,7 +195,8 @@ public boolean isSingleValue(String field) { if (exists(field) == false) { stat.singleValue = true; } else { - var sv = new boolean[] { true }; + // fields are MV per default + var sv = new boolean[] { false }; for (SearchContext context : contexts) { var sec = context.getSearchExecutionContext(); MappedFieldType mappedType = sec.isFieldMapped(field) ? null : sec.getFieldType(field); From 9ba2e5f7c53c78ee7044b8bc722d4d2110edc889 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 25 Mar 2024 12:06:47 +0100 Subject: [PATCH 35/79] Remove duplicate ForUtil (#105886) We copied this into our codebase twice. The version with customizable block size is unnecessary, we always use the same block size anyway. => no need to duplicate 1k lines. --- .../index/codec/{tsdb => }/ForUtil.java | 43 +- .../codec/postings/ES812PostingsFormat.java | 1 + .../codec/postings/ES812PostingsReader.java | 3 +- .../codec/postings/ES812PostingsWriter.java | 1 + .../index/codec/postings/ES812SkipReader.java | 1 + .../index/codec/postings/ForUtil.java | 1049 ----------------- .../index/codec/postings/PForUtil.java | 1 + .../index/codec/tsdb/DocValuesForUtil.java | 1 + .../codec/{postings => }/ForUtilTests.java | 2 +- .../codec/tsdb/DocValuesForUtilTests.java | 1 + 10 files changed, 25 insertions(+), 1078 deletions(-) rename server/src/main/java/org/elasticsearch/index/codec/{tsdb => }/ForUtil.java (97%) delete mode 100644 server/src/main/java/org/elasticsearch/index/codec/postings/ForUtil.java rename server/src/test/java/org/elasticsearch/index/codec/{postings => }/ForUtilTests.java (98%) diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ForUtil.java b/server/src/main/java/org/elasticsearch/index/codec/ForUtil.java similarity index 97% rename from server/src/main/java/org/elasticsearch/index/codec/tsdb/ForUtil.java rename to server/src/main/java/org/elasticsearch/index/codec/ForUtil.java index 874b90a08b920..5687b0d1b687d 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ForUtil.java +++ b/server/src/main/java/org/elasticsearch/index/codec/ForUtil.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.index.codec.tsdb; +package org.elasticsearch.index.codec; import org.apache.lucene.store.DataInput; import org.apache.lucene.store.DataOutput; @@ -20,20 +20,9 @@ // else we pack 2 ints per long public final class ForUtil { - static final int DEFAULT_BLOCK_SIZE = 128; - private final int blockSize; - private final int blockSizeLog2; - private final long[] tmp; - - public ForUtil() { - this(DEFAULT_BLOCK_SIZE); - } - - private ForUtil(int blockSize) { - this.blockSize = blockSize; - this.blockSizeLog2 = (int) (Math.log(blockSize) / Math.log(2)); - this.tmp = new long[blockSize / 2]; - } + public static final int BLOCK_SIZE = 128; + private static final int BLOCK_SIZE_LOG2 = 7; + private final long[] tmp = new long[BLOCK_SIZE / 2]; private static long expandMask32(long mask32) { return mask32 | (mask32 << 32); @@ -129,20 +118,20 @@ private static void collapse32(long[] arr) { } /** Encode 128 integers from {@code longs} into {@code out}. */ - void encode(long[] longs, int bitsPerValue, DataOutput out) throws IOException { + public void encode(long[] longs, int bitsPerValue, DataOutput out) throws IOException { final int nextPrimitive; final int numLongs; if (bitsPerValue <= 8) { nextPrimitive = 8; - numLongs = blockSize / 8; + numLongs = BLOCK_SIZE / 8; collapse8(longs); } else if (bitsPerValue <= 16) { nextPrimitive = 16; - numLongs = blockSize / 4; + numLongs = BLOCK_SIZE / 4; collapse16(longs); } else { nextPrimitive = 32; - numLongs = blockSize / 2; + numLongs = BLOCK_SIZE / 2; collapse32(longs); } @@ -202,11 +191,11 @@ void encode(long[] longs, int bitsPerValue, DataOutput out) throws IOException { } /** Number of bytes required to encode 128 integers of {@code bitsPerValue} bits per value. */ - int numBytes(int bitsPerValue) { - return bitsPerValue << (blockSizeLog2 - 3); + public int numBytes(int bitsPerValue) { + return bitsPerValue << (BLOCK_SIZE_LOG2 - 3); } - private static void decodeSlow(int blockSize, int bitsPerValue, DataInput in, long[] tmp, long[] longs) throws IOException { + private static void decodeSlow(int bitsPerValue, DataInput in, long[] tmp, long[] longs) throws IOException { final int numLongs = bitsPerValue << 1; in.readLongs(tmp, 0, numLongs); final long mask = MASKS32[bitsPerValue]; @@ -220,7 +209,7 @@ private static void decodeSlow(int blockSize, int bitsPerValue, DataInput in, lo final long mask32RemainingBitsPerLong = MASKS32[remainingBitsPerLong]; int tmpIdx = 0; int remainingBits = remainingBitsPerLong; - for (; longsIdx < blockSize / 2; ++longsIdx) { + for (; longsIdx < BLOCK_SIZE / 2; ++longsIdx) { int b = bitsPerValue - remainingBits; long l = (tmp[tmpIdx++] & MASKS32[remainingBits]) << b; while (b >= remainingBitsPerLong) { @@ -310,7 +299,7 @@ private static void shiftLongs(long[] a, int count, long[] b, int bi, int shift, private static final long MASK32_24 = MASKS32[24]; /** Decode 128 integers into {@code longs}. 
*/ - void decode(int bitsPerValue, DataInput in, long[] longs) throws IOException { + public void decode(int bitsPerValue, DataInput in, long[] longs) throws IOException { switch (bitsPerValue) { case 1: decode1(in, tmp, longs); @@ -409,7 +398,7 @@ void decode(int bitsPerValue, DataInput in, long[] longs) throws IOException { expand32(longs); break; default: - decodeSlow(blockSize, bitsPerValue, in, tmp, longs); + decodeSlow(bitsPerValue, in, tmp, longs); expand32(longs); break; } @@ -421,7 +410,7 @@ void decode(int bitsPerValue, DataInput in, long[] longs) throws IOException { * [0..63], and values [64..127] are encoded in the low-order bits of {@code longs} [0..63]. This * representation may allow subsequent operations to be performed on two values at a time. */ - void decodeTo32(int bitsPerValue, DataInput in, long[] longs) throws IOException { + public void decodeTo32(int bitsPerValue, DataInput in, long[] longs) throws IOException { switch (bitsPerValue) { case 1: decode1(in, tmp, longs); @@ -512,7 +501,7 @@ void decodeTo32(int bitsPerValue, DataInput in, long[] longs) throws IOException decode24(in, tmp, longs); break; default: - decodeSlow(blockSize, bitsPerValue, in, tmp, longs); + decodeSlow(bitsPerValue, in, tmp, longs); break; } } diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsFormat.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsFormat.java index 5270326876e08..6ccfaba7853f2 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsFormat.java +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsFormat.java @@ -36,6 +36,7 @@ import org.apache.lucene.store.DataOutput; import org.apache.lucene.util.packed.PackedInts; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.codec.ForUtil; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java index 8b3d5d02a04c0..11bd90cd31610 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java @@ -36,11 +36,13 @@ import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.codec.ForUtil; import org.elasticsearch.index.codec.postings.ES812PostingsFormat.IntBlockTermState; import java.io.IOException; import java.util.Arrays; +import static org.elasticsearch.index.codec.ForUtil.BLOCK_SIZE; import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.DOC_CODEC; import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.MAX_SKIP_LEVELS; import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.PAY_CODEC; @@ -48,7 +50,6 @@ import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.TERMS_CODEC; import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.VERSION_CURRENT; import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.VERSION_START; -import static org.elasticsearch.index.codec.postings.ForUtil.BLOCK_SIZE; /** * Concrete class that reads docId(maybe frq,pos,offset,payloads) list with postings format. 
diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsWriter.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsWriter.java index 9ab7ed42efb09..cc95b4ffcfacf 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsWriter.java +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsWriter.java @@ -35,6 +35,7 @@ import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.codec.ForUtil; import org.elasticsearch.index.codec.postings.ES812PostingsFormat.IntBlockTermState; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java index 11c0c611312fc..f9b36114361ca 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java @@ -21,6 +21,7 @@ import org.apache.lucene.codecs.MultiLevelSkipListReader; import org.apache.lucene.store.IndexInput; +import org.elasticsearch.index.codec.ForUtil; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ForUtil.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ForUtil.java deleted file mode 100644 index d874caab1b8c0..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/ForUtil.java +++ /dev/null @@ -1,1049 +0,0 @@ -/* - * @notice - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Modifications copyright (C) 2022 Elasticsearch B.V. - */ -package org.elasticsearch.index.codec.postings; - -import org.apache.lucene.store.DataInput; -import org.apache.lucene.store.DataOutput; - -import java.io.IOException; - -// Inspired from https://fulmicoton.com/posts/bitpacking/ -// Encodes multiple integers in a long to get SIMD-like speedups. 
-// If bitsPerValue <= 8 then we pack 8 ints per long -// else if bitsPerValue <= 16 we pack 4 ints per long -// else we pack 2 ints per long -final class ForUtil { - - static final int BLOCK_SIZE = 128; - private static final int BLOCK_SIZE_LOG2 = 7; - - private static long expandMask32(long mask32) { - return mask32 | (mask32 << 32); - } - - private static long expandMask16(long mask16) { - return expandMask32(mask16 | (mask16 << 16)); - } - - private static long expandMask8(long mask8) { - return expandMask16(mask8 | (mask8 << 8)); - } - - private static long mask32(int bitsPerValue) { - return expandMask32((1L << bitsPerValue) - 1); - } - - private static long mask16(int bitsPerValue) { - return expandMask16((1L << bitsPerValue) - 1); - } - - private static long mask8(int bitsPerValue) { - return expandMask8((1L << bitsPerValue) - 1); - } - - private static void expand8(long[] arr) { - for (int i = 0; i < 16; ++i) { - long l = arr[i]; - arr[i] = (l >>> 56) & 0xFFL; - arr[16 + i] = (l >>> 48) & 0xFFL; - arr[32 + i] = (l >>> 40) & 0xFFL; - arr[48 + i] = (l >>> 32) & 0xFFL; - arr[64 + i] = (l >>> 24) & 0xFFL; - arr[80 + i] = (l >>> 16) & 0xFFL; - arr[96 + i] = (l >>> 8) & 0xFFL; - arr[112 + i] = l & 0xFFL; - } - } - - private static void expand8To32(long[] arr) { - for (int i = 0; i < 16; ++i) { - long l = arr[i]; - arr[i] = (l >>> 24) & 0x000000FF000000FFL; - arr[16 + i] = (l >>> 16) & 0x000000FF000000FFL; - arr[32 + i] = (l >>> 8) & 0x000000FF000000FFL; - arr[48 + i] = l & 0x000000FF000000FFL; - } - } - - private static void collapse8(long[] arr) { - for (int i = 0; i < 16; ++i) { - arr[i] = (arr[i] << 56) | (arr[16 + i] << 48) | (arr[32 + i] << 40) | (arr[48 + i] << 32) | (arr[64 + i] << 24) | (arr[80 + i] - << 16) | (arr[96 + i] << 8) | arr[112 + i]; - } - } - - private static void expand16(long[] arr) { - for (int i = 0; i < 32; ++i) { - long l = arr[i]; - arr[i] = (l >>> 48) & 0xFFFFL; - arr[32 + i] = (l >>> 32) & 0xFFFFL; - arr[64 + i] = (l >>> 16) & 0xFFFFL; - arr[96 + i] = l & 0xFFFFL; - } - } - - private static void expand16To32(long[] arr) { - for (int i = 0; i < 32; ++i) { - long l = arr[i]; - arr[i] = (l >>> 16) & 0x0000FFFF0000FFFFL; - arr[32 + i] = l & 0x0000FFFF0000FFFFL; - } - } - - private static void collapse16(long[] arr) { - for (int i = 0; i < 32; ++i) { - arr[i] = (arr[i] << 48) | (arr[32 + i] << 32) | (arr[64 + i] << 16) | arr[96 + i]; - } - } - - private static void expand32(long[] arr) { - for (int i = 0; i < 64; ++i) { - long l = arr[i]; - arr[i] = l >>> 32; - arr[64 + i] = l & 0xFFFFFFFFL; - } - } - - private static void collapse32(long[] arr) { - for (int i = 0; i < 64; ++i) { - arr[i] = (arr[i] << 32) | arr[64 + i]; - } - } - - private final long[] tmp = new long[BLOCK_SIZE / 2]; - - /** Encode 128 integers from {@code longs} into {@code out}. 
*/ - void encode(long[] longs, int bitsPerValue, DataOutput out) throws IOException { - final int nextPrimitive; - final int numLongs; - if (bitsPerValue <= 8) { - nextPrimitive = 8; - numLongs = BLOCK_SIZE / 8; - collapse8(longs); - } else if (bitsPerValue <= 16) { - nextPrimitive = 16; - numLongs = BLOCK_SIZE / 4; - collapse16(longs); - } else { - nextPrimitive = 32; - numLongs = BLOCK_SIZE / 2; - collapse32(longs); - } - - final int numLongsPerShift = bitsPerValue * 2; - int idx = 0; - int shift = nextPrimitive - bitsPerValue; - for (int i = 0; i < numLongsPerShift; ++i) { - tmp[i] = longs[idx++] << shift; - } - for (shift = shift - bitsPerValue; shift >= 0; shift -= bitsPerValue) { - for (int i = 0; i < numLongsPerShift; ++i) { - tmp[i] |= longs[idx++] << shift; - } - } - - final int remainingBitsPerLong = shift + bitsPerValue; - final long maskRemainingBitsPerLong; - if (nextPrimitive == 8) { - maskRemainingBitsPerLong = MASKS8[remainingBitsPerLong]; - } else if (nextPrimitive == 16) { - maskRemainingBitsPerLong = MASKS16[remainingBitsPerLong]; - } else { - maskRemainingBitsPerLong = MASKS32[remainingBitsPerLong]; - } - - int tmpIdx = 0; - int remainingBitsPerValue = bitsPerValue; - while (idx < numLongs) { - if (remainingBitsPerValue >= remainingBitsPerLong) { - remainingBitsPerValue -= remainingBitsPerLong; - tmp[tmpIdx++] |= (longs[idx] >>> remainingBitsPerValue) & maskRemainingBitsPerLong; - if (remainingBitsPerValue == 0) { - idx++; - remainingBitsPerValue = bitsPerValue; - } - } else { - final long mask1, mask2; - if (nextPrimitive == 8) { - mask1 = MASKS8[remainingBitsPerValue]; - mask2 = MASKS8[remainingBitsPerLong - remainingBitsPerValue]; - } else if (nextPrimitive == 16) { - mask1 = MASKS16[remainingBitsPerValue]; - mask2 = MASKS16[remainingBitsPerLong - remainingBitsPerValue]; - } else { - mask1 = MASKS32[remainingBitsPerValue]; - mask2 = MASKS32[remainingBitsPerLong - remainingBitsPerValue]; - } - tmp[tmpIdx] |= (longs[idx++] & mask1) << (remainingBitsPerLong - remainingBitsPerValue); - remainingBitsPerValue = bitsPerValue - remainingBitsPerLong + remainingBitsPerValue; - tmp[tmpIdx++] |= (longs[idx] >>> remainingBitsPerValue) & mask2; - } - } - - for (int i = 0; i < numLongsPerShift; ++i) { - out.writeLong(tmp[i]); - } - } - - /** Number of bytes required to encode 128 integers of {@code bitsPerValue} bits per value. 
*/ - int numBytes(int bitsPerValue) { - return bitsPerValue << (BLOCK_SIZE_LOG2 - 3); - } - - private static void decodeSlow(int bitsPerValue, DataInput in, long[] tmp, long[] longs) throws IOException { - final int numLongs = bitsPerValue << 1; - in.readLongs(tmp, 0, numLongs); - final long mask = MASKS32[bitsPerValue]; - int longsIdx = 0; - int shift = 32 - bitsPerValue; - for (; shift >= 0; shift -= bitsPerValue) { - shiftLongs(tmp, numLongs, longs, longsIdx, shift, mask); - longsIdx += numLongs; - } - final int remainingBitsPerLong = shift + bitsPerValue; - final long mask32RemainingBitsPerLong = MASKS32[remainingBitsPerLong]; - int tmpIdx = 0; - int remainingBits = remainingBitsPerLong; - for (; longsIdx < BLOCK_SIZE / 2; ++longsIdx) { - int b = bitsPerValue - remainingBits; - long l = (tmp[tmpIdx++] & MASKS32[remainingBits]) << b; - while (b >= remainingBitsPerLong) { - b -= remainingBitsPerLong; - l |= (tmp[tmpIdx++] & mask32RemainingBitsPerLong) << b; - } - if (b > 0) { - l |= (tmp[tmpIdx] >>> (remainingBitsPerLong - b)) & MASKS32[b]; - remainingBits = remainingBitsPerLong - b; - } else { - remainingBits = remainingBitsPerLong; - } - longs[longsIdx] = l; - } - } - - /** - * The pattern that this shiftLongs method applies is recognized by the C2 compiler, which - * generates SIMD instructions for it in order to shift multiple longs at once. - */ - private static void shiftLongs(long[] a, int count, long[] b, int bi, int shift, long mask) { - for (int i = 0; i < count; ++i) { - b[bi + i] = (a[i] >>> shift) & mask; - } - } - - private static final long[] MASKS8 = new long[8]; - private static final long[] MASKS16 = new long[16]; - private static final long[] MASKS32 = new long[32]; - - static { - for (int i = 0; i < 8; ++i) { - MASKS8[i] = mask8(i); - } - for (int i = 0; i < 16; ++i) { - MASKS16[i] = mask16(i); - } - for (int i = 0; i < 32; ++i) { - MASKS32[i] = mask32(i); - } - } - - // mark values in array as final longs to avoid the cost of reading array, arrays should only be - // used when the idx is a variable - private static final long MASK8_1 = MASKS8[1]; - private static final long MASK8_2 = MASKS8[2]; - private static final long MASK8_3 = MASKS8[3]; - private static final long MASK8_4 = MASKS8[4]; - private static final long MASK8_5 = MASKS8[5]; - private static final long MASK8_6 = MASKS8[6]; - private static final long MASK8_7 = MASKS8[7]; - private static final long MASK16_1 = MASKS16[1]; - private static final long MASK16_2 = MASKS16[2]; - private static final long MASK16_3 = MASKS16[3]; - private static final long MASK16_4 = MASKS16[4]; - private static final long MASK16_5 = MASKS16[5]; - private static final long MASK16_6 = MASKS16[6]; - private static final long MASK16_7 = MASKS16[7]; - private static final long MASK16_9 = MASKS16[9]; - private static final long MASK16_10 = MASKS16[10]; - private static final long MASK16_11 = MASKS16[11]; - private static final long MASK16_12 = MASKS16[12]; - private static final long MASK16_13 = MASKS16[13]; - private static final long MASK16_14 = MASKS16[14]; - private static final long MASK16_15 = MASKS16[15]; - private static final long MASK32_1 = MASKS32[1]; - private static final long MASK32_2 = MASKS32[2]; - private static final long MASK32_3 = MASKS32[3]; - private static final long MASK32_4 = MASKS32[4]; - private static final long MASK32_5 = MASKS32[5]; - private static final long MASK32_6 = MASKS32[6]; - private static final long MASK32_7 = MASKS32[7]; - private static final long MASK32_8 = MASKS32[8]; - private static 
final long MASK32_9 = MASKS32[9]; - private static final long MASK32_10 = MASKS32[10]; - private static final long MASK32_11 = MASKS32[11]; - private static final long MASK32_12 = MASKS32[12]; - private static final long MASK32_13 = MASKS32[13]; - private static final long MASK32_14 = MASKS32[14]; - private static final long MASK32_15 = MASKS32[15]; - private static final long MASK32_17 = MASKS32[17]; - private static final long MASK32_18 = MASKS32[18]; - private static final long MASK32_19 = MASKS32[19]; - private static final long MASK32_20 = MASKS32[20]; - private static final long MASK32_21 = MASKS32[21]; - private static final long MASK32_22 = MASKS32[22]; - private static final long MASK32_23 = MASKS32[23]; - private static final long MASK32_24 = MASKS32[24]; - - /** Decode 128 integers into {@code longs}. */ - void decode(int bitsPerValue, DataInput in, long[] longs) throws IOException { - switch (bitsPerValue) { - case 1: - decode1(in, tmp, longs); - expand8(longs); - break; - case 2: - decode2(in, tmp, longs); - expand8(longs); - break; - case 3: - decode3(in, tmp, longs); - expand8(longs); - break; - case 4: - decode4(in, tmp, longs); - expand8(longs); - break; - case 5: - decode5(in, tmp, longs); - expand8(longs); - break; - case 6: - decode6(in, tmp, longs); - expand8(longs); - break; - case 7: - decode7(in, tmp, longs); - expand8(longs); - break; - case 8: - decode8(in, tmp, longs); - expand8(longs); - break; - case 9: - decode9(in, tmp, longs); - expand16(longs); - break; - case 10: - decode10(in, tmp, longs); - expand16(longs); - break; - case 11: - decode11(in, tmp, longs); - expand16(longs); - break; - case 12: - decode12(in, tmp, longs); - expand16(longs); - break; - case 13: - decode13(in, tmp, longs); - expand16(longs); - break; - case 14: - decode14(in, tmp, longs); - expand16(longs); - break; - case 15: - decode15(in, tmp, longs); - expand16(longs); - break; - case 16: - decode16(in, tmp, longs); - expand16(longs); - break; - case 17: - decode17(in, tmp, longs); - expand32(longs); - break; - case 18: - decode18(in, tmp, longs); - expand32(longs); - break; - case 19: - decode19(in, tmp, longs); - expand32(longs); - break; - case 20: - decode20(in, tmp, longs); - expand32(longs); - break; - case 21: - decode21(in, tmp, longs); - expand32(longs); - break; - case 22: - decode22(in, tmp, longs); - expand32(longs); - break; - case 23: - decode23(in, tmp, longs); - expand32(longs); - break; - case 24: - decode24(in, tmp, longs); - expand32(longs); - break; - default: - decodeSlow(bitsPerValue, in, tmp, longs); - expand32(longs); - break; - } - } - - /** - * Decodes 128 integers into 64 {@code longs} such that each long contains two values, each - * represented with 32 bits. Values [0..63] are encoded in the high-order bits of {@code longs} - * [0..63], and values [64..127] are encoded in the low-order bits of {@code longs} [0..63]. This - * representation may allow subsequent operations to be performed on two values at a time. 
- */ - void decodeTo32(int bitsPerValue, DataInput in, long[] longs) throws IOException { - switch (bitsPerValue) { - case 1: - decode1(in, tmp, longs); - expand8To32(longs); - break; - case 2: - decode2(in, tmp, longs); - expand8To32(longs); - break; - case 3: - decode3(in, tmp, longs); - expand8To32(longs); - break; - case 4: - decode4(in, tmp, longs); - expand8To32(longs); - break; - case 5: - decode5(in, tmp, longs); - expand8To32(longs); - break; - case 6: - decode6(in, tmp, longs); - expand8To32(longs); - break; - case 7: - decode7(in, tmp, longs); - expand8To32(longs); - break; - case 8: - decode8(in, tmp, longs); - expand8To32(longs); - break; - case 9: - decode9(in, tmp, longs); - expand16To32(longs); - break; - case 10: - decode10(in, tmp, longs); - expand16To32(longs); - break; - case 11: - decode11(in, tmp, longs); - expand16To32(longs); - break; - case 12: - decode12(in, tmp, longs); - expand16To32(longs); - break; - case 13: - decode13(in, tmp, longs); - expand16To32(longs); - break; - case 14: - decode14(in, tmp, longs); - expand16To32(longs); - break; - case 15: - decode15(in, tmp, longs); - expand16To32(longs); - break; - case 16: - decode16(in, tmp, longs); - expand16To32(longs); - break; - case 17: - decode17(in, tmp, longs); - break; - case 18: - decode18(in, tmp, longs); - break; - case 19: - decode19(in, tmp, longs); - break; - case 20: - decode20(in, tmp, longs); - break; - case 21: - decode21(in, tmp, longs); - break; - case 22: - decode22(in, tmp, longs); - break; - case 23: - decode23(in, tmp, longs); - break; - case 24: - decode24(in, tmp, longs); - break; - default: - decodeSlow(bitsPerValue, in, tmp, longs); - break; - } - } - - private static void decode1(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 2); - shiftLongs(tmp, 2, longs, 0, 7, MASK8_1); - shiftLongs(tmp, 2, longs, 2, 6, MASK8_1); - shiftLongs(tmp, 2, longs, 4, 5, MASK8_1); - shiftLongs(tmp, 2, longs, 6, 4, MASK8_1); - shiftLongs(tmp, 2, longs, 8, 3, MASK8_1); - shiftLongs(tmp, 2, longs, 10, 2, MASK8_1); - shiftLongs(tmp, 2, longs, 12, 1, MASK8_1); - shiftLongs(tmp, 2, longs, 14, 0, MASK8_1); - } - - private static void decode2(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 4); - shiftLongs(tmp, 4, longs, 0, 6, MASK8_2); - shiftLongs(tmp, 4, longs, 4, 4, MASK8_2); - shiftLongs(tmp, 4, longs, 8, 2, MASK8_2); - shiftLongs(tmp, 4, longs, 12, 0, MASK8_2); - } - - private static void decode3(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 6); - shiftLongs(tmp, 6, longs, 0, 5, MASK8_3); - shiftLongs(tmp, 6, longs, 6, 2, MASK8_3); - for (int iter = 0, tmpIdx = 0, longsIdx = 12; iter < 2; ++iter, tmpIdx += 3, longsIdx += 2) { - long l0 = (tmp[tmpIdx + 0] & MASK8_2) << 1; - l0 |= (tmp[tmpIdx + 1] >>> 1) & MASK8_1; - longs[longsIdx + 0] = l0; - long l1 = (tmp[tmpIdx + 1] & MASK8_1) << 2; - l1 |= (tmp[tmpIdx + 2] & MASK8_2) << 0; - longs[longsIdx + 1] = l1; - } - } - - private static void decode4(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 8); - shiftLongs(tmp, 8, longs, 0, 4, MASK8_4); - shiftLongs(tmp, 8, longs, 8, 0, MASK8_4); - } - - private static void decode5(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 10); - shiftLongs(tmp, 10, longs, 0, 3, MASK8_5); - for (int iter = 0, tmpIdx = 0, longsIdx = 10; iter < 2; ++iter, tmpIdx += 5, longsIdx += 3) { - long l0 = (tmp[tmpIdx + 0] & MASK8_3) << 2; - l0 |= (tmp[tmpIdx + 1] 
>>> 1) & MASK8_2; - longs[longsIdx + 0] = l0; - long l1 = (tmp[tmpIdx + 1] & MASK8_1) << 4; - l1 |= (tmp[tmpIdx + 2] & MASK8_3) << 1; - l1 |= (tmp[tmpIdx + 3] >>> 2) & MASK8_1; - longs[longsIdx + 1] = l1; - long l2 = (tmp[tmpIdx + 3] & MASK8_2) << 3; - l2 |= (tmp[tmpIdx + 4] & MASK8_3) << 0; - longs[longsIdx + 2] = l2; - } - } - - private static void decode6(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 12); - shiftLongs(tmp, 12, longs, 0, 2, MASK8_6); - shiftLongs(tmp, 12, tmp, 0, 0, MASK8_2); - for (int iter = 0, tmpIdx = 0, longsIdx = 12; iter < 4; ++iter, tmpIdx += 3, longsIdx += 1) { - long l0 = tmp[tmpIdx + 0] << 4; - l0 |= tmp[tmpIdx + 1] << 2; - l0 |= tmp[tmpIdx + 2] << 0; - longs[longsIdx + 0] = l0; - } - } - - private static void decode7(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 14); - shiftLongs(tmp, 14, longs, 0, 1, MASK8_7); - shiftLongs(tmp, 14, tmp, 0, 0, MASK8_1); - for (int iter = 0, tmpIdx = 0, longsIdx = 14; iter < 2; ++iter, tmpIdx += 7, longsIdx += 1) { - long l0 = tmp[tmpIdx + 0] << 6; - l0 |= tmp[tmpIdx + 1] << 5; - l0 |= tmp[tmpIdx + 2] << 4; - l0 |= tmp[tmpIdx + 3] << 3; - l0 |= tmp[tmpIdx + 4] << 2; - l0 |= tmp[tmpIdx + 5] << 1; - l0 |= tmp[tmpIdx + 6] << 0; - longs[longsIdx + 0] = l0; - } - } - - private static void decode8(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(longs, 0, 16); - } - - private static void decode9(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 18); - shiftLongs(tmp, 18, longs, 0, 7, MASK16_9); - for (int iter = 0, tmpIdx = 0, longsIdx = 18; iter < 2; ++iter, tmpIdx += 9, longsIdx += 7) { - long l0 = (tmp[tmpIdx + 0] & MASK16_7) << 2; - l0 |= (tmp[tmpIdx + 1] >>> 5) & MASK16_2; - longs[longsIdx + 0] = l0; - long l1 = (tmp[tmpIdx + 1] & MASK16_5) << 4; - l1 |= (tmp[tmpIdx + 2] >>> 3) & MASK16_4; - longs[longsIdx + 1] = l1; - long l2 = (tmp[tmpIdx + 2] & MASK16_3) << 6; - l2 |= (tmp[tmpIdx + 3] >>> 1) & MASK16_6; - longs[longsIdx + 2] = l2; - long l3 = (tmp[tmpIdx + 3] & MASK16_1) << 8; - l3 |= (tmp[tmpIdx + 4] & MASK16_7) << 1; - l3 |= (tmp[tmpIdx + 5] >>> 6) & MASK16_1; - longs[longsIdx + 3] = l3; - long l4 = (tmp[tmpIdx + 5] & MASK16_6) << 3; - l4 |= (tmp[tmpIdx + 6] >>> 4) & MASK16_3; - longs[longsIdx + 4] = l4; - long l5 = (tmp[tmpIdx + 6] & MASK16_4) << 5; - l5 |= (tmp[tmpIdx + 7] >>> 2) & MASK16_5; - longs[longsIdx + 5] = l5; - long l6 = (tmp[tmpIdx + 7] & MASK16_2) << 7; - l6 |= (tmp[tmpIdx + 8] & MASK16_7) << 0; - longs[longsIdx + 6] = l6; - } - } - - private static void decode10(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 20); - shiftLongs(tmp, 20, longs, 0, 6, MASK16_10); - for (int iter = 0, tmpIdx = 0, longsIdx = 20; iter < 4; ++iter, tmpIdx += 5, longsIdx += 3) { - long l0 = (tmp[tmpIdx + 0] & MASK16_6) << 4; - l0 |= (tmp[tmpIdx + 1] >>> 2) & MASK16_4; - longs[longsIdx + 0] = l0; - long l1 = (tmp[tmpIdx + 1] & MASK16_2) << 8; - l1 |= (tmp[tmpIdx + 2] & MASK16_6) << 2; - l1 |= (tmp[tmpIdx + 3] >>> 4) & MASK16_2; - longs[longsIdx + 1] = l1; - long l2 = (tmp[tmpIdx + 3] & MASK16_4) << 6; - l2 |= (tmp[tmpIdx + 4] & MASK16_6) << 0; - longs[longsIdx + 2] = l2; - } - } - - private static void decode11(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 22); - shiftLongs(tmp, 22, longs, 0, 5, MASK16_11); - for (int iter = 0, tmpIdx = 0, longsIdx = 22; iter < 2; ++iter, tmpIdx += 11, longsIdx += 5) { - long l0 = (tmp[tmpIdx 
+ 0] & MASK16_5) << 6; - l0 |= (tmp[tmpIdx + 1] & MASK16_5) << 1; - l0 |= (tmp[tmpIdx + 2] >>> 4) & MASK16_1; - longs[longsIdx + 0] = l0; - long l1 = (tmp[tmpIdx + 2] & MASK16_4) << 7; - l1 |= (tmp[tmpIdx + 3] & MASK16_5) << 2; - l1 |= (tmp[tmpIdx + 4] >>> 3) & MASK16_2; - longs[longsIdx + 1] = l1; - long l2 = (tmp[tmpIdx + 4] & MASK16_3) << 8; - l2 |= (tmp[tmpIdx + 5] & MASK16_5) << 3; - l2 |= (tmp[tmpIdx + 6] >>> 2) & MASK16_3; - longs[longsIdx + 2] = l2; - long l3 = (tmp[tmpIdx + 6] & MASK16_2) << 9; - l3 |= (tmp[tmpIdx + 7] & MASK16_5) << 4; - l3 |= (tmp[tmpIdx + 8] >>> 1) & MASK16_4; - longs[longsIdx + 3] = l3; - long l4 = (tmp[tmpIdx + 8] & MASK16_1) << 10; - l4 |= (tmp[tmpIdx + 9] & MASK16_5) << 5; - l4 |= (tmp[tmpIdx + 10] & MASK16_5) << 0; - longs[longsIdx + 4] = l4; - } - } - - private static void decode12(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 24); - shiftLongs(tmp, 24, longs, 0, 4, MASK16_12); - shiftLongs(tmp, 24, tmp, 0, 0, MASK16_4); - for (int iter = 0, tmpIdx = 0, longsIdx = 24; iter < 8; ++iter, tmpIdx += 3, longsIdx += 1) { - long l0 = tmp[tmpIdx + 0] << 8; - l0 |= tmp[tmpIdx + 1] << 4; - l0 |= tmp[tmpIdx + 2] << 0; - longs[longsIdx + 0] = l0; - } - } - - private static void decode13(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 26); - shiftLongs(tmp, 26, longs, 0, 3, MASK16_13); - for (int iter = 0, tmpIdx = 0, longsIdx = 26; iter < 2; ++iter, tmpIdx += 13, longsIdx += 3) { - long l0 = (tmp[tmpIdx + 0] & MASK16_3) << 10; - l0 |= (tmp[tmpIdx + 1] & MASK16_3) << 7; - l0 |= (tmp[tmpIdx + 2] & MASK16_3) << 4; - l0 |= (tmp[tmpIdx + 3] & MASK16_3) << 1; - l0 |= (tmp[tmpIdx + 4] >>> 2) & MASK16_1; - longs[longsIdx + 0] = l0; - long l1 = (tmp[tmpIdx + 4] & MASK16_2) << 11; - l1 |= (tmp[tmpIdx + 5] & MASK16_3) << 8; - l1 |= (tmp[tmpIdx + 6] & MASK16_3) << 5; - l1 |= (tmp[tmpIdx + 7] & MASK16_3) << 2; - l1 |= (tmp[tmpIdx + 8] >>> 1) & MASK16_2; - longs[longsIdx + 1] = l1; - long l2 = (tmp[tmpIdx + 8] & MASK16_1) << 12; - l2 |= (tmp[tmpIdx + 9] & MASK16_3) << 9; - l2 |= (tmp[tmpIdx + 10] & MASK16_3) << 6; - l2 |= (tmp[tmpIdx + 11] & MASK16_3) << 3; - l2 |= (tmp[tmpIdx + 12] & MASK16_3) << 0; - longs[longsIdx + 2] = l2; - } - } - - private static void decode14(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 28); - shiftLongs(tmp, 28, longs, 0, 2, MASK16_14); - shiftLongs(tmp, 28, tmp, 0, 0, MASK16_2); - for (int iter = 0, tmpIdx = 0, longsIdx = 28; iter < 4; ++iter, tmpIdx += 7, longsIdx += 1) { - long l0 = tmp[tmpIdx + 0] << 12; - l0 |= tmp[tmpIdx + 1] << 10; - l0 |= tmp[tmpIdx + 2] << 8; - l0 |= tmp[tmpIdx + 3] << 6; - l0 |= tmp[tmpIdx + 4] << 4; - l0 |= tmp[tmpIdx + 5] << 2; - l0 |= tmp[tmpIdx + 6] << 0; - longs[longsIdx + 0] = l0; - } - } - - private static void decode15(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 30); - shiftLongs(tmp, 30, longs, 0, 1, MASK16_15); - shiftLongs(tmp, 30, tmp, 0, 0, MASK16_1); - for (int iter = 0, tmpIdx = 0, longsIdx = 30; iter < 2; ++iter, tmpIdx += 15, longsIdx += 1) { - long l0 = tmp[tmpIdx + 0] << 14; - l0 |= tmp[tmpIdx + 1] << 13; - l0 |= tmp[tmpIdx + 2] << 12; - l0 |= tmp[tmpIdx + 3] << 11; - l0 |= tmp[tmpIdx + 4] << 10; - l0 |= tmp[tmpIdx + 5] << 9; - l0 |= tmp[tmpIdx + 6] << 8; - l0 |= tmp[tmpIdx + 7] << 7; - l0 |= tmp[tmpIdx + 8] << 6; - l0 |= tmp[tmpIdx + 9] << 5; - l0 |= tmp[tmpIdx + 10] << 4; - l0 |= tmp[tmpIdx + 11] << 3; - l0 |= tmp[tmpIdx + 12] << 2; - l0 |= tmp[tmpIdx + 13] 
<< 1; - l0 |= tmp[tmpIdx + 14] << 0; - longs[longsIdx + 0] = l0; - } - } - - private static void decode16(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(longs, 0, 32); - } - - private static void decode17(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 34); - shiftLongs(tmp, 34, longs, 0, 15, MASK32_17); - for (int iter = 0, tmpIdx = 0, longsIdx = 34; iter < 2; ++iter, tmpIdx += 17, longsIdx += 15) { - long l0 = (tmp[tmpIdx + 0] & MASK32_15) << 2; - l0 |= (tmp[tmpIdx + 1] >>> 13) & MASK32_2; - longs[longsIdx + 0] = l0; - long l1 = (tmp[tmpIdx + 1] & MASK32_13) << 4; - l1 |= (tmp[tmpIdx + 2] >>> 11) & MASK32_4; - longs[longsIdx + 1] = l1; - long l2 = (tmp[tmpIdx + 2] & MASK32_11) << 6; - l2 |= (tmp[tmpIdx + 3] >>> 9) & MASK32_6; - longs[longsIdx + 2] = l2; - long l3 = (tmp[tmpIdx + 3] & MASK32_9) << 8; - l3 |= (tmp[tmpIdx + 4] >>> 7) & MASK32_8; - longs[longsIdx + 3] = l3; - long l4 = (tmp[tmpIdx + 4] & MASK32_7) << 10; - l4 |= (tmp[tmpIdx + 5] >>> 5) & MASK32_10; - longs[longsIdx + 4] = l4; - long l5 = (tmp[tmpIdx + 5] & MASK32_5) << 12; - l5 |= (tmp[tmpIdx + 6] >>> 3) & MASK32_12; - longs[longsIdx + 5] = l5; - long l6 = (tmp[tmpIdx + 6] & MASK32_3) << 14; - l6 |= (tmp[tmpIdx + 7] >>> 1) & MASK32_14; - longs[longsIdx + 6] = l6; - long l7 = (tmp[tmpIdx + 7] & MASK32_1) << 16; - l7 |= (tmp[tmpIdx + 8] & MASK32_15) << 1; - l7 |= (tmp[tmpIdx + 9] >>> 14) & MASK32_1; - longs[longsIdx + 7] = l7; - long l8 = (tmp[tmpIdx + 9] & MASK32_14) << 3; - l8 |= (tmp[tmpIdx + 10] >>> 12) & MASK32_3; - longs[longsIdx + 8] = l8; - long l9 = (tmp[tmpIdx + 10] & MASK32_12) << 5; - l9 |= (tmp[tmpIdx + 11] >>> 10) & MASK32_5; - longs[longsIdx + 9] = l9; - long l10 = (tmp[tmpIdx + 11] & MASK32_10) << 7; - l10 |= (tmp[tmpIdx + 12] >>> 8) & MASK32_7; - longs[longsIdx + 10] = l10; - long l11 = (tmp[tmpIdx + 12] & MASK32_8) << 9; - l11 |= (tmp[tmpIdx + 13] >>> 6) & MASK32_9; - longs[longsIdx + 11] = l11; - long l12 = (tmp[tmpIdx + 13] & MASK32_6) << 11; - l12 |= (tmp[tmpIdx + 14] >>> 4) & MASK32_11; - longs[longsIdx + 12] = l12; - long l13 = (tmp[tmpIdx + 14] & MASK32_4) << 13; - l13 |= (tmp[tmpIdx + 15] >>> 2) & MASK32_13; - longs[longsIdx + 13] = l13; - long l14 = (tmp[tmpIdx + 15] & MASK32_2) << 15; - l14 |= (tmp[tmpIdx + 16] & MASK32_15) << 0; - longs[longsIdx + 14] = l14; - } - } - - private static void decode18(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 36); - shiftLongs(tmp, 36, longs, 0, 14, MASK32_18); - for (int iter = 0, tmpIdx = 0, longsIdx = 36; iter < 4; ++iter, tmpIdx += 9, longsIdx += 7) { - long l0 = (tmp[tmpIdx + 0] & MASK32_14) << 4; - l0 |= (tmp[tmpIdx + 1] >>> 10) & MASK32_4; - longs[longsIdx + 0] = l0; - long l1 = (tmp[tmpIdx + 1] & MASK32_10) << 8; - l1 |= (tmp[tmpIdx + 2] >>> 6) & MASK32_8; - longs[longsIdx + 1] = l1; - long l2 = (tmp[tmpIdx + 2] & MASK32_6) << 12; - l2 |= (tmp[tmpIdx + 3] >>> 2) & MASK32_12; - longs[longsIdx + 2] = l2; - long l3 = (tmp[tmpIdx + 3] & MASK32_2) << 16; - l3 |= (tmp[tmpIdx + 4] & MASK32_14) << 2; - l3 |= (tmp[tmpIdx + 5] >>> 12) & MASK32_2; - longs[longsIdx + 3] = l3; - long l4 = (tmp[tmpIdx + 5] & MASK32_12) << 6; - l4 |= (tmp[tmpIdx + 6] >>> 8) & MASK32_6; - longs[longsIdx + 4] = l4; - long l5 = (tmp[tmpIdx + 6] & MASK32_8) << 10; - l5 |= (tmp[tmpIdx + 7] >>> 4) & MASK32_10; - longs[longsIdx + 5] = l5; - long l6 = (tmp[tmpIdx + 7] & MASK32_4) << 14; - l6 |= (tmp[tmpIdx + 8] & MASK32_14) << 0; - longs[longsIdx + 6] = l6; - } - } - - private static void 
decode19(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 38); - shiftLongs(tmp, 38, longs, 0, 13, MASK32_19); - for (int iter = 0, tmpIdx = 0, longsIdx = 38; iter < 2; ++iter, tmpIdx += 19, longsIdx += 13) { - long l0 = (tmp[tmpIdx + 0] & MASK32_13) << 6; - l0 |= (tmp[tmpIdx + 1] >>> 7) & MASK32_6; - longs[longsIdx + 0] = l0; - long l1 = (tmp[tmpIdx + 1] & MASK32_7) << 12; - l1 |= (tmp[tmpIdx + 2] >>> 1) & MASK32_12; - longs[longsIdx + 1] = l1; - long l2 = (tmp[tmpIdx + 2] & MASK32_1) << 18; - l2 |= (tmp[tmpIdx + 3] & MASK32_13) << 5; - l2 |= (tmp[tmpIdx + 4] >>> 8) & MASK32_5; - longs[longsIdx + 2] = l2; - long l3 = (tmp[tmpIdx + 4] & MASK32_8) << 11; - l3 |= (tmp[tmpIdx + 5] >>> 2) & MASK32_11; - longs[longsIdx + 3] = l3; - long l4 = (tmp[tmpIdx + 5] & MASK32_2) << 17; - l4 |= (tmp[tmpIdx + 6] & MASK32_13) << 4; - l4 |= (tmp[tmpIdx + 7] >>> 9) & MASK32_4; - longs[longsIdx + 4] = l4; - long l5 = (tmp[tmpIdx + 7] & MASK32_9) << 10; - l5 |= (tmp[tmpIdx + 8] >>> 3) & MASK32_10; - longs[longsIdx + 5] = l5; - long l6 = (tmp[tmpIdx + 8] & MASK32_3) << 16; - l6 |= (tmp[tmpIdx + 9] & MASK32_13) << 3; - l6 |= (tmp[tmpIdx + 10] >>> 10) & MASK32_3; - longs[longsIdx + 6] = l6; - long l7 = (tmp[tmpIdx + 10] & MASK32_10) << 9; - l7 |= (tmp[tmpIdx + 11] >>> 4) & MASK32_9; - longs[longsIdx + 7] = l7; - long l8 = (tmp[tmpIdx + 11] & MASK32_4) << 15; - l8 |= (tmp[tmpIdx + 12] & MASK32_13) << 2; - l8 |= (tmp[tmpIdx + 13] >>> 11) & MASK32_2; - longs[longsIdx + 8] = l8; - long l9 = (tmp[tmpIdx + 13] & MASK32_11) << 8; - l9 |= (tmp[tmpIdx + 14] >>> 5) & MASK32_8; - longs[longsIdx + 9] = l9; - long l10 = (tmp[tmpIdx + 14] & MASK32_5) << 14; - l10 |= (tmp[tmpIdx + 15] & MASK32_13) << 1; - l10 |= (tmp[tmpIdx + 16] >>> 12) & MASK32_1; - longs[longsIdx + 10] = l10; - long l11 = (tmp[tmpIdx + 16] & MASK32_12) << 7; - l11 |= (tmp[tmpIdx + 17] >>> 6) & MASK32_7; - longs[longsIdx + 11] = l11; - long l12 = (tmp[tmpIdx + 17] & MASK32_6) << 13; - l12 |= (tmp[tmpIdx + 18] & MASK32_13) << 0; - longs[longsIdx + 12] = l12; - } - } - - private static void decode20(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 40); - shiftLongs(tmp, 40, longs, 0, 12, MASK32_20); - for (int iter = 0, tmpIdx = 0, longsIdx = 40; iter < 8; ++iter, tmpIdx += 5, longsIdx += 3) { - long l0 = (tmp[tmpIdx + 0] & MASK32_12) << 8; - l0 |= (tmp[tmpIdx + 1] >>> 4) & MASK32_8; - longs[longsIdx + 0] = l0; - long l1 = (tmp[tmpIdx + 1] & MASK32_4) << 16; - l1 |= (tmp[tmpIdx + 2] & MASK32_12) << 4; - l1 |= (tmp[tmpIdx + 3] >>> 8) & MASK32_4; - longs[longsIdx + 1] = l1; - long l2 = (tmp[tmpIdx + 3] & MASK32_8) << 12; - l2 |= (tmp[tmpIdx + 4] & MASK32_12) << 0; - longs[longsIdx + 2] = l2; - } - } - - private static void decode21(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 42); - shiftLongs(tmp, 42, longs, 0, 11, MASK32_21); - for (int iter = 0, tmpIdx = 0, longsIdx = 42; iter < 2; ++iter, tmpIdx += 21, longsIdx += 11) { - long l0 = (tmp[tmpIdx + 0] & MASK32_11) << 10; - l0 |= (tmp[tmpIdx + 1] >>> 1) & MASK32_10; - longs[longsIdx + 0] = l0; - long l1 = (tmp[tmpIdx + 1] & MASK32_1) << 20; - l1 |= (tmp[tmpIdx + 2] & MASK32_11) << 9; - l1 |= (tmp[tmpIdx + 3] >>> 2) & MASK32_9; - longs[longsIdx + 1] = l1; - long l2 = (tmp[tmpIdx + 3] & MASK32_2) << 19; - l2 |= (tmp[tmpIdx + 4] & MASK32_11) << 8; - l2 |= (tmp[tmpIdx + 5] >>> 3) & MASK32_8; - longs[longsIdx + 2] = l2; - long l3 = (tmp[tmpIdx + 5] & MASK32_3) << 18; - l3 |= (tmp[tmpIdx + 6] & MASK32_11) << 7; 
- l3 |= (tmp[tmpIdx + 7] >>> 4) & MASK32_7; - longs[longsIdx + 3] = l3; - long l4 = (tmp[tmpIdx + 7] & MASK32_4) << 17; - l4 |= (tmp[tmpIdx + 8] & MASK32_11) << 6; - l4 |= (tmp[tmpIdx + 9] >>> 5) & MASK32_6; - longs[longsIdx + 4] = l4; - long l5 = (tmp[tmpIdx + 9] & MASK32_5) << 16; - l5 |= (tmp[tmpIdx + 10] & MASK32_11) << 5; - l5 |= (tmp[tmpIdx + 11] >>> 6) & MASK32_5; - longs[longsIdx + 5] = l5; - long l6 = (tmp[tmpIdx + 11] & MASK32_6) << 15; - l6 |= (tmp[tmpIdx + 12] & MASK32_11) << 4; - l6 |= (tmp[tmpIdx + 13] >>> 7) & MASK32_4; - longs[longsIdx + 6] = l6; - long l7 = (tmp[tmpIdx + 13] & MASK32_7) << 14; - l7 |= (tmp[tmpIdx + 14] & MASK32_11) << 3; - l7 |= (tmp[tmpIdx + 15] >>> 8) & MASK32_3; - longs[longsIdx + 7] = l7; - long l8 = (tmp[tmpIdx + 15] & MASK32_8) << 13; - l8 |= (tmp[tmpIdx + 16] & MASK32_11) << 2; - l8 |= (tmp[tmpIdx + 17] >>> 9) & MASK32_2; - longs[longsIdx + 8] = l8; - long l9 = (tmp[tmpIdx + 17] & MASK32_9) << 12; - l9 |= (tmp[tmpIdx + 18] & MASK32_11) << 1; - l9 |= (tmp[tmpIdx + 19] >>> 10) & MASK32_1; - longs[longsIdx + 9] = l9; - long l10 = (tmp[tmpIdx + 19] & MASK32_10) << 11; - l10 |= (tmp[tmpIdx + 20] & MASK32_11) << 0; - longs[longsIdx + 10] = l10; - } - } - - private static void decode22(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 44); - shiftLongs(tmp, 44, longs, 0, 10, MASK32_22); - for (int iter = 0, tmpIdx = 0, longsIdx = 44; iter < 4; ++iter, tmpIdx += 11, longsIdx += 5) { - long l0 = (tmp[tmpIdx + 0] & MASK32_10) << 12; - l0 |= (tmp[tmpIdx + 1] & MASK32_10) << 2; - l0 |= (tmp[tmpIdx + 2] >>> 8) & MASK32_2; - longs[longsIdx + 0] = l0; - long l1 = (tmp[tmpIdx + 2] & MASK32_8) << 14; - l1 |= (tmp[tmpIdx + 3] & MASK32_10) << 4; - l1 |= (tmp[tmpIdx + 4] >>> 6) & MASK32_4; - longs[longsIdx + 1] = l1; - long l2 = (tmp[tmpIdx + 4] & MASK32_6) << 16; - l2 |= (tmp[tmpIdx + 5] & MASK32_10) << 6; - l2 |= (tmp[tmpIdx + 6] >>> 4) & MASK32_6; - longs[longsIdx + 2] = l2; - long l3 = (tmp[tmpIdx + 6] & MASK32_4) << 18; - l3 |= (tmp[tmpIdx + 7] & MASK32_10) << 8; - l3 |= (tmp[tmpIdx + 8] >>> 2) & MASK32_8; - longs[longsIdx + 3] = l3; - long l4 = (tmp[tmpIdx + 8] & MASK32_2) << 20; - l4 |= (tmp[tmpIdx + 9] & MASK32_10) << 10; - l4 |= (tmp[tmpIdx + 10] & MASK32_10) << 0; - longs[longsIdx + 4] = l4; - } - } - - private static void decode23(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 46); - shiftLongs(tmp, 46, longs, 0, 9, MASK32_23); - for (int iter = 0, tmpIdx = 0, longsIdx = 46; iter < 2; ++iter, tmpIdx += 23, longsIdx += 9) { - long l0 = (tmp[tmpIdx + 0] & MASK32_9) << 14; - l0 |= (tmp[tmpIdx + 1] & MASK32_9) << 5; - l0 |= (tmp[tmpIdx + 2] >>> 4) & MASK32_5; - longs[longsIdx + 0] = l0; - long l1 = (tmp[tmpIdx + 2] & MASK32_4) << 19; - l1 |= (tmp[tmpIdx + 3] & MASK32_9) << 10; - l1 |= (tmp[tmpIdx + 4] & MASK32_9) << 1; - l1 |= (tmp[tmpIdx + 5] >>> 8) & MASK32_1; - longs[longsIdx + 1] = l1; - long l2 = (tmp[tmpIdx + 5] & MASK32_8) << 15; - l2 |= (tmp[tmpIdx + 6] & MASK32_9) << 6; - l2 |= (tmp[tmpIdx + 7] >>> 3) & MASK32_6; - longs[longsIdx + 2] = l2; - long l3 = (tmp[tmpIdx + 7] & MASK32_3) << 20; - l3 |= (tmp[tmpIdx + 8] & MASK32_9) << 11; - l3 |= (tmp[tmpIdx + 9] & MASK32_9) << 2; - l3 |= (tmp[tmpIdx + 10] >>> 7) & MASK32_2; - longs[longsIdx + 3] = l3; - long l4 = (tmp[tmpIdx + 10] & MASK32_7) << 16; - l4 |= (tmp[tmpIdx + 11] & MASK32_9) << 7; - l4 |= (tmp[tmpIdx + 12] >>> 2) & MASK32_7; - longs[longsIdx + 4] = l4; - long l5 = (tmp[tmpIdx + 12] & MASK32_2) << 21; - l5 |= (tmp[tmpIdx + 13] & 
MASK32_9) << 12; - l5 |= (tmp[tmpIdx + 14] & MASK32_9) << 3; - l5 |= (tmp[tmpIdx + 15] >>> 6) & MASK32_3; - longs[longsIdx + 5] = l5; - long l6 = (tmp[tmpIdx + 15] & MASK32_6) << 17; - l6 |= (tmp[tmpIdx + 16] & MASK32_9) << 8; - l6 |= (tmp[tmpIdx + 17] >>> 1) & MASK32_8; - longs[longsIdx + 6] = l6; - long l7 = (tmp[tmpIdx + 17] & MASK32_1) << 22; - l7 |= (tmp[tmpIdx + 18] & MASK32_9) << 13; - l7 |= (tmp[tmpIdx + 19] & MASK32_9) << 4; - l7 |= (tmp[tmpIdx + 20] >>> 5) & MASK32_4; - longs[longsIdx + 7] = l7; - long l8 = (tmp[tmpIdx + 20] & MASK32_5) << 18; - l8 |= (tmp[tmpIdx + 21] & MASK32_9) << 9; - l8 |= (tmp[tmpIdx + 22] & MASK32_9) << 0; - longs[longsIdx + 8] = l8; - } - } - - private static void decode24(DataInput in, long[] tmp, long[] longs) throws IOException { - in.readLongs(tmp, 0, 48); - shiftLongs(tmp, 48, longs, 0, 8, MASK32_24); - shiftLongs(tmp, 48, tmp, 0, 0, MASK32_8); - for (int iter = 0, tmpIdx = 0, longsIdx = 48; iter < 16; ++iter, tmpIdx += 3, longsIdx += 1) { - long l0 = tmp[tmpIdx + 0] << 16; - l0 |= tmp[tmpIdx + 1] << 8; - l0 |= tmp[tmpIdx + 2] << 0; - longs[longsIdx + 0] = l0; - } - } -} diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/PForUtil.java b/server/src/main/java/org/elasticsearch/index/codec/postings/PForUtil.java index 26a600c73eeb5..46ab0b0d00671 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/PForUtil.java +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/PForUtil.java @@ -23,6 +23,7 @@ import org.apache.lucene.store.DataOutput; import org.apache.lucene.util.LongHeap; import org.apache.lucene.util.packed.PackedInts; +import org.elasticsearch.index.codec.ForUtil; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/DocValuesForUtil.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/DocValuesForUtil.java index 873dcc9b87207..303c66309a23f 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/tsdb/DocValuesForUtil.java +++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/DocValuesForUtil.java @@ -11,6 +11,7 @@ import org.apache.lucene.store.DataInput; import org.apache.lucene.store.DataOutput; import org.elasticsearch.common.util.ByteUtils; +import org.elasticsearch.index.codec.ForUtil; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/index/codec/postings/ForUtilTests.java b/server/src/test/java/org/elasticsearch/index/codec/ForUtilTests.java similarity index 98% rename from server/src/test/java/org/elasticsearch/index/codec/postings/ForUtilTests.java rename to server/src/test/java/org/elasticsearch/index/codec/ForUtilTests.java index 14e8d3344c3dc..5d9052203c5f4 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/postings/ForUtilTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/ForUtilTests.java @@ -17,7 +17,7 @@ * * Modifications copyright (C) 2022 Elasticsearch B.V. 
*/ -package org.elasticsearch.index.codec.postings; +package org.elasticsearch.index.codec; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; diff --git a/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesForUtilTests.java b/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesForUtilTests.java index ec8308404a118..f766d2148a96e 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesForUtilTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesForUtilTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.packed.PackedInts; +import org.elasticsearch.index.codec.ForUtil; import java.io.IOException; import java.util.Arrays; From 42fae6bf4ccb9c168d0f6b55c03eff06acf1b9cc Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Mon, 25 Mar 2024 11:14:49 +0000 Subject: [PATCH 36/79] Skip logging when no existing autosharding event (#106711) This skips rather confusing logging like ``` Rolling over data stream [logs-mysql.error-default] using existing auto-sharding recommendation [null] ``` Will only log this statement when there actually is an active auto sharding event that's being used in the rollover process. --- .../rollover/MetadataRolloverService.java | 26 ++++++------------- 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index 6645de880ad86..b03353a11793f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -292,12 +292,14 @@ private RolloverResult rolloverDataStream( DataStreamAutoShardingEvent dataStreamAutoShardingEvent = autoShardingResult == null ? dataStream.getAutoShardingEvent() : switch (autoShardingResult.type()) { - case NO_CHANGE_REQUIRED -> { - logger.info( - "Rolling over data stream [{}] using existing auto-sharding recommendation [{}]", - dataStreamName, - dataStream.getAutoShardingEvent() - ); + case NO_CHANGE_REQUIRED, COOLDOWN_PREVENTED_INCREASE, COOLDOWN_PREVENTED_DECREASE -> { + if (dataStream.getAutoShardingEvent() != null) { + logger.info( + "Rolling over data stream [{}] using existing auto-sharding recommendation [{}]", + dataStreamName, + dataStream.getAutoShardingEvent() + ); + } yield dataStream.getAutoShardingEvent(); } case INCREASE_SHARDS, DECREASE_SHARDS -> { @@ -308,18 +310,6 @@ yield new DataStreamAutoShardingEvent( now.toEpochMilli() ); } - case COOLDOWN_PREVENTED_INCREASE, COOLDOWN_PREVENTED_DECREASE -> { - // we're in the cooldown period for this particular recommendation so perhaps use a previous autosharding - // recommendation (or the value configured in the backing index template otherwise) - if (dataStream.getAutoShardingEvent() != null) { - logger.info( - "Rolling over data stream [{}] using existing auto-sharding recommendation [{}]", - dataStreamName, - dataStream.getAutoShardingEvent() - ); - } - yield dataStream.getAutoShardingEvent(); - } // data sharding might not be available due to the feature not being available/enabled or due to cluster level excludes // being configured. 
the index template will dictate the number of shards as usual case NOT_APPLICABLE -> { From c15f727f2d86cc82febc4e3efce9d45b8e53f2c0 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Mon, 25 Mar 2024 12:16:18 +0100 Subject: [PATCH 37/79] Update Gradle wrapper to 8.7 (#105854) removed tiny issue on build script that seems like an oversight and doesn't do anything at all --- .../gradle/wrapper/gradle-wrapper.properties | 4 ++-- .../src/main/resources/minimumGradleVersion | 2 +- gradle/wrapper/gradle-wrapper.jar | Bin 43462 -> 43453 bytes gradle/wrapper/gradle-wrapper.properties | 4 ++-- .../gradle/wrapper/gradle-wrapper.properties | 4 ++-- x-pack/plugin/ent-search/build.gradle | 2 -- 6 files changed, 7 insertions(+), 9 deletions(-) diff --git a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties index 865f1ba80d1e6..fcbbad6dd644c 100644 --- a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties +++ b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=85719317abd2112f021d4f41f09ec370534ba288432065f4b477b6a3b652910d -distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-all.zip +distributionSha256Sum=194717442575a6f96e1c1befa2c30e9a4fc90f701d7aee33eb879b79e7ff05c0 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.7-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/build-tools-internal/src/main/resources/minimumGradleVersion b/build-tools-internal/src/main/resources/minimumGradleVersion index f043ef362390f..631c6d36a93a4 100644 --- a/build-tools-internal/src/main/resources/minimumGradleVersion +++ b/build-tools-internal/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -8.6 \ No newline at end of file +8.7 \ No newline at end of file diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index d64cd4917707c1f8861d8cb53dd15194d4248596..e6441136f3d4ba8a0da8d277868979cfbc8ad796 100644 GIT binary patch delta 34118 zcmY(qRX`kF)3u#IAjsf0xCD212@LM;?(PINyAue(f;$XO2=4Cg1P$=#e%|lo zKk1`B>Q#GH)wNd-&cJofz}3=WfYndTeo)CyX{fOHsQjGa<{e=jamMNwjdatD={CN3>GNchOE9OGPIqr)3v>RcKWR3Z zF-guIMjE2UF0Wqk1)21791y#}ciBI*bAenY*BMW_)AeSuM5}vz_~`+1i!Lo?XAEq{TlK5-efNFgHr6o zD>^vB&%3ZGEWMS>`?tu!@66|uiDvS5`?bF=gIq3rkK(j<_TybyoaDHg8;Y#`;>tXI z=tXo~e9{U!*hqTe#nZjW4z0mP8A9UUv1}C#R*@yu9G3k;`Me0-BA2&Aw6f`{Ozan2 z8c8Cs#dA-7V)ZwcGKH}jW!Ja&VaUc@mu5a@CObzNot?b{f+~+212lwF;!QKI16FDS zodx>XN$sk9;t;)maB^s6sr^L32EbMV(uvW%or=|0@U6cUkE`_!<=LHLlRGJx@gQI=B(nn z-GEjDE}*8>3U$n(t^(b^C$qSTI;}6q&ypp?-2rGpqg7b}pyT zOARu2x>0HB{&D(d3sp`+}ka+Pca5glh|c=M)Ujn_$ly^X6&u z%Q4Y*LtB_>i6(YR!?{Os-(^J`(70lZ&Hp1I^?t@~SFL1!m0x6j|NM!-JTDk)%Q^R< z@e?23FD&9_W{Bgtr&CG&*Oer3Z(Bu2EbV3T9FeQ|-vo5pwzwQ%g&=zFS7b{n6T2ZQ z*!H(=z<{D9@c`KmHO&DbUIzpg`+r5207}4D=_P$ONIc5lsFgn)UB-oUE#{r+|uHc^hzv_df zV`n8&qry%jXQ33}Bjqcim~BY1?KZ}x453Oh7G@fA(}+m(f$)TY%7n=MeLi{jJ7LMB zt(mE*vFnep?YpkT_&WPV9*f>uSi#n#@STJmV&SLZnlLsWYI@y+Bs=gzcqche=&cBH2WL)dkR!a95*Ri)JH_4c*- zl4pPLl^as5_y&6RDE@@7342DNyF&GLJez#eMJjI}#pZN{Y8io{l*D+|f_Y&RQPia@ zNDL;SBERA|B#cjlNC@VU{2csOvB8$HzU$01Q?y)KEfos>W46VMh>P~oQC8k=26-Ku)@C|n^zDP!hO}Y z_tF}0@*Ds!JMt>?4y|l3?`v#5*oV-=vL7}zehMON^=s1%q+n=^^Z{^mTs7}*->#YL z)x-~SWE{e?YCarwU$=cS>VzmUh?Q&7?#Xrcce+jeZ|%0!l|H_=D_`77hBfd4Zqk&! 
zq-Dnt_?5*$Wsw8zGd@?woEtfYZ2|9L8b>TO6>oMh%`B7iBb)-aCefM~q|S2Cc0t9T zlu-ZXmM0wd$!gd-dTtik{bqyx32%f;`XUvbUWWJmpHfk8^PQIEsByJm+@+-aj4J#D z4#Br3pO6z1eIC>X^yKk|PeVwX_4B+IYJyJyc3B`4 zPrM#raacGIzVOexcVB;fcsxS=s1e&V;Xe$tw&KQ`YaCkHTKe*Al#velxV{3wxx}`7@isG zp6{+s)CG%HF#JBAQ_jM%zCX5X;J%-*%&jVI?6KpYyzGbq7qf;&hFprh?E5Wyo=bZ) z8YNycvMNGp1836!-?nihm6jI`^C`EeGryoNZO1AFTQhzFJOA%Q{X(sMYlzABt!&f{ zoDENSuoJQIg5Q#@BUsNJX2h>jkdx4<+ipUymWKFr;w+s>$laIIkfP6nU}r+?J9bZg zUIxz>RX$kX=C4m(zh-Eg$BsJ4OL&_J38PbHW&7JmR27%efAkqqdvf)Am)VF$+U3WR z-E#I9H6^)zHLKCs7|Zs<7Bo9VCS3@CDQ;{UTczoEprCKL3ZZW!ffmZFkcWU-V|_M2 zUA9~8tE9<5`59W-UgUmDFp11YlORl3mS3*2#ZHjv{*-1#uMV_oVTy{PY(}AqZv#wF zJVks)%N6LaHF$$<6p8S8Lqn+5&t}DmLKiC~lE{jPZ39oj{wR&fe*LX-z0m}9ZnZ{U z>3-5Bh{KKN^n5i!M79Aw5eY=`6fG#aW1_ZG;fw7JM69qk^*(rmO{|Z6rXy?l=K=#_ zE-zd*P|(sskasO(cZ5L~_{Mz&Y@@@Q)5_8l<6vB$@226O+pDvkFaK8b>%2 zfMtgJ@+cN@w>3)(_uR;s8$sGONbYvoEZ3-)zZk4!`tNzd<0lwt{RAgplo*f@Z)uO` zzd`ljSqKfHJOLxya4_}T`k5Ok1Mpo#MSqf~&ia3uIy{zyuaF}pV6 z)@$ZG5LYh8Gge*LqM_|GiT1*J*uKes=Oku_gMj&;FS`*sfpM+ygN&yOla-^WtIU#$ zuw(_-?DS?6DY7IbON7J)p^IM?N>7x^3)(7wR4PZJu(teex%l>zKAUSNL@~{czc}bR z)I{XzXqZBU3a;7UQ~PvAx8g-3q-9AEd}1JrlfS8NdPc+!=HJ6Bs( zCG!0;e0z-22(Uzw>hkEmC&xj?{0p|kc zM}MMXCF%RLLa#5jG`+}{pDL3M&|%3BlwOi?dq!)KUdv5__zR>u^o|QkYiqr(m3HxF z6J*DyN#Jpooc$ok=b7{UAVM@nwGsr6kozSddwulf5g1{B=0#2)zv!zLXQup^BZ4sv*sEsn)+MA?t zEL)}3*R?4(J~CpeSJPM!oZ~8;8s_=@6o`IA%{aEA9!GELRvOuncE`s7sH91 zmF=+T!Q6%){?lJn3`5}oW31(^Of|$r%`~gT{eimT7R~*Mg@x+tWM3KE>=Q>nkMG$U za7r>Yz2LEaA|PsMafvJ(Y>Xzha?=>#B!sYfVob4k5Orb$INFdL@U0(J8Hj&kgWUlO zPm+R07E+oq^4f4#HvEPANGWLL_!uF{nkHYE&BCH%l1FL_r(Nj@M)*VOD5S42Gk-yT z^23oAMvpA57H(fkDGMx86Z}rtQhR^L!T2iS!788E z+^${W1V}J_NwdwdxpXAW8}#6o1(Uu|vhJvubFvQIH1bDl4J4iDJ+181KuDuHwvM?` z%1@Tnq+7>p{O&p=@QT}4wT;HCb@i)&7int<0#bj8j0sfN3s6|a(l7Bj#7$hxX@~iP z1HF8RFH}irky&eCN4T94VyKqGywEGY{Gt0Xl-`|dOU&{Q;Ao;sL>C6N zXx1y^RZSaL-pG|JN;j9ADjo^XR}gce#seM4QB1?S`L*aB&QlbBIRegMnTkTCks7JU z<0(b+^Q?HN1&$M1l&I@>HMS;!&bb()a}hhJzsmB?I`poqTrSoO>m_JE5U4=?o;OV6 zBZjt;*%1P>%2{UL=;a4(aI>PRk|mr&F^=v6Fr&xMj8fRCXE5Z2qdre&;$_RNid5!S zm^XiLK25G6_j4dWkFqjtU7#s;b8h?BYFxV?OE?c~&ME`n`$ix_`mb^AWr+{M9{^^Rl;~KREplwy2q;&xe zUR0SjHzKVYzuqQ84w$NKVPGVHL_4I)Uw<$uL2-Ml#+5r2X{LLqc*p13{;w#E*Kwb*1D|v?e;(<>vl@VjnFB^^Y;;b3 z=R@(uRj6D}-h6CCOxAdqn~_SG=bN%^9(Ac?zfRkO5x2VM0+@_qk?MDXvf=@q_* z3IM@)er6-OXyE1Z4sU3{8$Y$>8NcnU-nkyWD&2ZaqX1JF_JYL8y}>@V8A5%lX#U3E zet5PJM`z79q9u5v(OE~{by|Jzlw2<0h`hKpOefhw=fgLTY9M8h+?37k@TWpzAb2Fc zQMf^aVf!yXlK?@5d-re}!fuAWu0t57ZKSSacwRGJ$0uC}ZgxCTw>cjRk*xCt%w&hh zoeiIgdz__&u~8s|_TZsGvJ7sjvBW<(C@}Y%#l_ID2&C`0;Eg2Z+pk;IK}4T@W6X5H z`s?ayU-iF+aNr5--T-^~K~p;}D(*GWOAYDV9JEw!w8ZYzS3;W6*_`#aZw&9J ziXhBKU3~zd$kKzCAP-=t&cFDeQR*_e*(excIUxKuD@;-twSlP6>wWQU)$|H3Cy+`= z-#7OW!ZlYzZxkdQpfqVDFU3V2B_-eJS)Fi{fLtRz!K{~7TR~XilNCu=Z;{GIf9KYz zf3h=Jo+1#_s>z$lc~e)l93h&RqW1VHYN;Yjwg#Qi0yzjN^M4cuL>Ew`_-_wRhi*!f zLK6vTpgo^Bz?8AsU%#n}^EGigkG3FXen3M;hm#C38P@Zs4{!QZPAU=m7ZV&xKI_HWNt90Ef zxClm)ZY?S|n**2cNYy-xBlLAVZ=~+!|7y`(fh+M$#4zl&T^gV8ZaG(RBD!`3?9xcK zp2+aD(T%QIgrLx5au&TjG1AazI;`8m{K7^!@m>uGCSR;Ut{&?t%3AsF{>0Cm(Kf)2 z?4?|J+!BUg*P~C{?mwPQ#)gDMmro20YVNsVx5oWQMkzQ? 
zsQ%Y>%7_wkJqnSMuZjB9lBM(o zWut|B7w48cn}4buUBbdPBW_J@H7g=szrKEpb|aE>!4rLm+sO9K%iI75y~2HkUo^iw zJ3se$8$|W>3}?JU@3h@M^HEFNmvCp|+$-0M?RQ8SMoZ@38%!tz8f8-Ptb@106heiJ z^Bx!`0=Im z1!NUhO=9ICM*+||b3a7w*Y#5*Q}K^ar+oMMtekF0JnO>hzHqZKH0&PZ^^M(j;vwf_ z@^|VMBpcw8;4E-9J{(u7sHSyZpQbS&N{VQ%ZCh{c1UA5;?R} z+52*X_tkDQ(s~#-6`z4|Y}3N#a&dgP4S_^tsV=oZr4A1 zaSoPN1czE(UIBrC_r$0HM?RyBGe#lTBL4~JW#A`P^#0wuK)C-2$B6TvMi@@%K@JAT_IB^T7Zfqc8?{wHcSVG_?{(wUG%zhCm=%qP~EqeqKI$9UivF zv+5IUOs|%@ypo6b+i=xsZ=^G1yeWe)z6IX-EC`F=(|_GCNbHbNp(CZ*lpSu5n`FRA zhnrc4w+Vh?r>her@Ba_jv0Omp#-H7avZb=j_A~B%V0&FNi#!S8cwn0(Gg-Gi_LMI{ zCg=g@m{W@u?GQ|yp^yENd;M=W2s-k7Gw2Z(tsD5fTGF{iZ%Ccgjy6O!AB4x z%&=6jB7^}pyftW2YQpOY1w@%wZy%}-l0qJlOSKZXnN2wo3|hujU+-U~blRF!^;Tan z0w;Srh0|Q~6*tXf!5-rCD)OYE(%S|^WTpa1KHtpHZ{!;KdcM^#g8Z^+LkbiBHt85m z;2xv#83lWB(kplfgqv@ZNDcHizwi4-8+WHA$U-HBNqsZ`hKcUI3zV3d1ngJP-AMRET*A{> zb2A>Fk|L|WYV;Eu4>{a6ESi2r3aZL7x}eRc?cf|~bP)6b7%BnsR{Sa>K^0obn?yiJ zCVvaZ&;d_6WEk${F1SN0{_`(#TuOOH1as&#&xN~+JDzX(D-WU_nLEI}T_VaeLA=bc zl_UZS$nu#C1yH}YV>N2^9^zye{rDrn(rS99>Fh&jtNY7PP15q%g=RGnxACdCov47= zwf^9zfJaL{y`R#~tvVL#*<`=`Qe zj_@Me$6sIK=LMFbBrJps7vdaf_HeX?eC+P^{AgSvbEn?n<}NDWiQGQG4^ZOc|GskK z$Ve2_n8gQ-KZ=s(f`_X!+vM5)4+QmOP()2Fe#IL2toZBf+)8gTVgDSTN1CkP<}!j7 z0SEl>PBg{MnPHkj4wj$mZ?m5x!1ePVEYI(L_sb0OZ*=M%yQb?L{UL(2_*CTVbRxBe z@{)COwTK1}!*CK0Vi4~AB;HF(MmQf|dsoy(eiQ>WTKcEQlnKOri5xYsqi61Y=I4kzAjn5~{IWrz_l))|Ls zvq7xgQs?Xx@`N?f7+3XKLyD~6DRJw*uj*j?yvT3}a;(j_?YOe%hUFcPGWRVBXzpMJ zM43g6DLFqS9tcTLSg=^&N-y0dXL816v&-nqC0iXdg7kV|PY+js`F8dm z2PuHw&k+8*&9SPQ6f!^5q0&AH(i+z3I7a?8O+S5`g)>}fG|BM&ZnmL;rk)|u{1!aZ zEZHpAMmK_v$GbrrWNP|^2^s*!0waLW=-h5PZa-4jWYUt(Hr@EA(m3Mc3^uDxwt-me^55FMA9^>hpp26MhqjLg#^Y7OIJ5%ZLdNx&uDgIIqc zZRZl|n6TyV)0^DDyVtw*jlWkDY&Gw4q;k!UwqSL6&sW$B*5Rc?&)dt29bDB*b6IBY z6SY6Unsf6AOQdEf=P1inu6(6hVZ0~v-<>;LAlcQ2u?wRWj5VczBT$Op#8IhppP-1t zfz5H59Aa~yh7EN;BXJsLyjkjqARS5iIhDVPj<=4AJb}m6M@n{xYj3qsR*Q8;hVxDyC4vLI;;?^eENOb5QARj#nII5l$MtBCI@5u~(ylFi$ zw6-+$$XQ}Ca>FWT>q{k)g{Ml(Yv=6aDfe?m|5|kbGtWS}fKWI+})F6`x@||0oJ^(g|+xi zqlPdy5;`g*i*C=Q(aGeDw!eQg&w>UUj^{o?PrlFI=34qAU2u@BgwrBiaM8zoDTFJ< zh7nWpv>dr?q;4ZA?}V}|7qWz4W?6#S&m>hs4IwvCBe@-C>+oohsQZ^JC*RfDRm!?y zS4$7oxcI|##ga*y5hV>J4a%HHl^t$pjY%caL%-FlRb<$A$E!ws?8hf0@(4HdgQ!@> zds{&g$ocr9W4I84TMa9-(&^_B*&R%^=@?Ntxi|Ejnh;z=!|uVj&3fiTngDPg=0=P2 zB)3#%HetD84ayj??qrxsd9nqrBem(8^_u_UY{1@R_vK-0H9N7lBX5K(^O2=0#TtUUGSz{ z%g>qU8#a$DyZ~EMa|8*@`GOhCW3%DN%xuS91T7~iXRr)SG`%=Lfu%U~Z_`1b=lSi?qpD4$vLh$?HU6t0MydaowUpb zQr{>_${AMesCEffZo`}K0^~x>RY_ZIG{(r39MP>@=aiM@C;K)jUcfQV8#?SDvq>9D zI{XeKM%$$XP5`7p3K0T}x;qn)VMo>2t}Ib(6zui;k}<<~KibAb%p)**e>ln<=qyWU zrRDy|UXFi9y~PdEFIAXejLA{K)6<)Q`?;Q5!KsuEw({!#Rl8*5_F{TP?u|5(Hijv( ztAA^I5+$A*+*e0V0R~fc{ET-RAS3suZ}TRk3r)xqj~g_hxB`qIK5z(5wxYboz%46G zq{izIz^5xW1Vq#%lhXaZL&)FJWp0VZNO%2&ADd?+J%K$fM#T_Eke1{dQsx48dUPUY zLS+DWMJeUSjYL453f@HpRGU6Dv)rw+-c6xB>(=p4U%}_p>z^I@Ow9`nkUG21?cMIh9}hN?R-d)*6%pr6d@mcb*ixr7 z)>Lo<&2F}~>WT1ybm^9UO{6P9;m+fU^06_$o9gBWL9_}EMZFD=rLJ~&e?fhDnJNBI zKM=-WR6g7HY5tHf=V~6~QIQ~rakNvcsamU8m28YE=z8+G7K=h%)l6k zmCpiDInKL6*e#)#Pt;ANmjf`8h-nEt&d}(SBZMI_A{BI#ck-_V7nx)K9_D9K-p@?Zh81#b@{wS?wCcJ%og)8RF*-0z+~)6f#T` zWqF7_CBcnn=S-1QykC*F0YTsKMVG49BuKQBH%WuDkEy%E?*x&tt%0m>>5^HCOq|ux zuvFB)JPR-W|%$24eEC^AtG3Gp4qdK%pjRijF5Sg3X}uaKEE z-L5p5aVR!NTM8T`4|2QA@hXiLXRcJveWZ%YeFfV%mO5q#($TJ`*U>hicS+CMj%Ip# zivoL;dd*araeJK9EA<(tihD50FHWbITBgF9E<33A+eMr2;cgI3Gg6<-2o|_g9|> zv5}i932( zYfTE9?4#nQhP@a|zm#9FST2 z!y+p3B;p>KkUzH!K;GkBW}bWssz)9b>Ulg^)EDca;jDl+q=243BddS$hY^fC6lbpM z(q_bo4V8~eVeA?0LFD6ZtKcmOH^75#q$Eo%a&qvE8Zsqg=$p}u^|>DSWUP5i{6)LAYF4E2DfGZuMJ 
zMwxxmkxQf}Q$V3&2w|$`9_SQS^2NVbTHh;atB>=A%!}k-f4*i$X8m}Ni^ppZXk5_oYF>Gq(& z0wy{LjJOu}69}~#UFPc;$7ka+=gl(FZCy4xEsk);+he>Nnl>hb5Ud-lj!CNicgd^2 z_Qgr_-&S7*#nLAI7r()P$`x~fy)+y=W~6aNh_humoZr7MWGSWJPLk}$#w_1n%(@? z3FnHf1lbxKJbQ9c&i<$(wd{tUTX6DAKs@cXIOBv~!9i{wD@*|kwfX~sjKASrNFGvN zrFc=!0Bb^OhR2f`%hrp2ibv#KUxl)Np1aixD9{^o=)*U%n%rTHX?FSWL^UGpHpY@7 z74U}KoIRwxI#>)Pn4($A`nw1%-D}`sGRZD8Z#lF$6 zOeA5)+W2qvA%m^|$WluUU-O+KtMqd;Pd58?qZj})MbxYGO<{z9U&t4D{S2G>e+J9K ztFZ?}ya>SVOLp9hpW)}G%kTrg*KXXXsLkGdgHb+R-ZXqdkdQC0_)`?6mqo8(EU#d( zy;u&aVPe6C=YgCRPV!mJ6R6kdY*`e+VGM~`VtC>{k27!9vAZT)x2~AiX5|m1Rq}_= z;A9LX^nd$l-9&2%4s~p5r6ad-siV`HtxKF}l&xGSYJmP=z!?Mlwmwef$EQq~7;#OE z)U5eS6dB~~1pkj#9(}T3j!((8Uf%!W49FfUAozijoxInUE7z`~U3Y^}xc3xp){#9D z<^Tz2xw}@o@fdUZ@hnW#dX6gDOj4R8dV}Dw`u!h@*K)-NrxT8%2`T}EvOImNF_N1S zy?uo6_ZS>Qga4Xme3j#aX+1qdFFE{NT0Wfusa$^;eL5xGE_66!5_N8!Z~jCAH2=${ z*goHjl|z|kbmIE{cl-PloSTtD+2=CDm~ZHRgXJ8~1(g4W=1c3=2eF#3tah7ho`zm4 z05P&?nyqq$nC?iJ-nK_iBo=u5l#|Ka3H7{UZ&O`~t-=triw=SE7ynzMAE{Mv-{7E_ zViZtA(0^wD{iCCcg@c{54Ro@U5p1QZq_XlEGtdBAQ9@nT?(zLO0#)q55G8_Ug~Xnu zR-^1~hp|cy&52iogG@o?-^AD8Jb^;@&Ea5jEicDlze6%>?u$-eE};bQ`T6@(bED0J zKYtdc?%9*<<$2LCBzVx9CA4YV|q-qg*-{yQ;|0=KIgI6~z0DKTtajw2Oms3L zn{C%{P`duw!(F@*P)lFy11|Z&x`E2<=$Ln38>UR~z6~za(3r;45kQK_^QTX%!s zNzoIFFH8|Y>YVrUL5#mgA-Jh>j7)n)5}iVM4%_@^GSwEIBA2g-;43* z*)i7u*xc8jo2z8&=8t7qo|B-rsGw)b8UXnu`RgE4u!(J8yIJi(5m3~aYsADcfZ!GG zzqa7p=sg`V_KjiqI*LA-=T;uiNRB;BZZ)~88 z`C%p8%hIev2rxS12@doqsrjgMg3{A&N8A?%Ui5vSHh7!iC^ltF&HqG~;=16=h0{ygy^@HxixUb1XYcR36SB}}o3nxu z_IpEmGh_CK<+sUh@2zbK9MqO!S5cao=8LSQg0Zv4?ju%ww^mvc0WU$q@!oo#2bv24 z+?c}14L2vlDn%Y0!t*z=$*a!`*|uAVu&NO!z_arim$=btpUPR5XGCG0U3YU`v>yMr z^zmTdcEa!APX zYF>^Q-TP11;{VgtMqC}7>B^2gN-3KYl33gS-p%f!X<_Hr?`rG8{jb9jmuQA9U;BeG zHj6Pk(UB5c6zwX%SNi*Py*)gk^?+729$bAN-EUd*RKN7{CM4`Q65a1qF*-QWACA&m zrT)B(M}yih{2r!Tiv5Y&O&=H_OtaHUz96Npo_k0eN|!*s2mLe!Zkuv>^E8Xa43ZwH zOI058AZznYGrRJ+`*GmZzMi6yliFmGMge6^j?|PN%ARns!Eg$ufpcLc#1Ns!1@1 zvC7N8M$mRgnixwEtX{ypBS^n`k@t2cCh#_6L6WtQb8E~*Vu+Rr)YsKZRX~hzLG*BE zaeU#LPo?RLm(Wzltk79Jd1Y$|6aWz1)wf1K1RtqS;qyQMy@H@B805vQ%wfSJB?m&&=^m4i* zYVH`zTTFbFtNFkAI`Khe4e^CdGZw;O0 zqkQe2|NG_y6D%h(|EZNf&77_!NU%0y={^E=*gKGQ=)LdKPM3zUlM@otH2X07Awv8o zY8Y7a1^&Yy%b%m{mNQ5sWNMTIq96Wtr>a(hL>Qi&F(ckgKkyvM0IH<_}v~Fv-GqDapig=3*ZMOx!%cYY)SKzo7ECyem z9Mj3C)tCYM?C9YIlt1?zTJXNOo&oVxu&uXKJs7i+j8p*Qvu2PAnY}b`KStdpi`trk ztAO}T8eOC%x)mu+4ps8sYZ=vYJp16SVWEEgQyFKSfWQ@O5id6GfL`|2<}hMXLPszS zgK>NWOoR zBRyKeUPevpqKKShD|MZ`R;~#PdNMB3LWjqFKNvH9k+;(`;-pyXM55?qaji#nl~K8m z_MifoM*W*X9CQiXAOH{cZcP0;Bn10E1)T@62Um>et2ci!J2$5-_HPy(AGif+BJpJ^ ziHWynC_%-NlrFY+(f7HyVvbDIM$5ci_i3?22ZkF>Y8RPBhgx-7k3M2>6m5R24C|~I z&RPh9xpMGzhN4bii*ryWaN^d(`0 zTOADlU)g`1p+SVMNLztd)c+;XjXox(VHQwqzu>FROvf0`s&|NEv26}(TAe;@=FpZq zaVs6mp>W0rM3Qg*6x5f_bPJd!6dQGmh?&v0rpBNfS$DW-{4L7#_~-eA@7<2BsZV=X zow){3aATmLZOQrs>uzDkXOD=IiX;Ue*B(^4RF%H zeaZ^*MWn4tBDj(wj114r(`)P96EHq4th-;tWiHhkp2rDlrklX}I@ib-nel0slFoQO zOeTc;Rh7sMIebO`1%u)=GlEj+7HU;c|Nj>2j)J-kpR)s3#+9AiB zd$hAk6;3pu9(GCR#)#>aCGPYq%r&i02$0L9=7AlIGYdlUO5%eH&M!ZWD&6^NBAj0Y9ZDcPg@r@8Y&-}e!aq0S(`}NuQ({;aigCPnq75U9cBH&Y7 ze)W0aD>muAepOKgm7uPg3Dz7G%)nEqTUm_&^^3(>+eEI;$ia`m>m0QHEkTt^=cx^JsBC68#H(3zc~Z$E9I)oSrF$3 zUClHXhMBZ|^1ikm3nL$Z@v|JRhud*IhOvx!6X<(YSX(9LG#yYuZeB{=7-MyPF;?_8 zy2i3iVKG2q!=JHN>~!#Bl{cwa6-yB@b<;8LSj}`f9pw7#x3yTD>C=>1S@H)~(n_K4 z2-yr{2?|1b#lS`qG@+823j;&UE5|2+EdU4nVw5=m>o_gj#K>>(*t=xI7{R)lJhLU{ z4IO6!x@1f$aDVIE@1a0lraN9!(j~_uGlks)!&davUFRNYHflp<|ENwAxsp~4Hun$Q z$w>@YzXp#VX~)ZP8`_b_sTg(Gt7?oXJW%^Pf0UW%YM+OGjKS}X`yO~{7WH6nX8S6Z ztl!5AnM2Lo*_}ZLvo%?iV;D2z>#qdpMx*xY2*GGlRzmHCom`VedAoR=(A1nO)Y>;5 
zCK-~a;#g5yDgf7_phlkM@)C8s!xOu)N2UnQhif-v5kL$*t=X}L9EyBRq$V(sI{90> z=ghTPGswRVbTW@dS2H|)QYTY&I$ljbpNPTc_T|FEJkSW7MV!JM4I(ksRqQ8)V5>}v z2Sf^Z9_v;dKSp_orZm09jb8;C(vzFFJgoYuWRc|Tt_&3k({wPKiD|*m!+za$(l*!gNRo{xtmqjy1=kGzFkTH=Nc>EL@1Um0BiN1)wBO$i z6rG={bRcT|%A3s3xh!Bw?=L&_-X+6}L9i~xRj2}-)7fsoq0|;;PS%mcn%_#oV#kAp zGw^23c8_0~ ze}v9(p};6HM0+qF5^^>BBEI3d=2DW&O#|(;wg}?3?uO=w+{*)+^l_-gE zSw8GV=4_%U4*OU^hibDV38{Qb7P#Y8zh@BM9pEM_o2FuFc2LWrW2jRRB<+IE)G=Vx zuu?cp2-`hgqlsn|$nx@I%TC!`>bX^G00_oKboOGGXLgyLKXoo$^@L7v;GWqfUFw3< zekKMWo0LR;TaFY}Tt4!O$3MU@pqcw!0w0 zA}SnJ6Lb597|P5W8$OsEHTku2Kw9y4V=hx*K%iSn!#LW9W#~OiWf^dXEP$^2 zaok=UyGwy3GRp)bm6Gqr>8-4h@3=2`Eto2|JE6Sufh?%U6;ut1v1d@#EfcQP2chCt z+mB{Bk5~()7G>wM3KYf7Xh?LGbwg1uWLotmc_}Z_o;XOUDyfU?{9atAT$={v82^w9 z(MW$gINHt4xB3{bdbhRR%T}L?McK?!zkLK3(e>zKyei(yq%Nsijm~LV|9mll-XHavFcc$teX7v);H>=oN-+E_Q{c|! zp

    JV~-9AH}jxf6IF!PxrB9is{_9s@PYth^`pb%DkwghLdAyDREz(csf9)HcVRq z+2Vn~>{(S&_;bq_qA{v7XbU?yR7;~JrLfo;g$Lkm#ufO1P`QW_`zWW+4+7xzQZnO$ z5&GyJs4-VGb5MEDBc5=zxZh9xEVoY(|2yRv&!T7LAlIs@tw+4n?v1T8M>;hBv}2n) zcqi+>M*U@uY>4N3eDSAH2Rg@dsl!1py>kO39GMP#qOHipL~*cCac2_vH^6x@xmO|E zkWeyvl@P$2Iy*mCgVF+b{&|FY*5Ygi8237i)9YW#Fp& z?TJTQW+7U)xCE*`Nsx^yaiJ0KSW}}jc-ub)8Z8x(|K7G>`&l{Y&~W=q#^4Gf{}aJ%6kLXsmv6cr=Hi*uB`V26;dr4C$WrPnHO>g zg1@A%DvIWPDtXzll39kY6#%j;aN7grYJP9AlJgs3FnC?crv$wC7S4_Z?<_s0j;MmE z75yQGul2=bY%`l__1X3jxju2$Ws%hNv75ywfAqjgFO7wFsFDOW^)q2%VIF~WhwEW0 z45z^+r+}sJ{q+>X-w(}OiD(!*&cy4X&yM`!L0Fe+_RUfs@=J{AH#K~gArqT=#DcGE z!FwY(h&+&811rVCVoOuK)Z<-$EX zp`TzcUQC256@YWZ*GkE@P_et4D@qpM92fWA6c$MV=^qTu7&g)U?O~-fUR&xFqNiY1 zRd=|zUs_rmFZhKI|H}dcKhy%Okl(#y#QuMi81zsY56Y@757xBQqDNkd+XhLQhp2BB zBF^aJ__D676wLu|yYo6jNJNw^B+Ce;DYK!f$!dNs1*?D^97u^jKS++7S z5qE%zG#HY-SMUn^_yru=T6v`)CM%K<>_Z>tPe|js`c<|y7?qol&)C=>uLWkg5 zmzNcSAG_sL)E9or;i+O}tY^70@h7+=bG1;YDlX{<4zF_?{)K5B&?^tKZ6<$SD%@>F zY0cl2H7)%zKeDX%Eo7`ky^mzS)s;842cP{_;dzFuyd~Npb4u!bwkkhf8-^C2e3`q8>MuPhgiv0VxHxvrN9_`rJv&GX0fWz-L-Jg^B zrTsm>)-~j0F1sV=^V?UUi{L2cp%YwpvHwwLaSsCIrGI#({{QfbgDxLKsUC6w@m?y} zg?l=7aMX-RnMxvLn_4oSB|9t;)Qf2%m-GKo_07?N1l^ahJ+Wf8C>h5~=-o1BJzV@5HBTB-ACNpsHnGt6_ku37M z{vIEB^tR=--4SEg{jfF=gEogtGwi&A$mwk7E+SV$$ZuU}#F3Y7t}o{!w4LJh8v4PW%8HfUK@dta#l*z@w*9Xzz(i)r#WXi`r1D#oBPtNM7M?Hkq zhhS1)ea5(6VY45|)tCTr*@yc$^Zc!zQzsNXU?aRN6mh7zVu~i=qTrX^>de+f6HYfDsW@6PBlw0CsDBcOWUmt&st>Z zYNJEsRCP1#g0+Htb=wITvexBY@fOpAmR7?szQNR~nM)?sPWIj)0)jG-EF8U@nnBaQZy z)ImpVYQL>lBejMDjlxA$#G4%y+^_>N;}r@Zoe2|u-9-x@vvD^ZWnV>Gm=pZa7REAf zOnomhCxBaGZgT+4kiE%aS&lH2sI1mSCM<%)Cr*Sli;#!aXcUb&@Z|Hj{VPsJyClqD%>hy`Y7z(GASs8Mqas3!D zSQE83*%uctlD|p%4)v`arra4y>yP5m25V*_+n)Ry1v>z_Fz!TV6t+N?x?#iH$q=m= z8&X{uW%LVRO87dVl=$Y*>dabJVq{o|Kx`7(D2$5DVX&}XGbg|Ua(*5b=;5qzW9;|w>m{hIO(Tu-z(ey8H=EMluJNyK4BJmGpX~ZM2O61 zk*O7js{-MBqwq>Urf0igN+6soGGc!Y?SP6hiXuJzZ1V4WZqE*?h;PG84gvG~dds6~484!kPM zMP87IP?dhdc;%|cS&LxY*Ib6P3%p|9)E3IgRmhhwtUR3eRK6iZ_6fiGW}jnL4(I|t ze`2yLvmuY42lNwO6>I#Son3$R4NOoP*WUm1R4jl#agtSLE}fSu-Z>{+*?pQIn7`s3LAzF#1pSxCAo?clr9 z9PUj#REq28*ZkJnxs$aK%8^5?P<_Q!#Z?%JH0FKVF;&zH3F#J^fz|ahl$Ycs~kFij_XP;U<`FcaDYyXYPM~&jEe1Xj1n;wyRdD;lmnq&FEro=;+Z$=v-&fYM9eK*S_D&oTXFW#b0 zRY}Y7R#bLzTfg9i7{s?=P9~qjA?$-U2p5;0?gPPu`1JY|*?*8IPO!eX>oiX=O#F!A zl`S%e5Y(csR1f)I(iKMf-;5%_rPP7h&}5Fc(8byKUH1*d7?9%QC|4aADj3L8yuo6GOv#%HDgU3bN(UHw1+(99&Om%f!DY(RYSf4&Uny% zH}*&rEXc$W5+eyeEg|I|E-HnkIO0!$1sV7Z&NXxiCZJ@`kH4eEi5}q~!Vv5qQq{MI zi4^`GYoUN-7Q(jy^SKXL4$G4K+FQXR)B}ee=pS0RyK=YC8c2bGnMA~rrOh&jd3_AT zxVaq37w^-;OU3+C`Kko-Z%l_2FC^maa=Ae0Fm@PEtXEg@cX*oka1Lt&h@jES<6?o1Oi1C9>}7+U(Ve zQ$=8RlzcnfCd59CsJ=gG^A!2Bb_PY~K2sSau{)?Ge03G7US&qrgV!3NUi>UHWZ*lo zS;~0--vn{ot+7UWMV{a(X3rZ8Z06Ps3$-sd|CWE(Y#l`swvcDbMjuReGsoA`rmZ`^ z=AaArdbeU0EtwnOuzq@u5P1rlZjH#gNgh6HIhG(>dX%4m{_!&DNTQE)8= zXD-vcpcSi|DSm3aUMnrV;DQY?svz?9*#GT$NXb~Hem=24iy>7xj367(!#RjnrHtrP-Q`T2W*PEvAR-=j ztY2|#<|JvHNVnM-tNdoS_yRSo=yFqukTZmB$|>Vclj)o=YzC9!ph8)ZOH5X=%Aq|9gNgc}^KFVLht!Lyw54v5u&D zW%vT%z`H{Ax>Ry+bD&QjHQke_wEA;oj(&E!s4|OURButQKSc7Ar-PzIiFa8F@ezkaY2J9&PH+VI1!G+{JgsQ7%da*_Gr!exT*OgJld)b-?cd)xI+|v_C`h(Cg`N~oj0`SQPTma z{@vc8L^D-rBXwS#00jT#@=-n1H-C3hvg61r2jx#ok&cr#BV~9JdPaVihyrGq*lb>bm$H6rIoc}ifaSn6mTD9% z$FRJxbNozOo6y}!OUci1VBv-7{TYZ4GkOM@46Y9?8%mSH9?l&lU59)T#Fjg(h%6I} z?ib zZ(xb8Rwr+vv>@$h{WglT2lL`#V=-9tP^c)cjvnz(g|VL^h8^CPVv12dE(o}WQ@0OP z^2-&ssBXP^#Oh`X5@F+~$PCB6kK-T7sFUK|>$lNDSkvAy%{y2qgq-&v zv}^&gm`wiYztWgMS<{^qQKYNV=>CQaOeglAY~EZvr}n~tW=yg)_+fzqF%~+*V_$3h z2hDW`e$qR;QMg?(wKE>%H_6ASS@6bkOi-m- zg6B7AzD;gBS1%OD7|47a%3BykN{w}P!Wn-nQOfpKUpx8Mk{$IO62D!%U9$kr!e%T> 
zlqQih?3(U&5%r!KZFZPdbwZ0laAJCj!c&pEFVzrH&_&i5m68Y_*J+-Qjlnz}Q{3oAD)`d14H zKUGmbwC|beC9Mtp>SbL~NVrlctU3WBpHz(UeIa~_{u^_4OaHs_LQt>bUwcyD`_Bbh zC=x|1vSjL)JvVHLw|xKynEvq2m)7O-6qdmjht7pZ*z|o%NA17v$9H*(5D5(MXiNo1 z72Tv}QASqr$!mY58s_Q{hHa9MY+QZ`2zX-FT@Kd?`8pczcV^9IeOKDG4WKqiP7N|S z+O977=VQTk8k5dafK`vd(4?_3pBdB?YG9*Z=R@y|$S+d%1sJf-Ka++I&v9hH)h#}} zw-MjQWJ?ME<7PR(G<1#*Z-&M?%=yzhQw$Lki(R+Pq$X~Q!9BO=fP9FyCIS8zE3n04 z8ScD%XmJnIv=pMTgt6VSxBXOZucndRE@7^aU0wefJYueY(Cb%?%0rz)zWEnsNsKhQ z+&o6d^x=R;Pt7fUa_`JVb1HPHYbXg{Jvux|atQ^bV#_|>7QZNC~P^IKUThB6{kvz2pr2*Cyxj zy37Nri8za8J!@Iw9rbt~#^<9zOaM8LOi$kPBcAGqPq-DB^-93Qeup{9@9&=zV6KQN zL)ic5S%n1!F(7b>MQ973$~<0|9MY-G!?wk?j-cQhMQlM2n{&7JoTBGsP;=fC6CBJn zxlpk^%x=B16rfb-W9pYV#9IRHQL9VG4?Uh>pN>2}0-MST2AB2pQjf*rT+TLCX-+&m z9I{ic2ogXoh=HwdI#igr(JC>>NUP|M>SA?-ux<2&>Jyx>Iko!B<3vS}{g*dKqxYW7 z0i`&U#*v)jot+keO#G&wowD!VvD(j`Z9a*-_RALKn0b(KnZ37d#Db7royLhBW~*7o zRa`=1fo9C4dgq;;R)JpP++a9^{xd)8``^fPW9!a%MCDYJc;3yicPs8IiQM>DhUX*; zeIrxE#JRrr|D$@bKgOm4C9D+e!_hQKj3LC`Js)|Aijx=J!rlgnpKeF>b+QlKhI^4* zf%Of^RmkW|xU|p#Lad44Y5LvIUIR>VGH8G zz7ZEIREG%UOy4)C!$muX6StM4@Fsh&Goa}cj10RL(#>oGtr6h~7tZDDQ_J>h)VmYlKK>9ns8w4tdx6LdN5xJQ9t-ABtTf_ zf1dKVv!mhhQFSN=ggf(#$)FtN-okyT&o6Ms+*u72Uf$5?4)78EErTECzweDUbbU)) zc*tt+9J~Pt%!M352Y5b`Mwrjn^Orp+)L_U1ORHJ}OUsB78YPcIRh4p5jzoDB7B*fb z4v`bouQeCAW#z9b1?4(M3dcwNn2F2plwC^RVHl#h&b-8n#5^o+Ll20OlJ^gOYiK2< z;MQuR!t!>`i}CAOa4a+Rh5IL|@kh4EdEL*O=3oGx4asg?XCTcUOQnmHs^6nLu6WcI zSt9q7nl*?2TIikKNb?3JZBo$cW6)b#;ZKzi+(~D-%0Ec+QW=bZZm@w|prGiThO3dy zU#TQ;RYQ+xU~*@Zj;Rf~z~iL8Da`RT!Z)b3ILBhnIl@VX9K0PSj5owH#*FJXX3vZ= zg_Zyn^G&l!WR6wN9GWvt)sM?g2^CA8&F#&t2z3_MiluRqvNbV{Me6yZ&X-_ zd6#Xdh%+6tCmSNTdCBusVkRwJ_A~<^Nd6~MNOvS;YDixM43`|8e_bmc*UWi7TLA})`T_F ztk&Nd=dgFUss#Ol$LXTRzP9l1JOSvAws~^X%(`ct$?2Im?UNpXjBec_-+8YK%rq#P zT9=h8&gCtgx?=Oj$Yr2jI3`VVuZ`lH>*N+*K11CD&>>F)?(`yr~54vHJftY*z?EorK zm`euBK<$(!XO%6-1=m>qqp6F`S@Pe3;pK5URT$8!Dd|;`eOWdmn916Ut5;iXWQoXE z0qtwxlH=m_NONP3EY2eW{Qwr-X1V3;5tV;g7tlL4BRilT#Y&~o_!f;*hWxWmvA;Pg zRb^Y$#PipnVlLXQIzKCuQP9IER0Ai4jZp+STb1Xq0w(nVn<3j(<#!vuc?7eJEZC<- zPhM7ObhgabN2`pm($tu^MaBkRLzx&jdh;>BP|^$TyD1UHt9Qvr{ZcBs^l!JI4~d-Py$P5QOYO&8eQOFe)&G zZm+?jOJioGs7MkkQBCzJSFJV6DiCav#kmdxc@IJ9j5m#&1)dhJt`y8{T!uxpBZ>&z zD^V~%GEaODak5qGj|@cA7HSH{#jHW;Q0KRdTp@PJO#Q1gGI=((a1o%X*{knz&_`ym zkRLikN^fQ%Gy1|~6%h^vx>ToJ(#aJDxoD8qyOD{CPbSvR*bC>Nm+mkw>6mD0mlD0X zGepCcS_x7+6X7dH;%e`aIfPr-NXSqlu&?$Br1R}3lSF2 zWOXDtG;v#EVLSQ!>4323VX-|E#qb+x%IxzUBDI~N23x? 
zXUHfTTV#_f9T$-2FPG@t)rpc9u9!@h^!4=fL^kg9 zVv%&KY3!?bU*V4X)wNT%Chr;YK()=~lc%$auOB_|oH`H)Xot@1cmk{^qdt&1C55>k zYnIkdoiAYW41zrRBfqR?9r^cpWIEqfS;|R#bIs4$cqA zoq~$yl8h{IXTSdSdH?;`ky6i%+Oc?HvwH+IS`%_a!d#CqQob9OTNIuhUnOQsX;nl_ z;1w99qO9lAb|guQ9?p4*9TmIZ5{su!h?v-jpOuShq!{AuHUYtmZ%brpgHl$BKLK_L z6q5vZodM$)RE^NNO>{ZWPb%Ce111V4wIX}?DHA=uzTu0$1h8zy!SID~m5t)(ov$!6 zB^@fP#vpx3enbrbX=vzol zj^Bg7V$Qa53#3Lptz<6Dz=!f+FvUBVIBtYPN{(%t(EcveSuxi3DI>XQ*$HX~O{KLK5Dh{H2ir87E^!(ye{9H&2U4kFxtKHkw zZPOTIa*29KbXx-U4hj&iH<9Z@0wh8B6+>qQJn{>F0mGnrj|0_{nwN}Vw_C!rm0!dC z>iRlEf}<+z&?Z4o3?C>QrLBhXP!MV0L#CgF{>;ydIBd5A{bd-S+VFn zLqq4a*HD%65IqQ5BxNz~vOGU=JJv|NG{OcW%2PU~MEfy6(bl#^TfT7+az5M-I`i&l z#g!HUfN}j#adA-21x7jbP6F;`99c8Qt|`_@u@fbhZF+Wkmr;IdVHj+F=pDb4MY?fU znDe##Hn){D}<>vVhYL#)+6p9eAT3T$?;-~bZU%l7MpPNh_mPc(h@79 z;LPOXk>e3nmIxl9lno5cI5G@Q!pE&hQ`s{$Ae4JhTebeTsj*|!6%0;g=wH?B1-p{P z`In#EP12q6=xXU)LiD+mLidPrYGHaKbe5%|vzApq9(PI6I5XjlGf<_uyy59iw8W;k zdLZ|8R8RWDc`#)n2?~}@5)vvksY9UaLW`FM=2s|vyg>Remm=QGthdNL87$nR&TKB*LB%*B}|HkG64 zZ|O4=Yq?Zwl>_KgIG@<8i{Zw#P3q_CVT7Dt zoMwoI)BkpQj8u(m!>1dfOwin(50}VNiLA>A2OG&TBXcP=H(3I;!WdPFe?r_e{%>bc6(Zk?6~Ew&;#ZxBJ| zAd1(sAHqlo_*rP;nTk)kAORe3cF&tj>m&LsvB)`-y9#$4XU=Dd^+CzvoAz%9216#f0cS`;kERxrtjbl^7pmO;_y zYBGOL7R1ne7%F9M2~0a7Srciz=MeaMU~ zV%Y#m_KV$XReYHtsraWLrdJItLtRiRo98T3J|x~(a>~)#>JHDJ z|4j!VO^qWQfCm9-$N29SpHUqvz62%#%98;2FNIF*?c9hZ7GAu$q>=0 zX_igPSK8Et(fmD)V=CvbtA-V(wS?z6WV|RX2`g=w=4D)+H|F_N(^ON!jHf72<2nCJ z^$hEygTAq7URR{Vq$)BsmFKTZ+i1i(D@SJuTGBN3W8{JpJ^J zkF=gBTz|P;Xxo1NIypGzJq8GK^#4tl)S%8$PP6E8c|GkkQ)vZ1OiB%mH#@hO1Z%Hp zv%2~Mlar^}7TRN-SscvQ*xVv+i1g8CwybQHCi3k;o$K@bmB%^-U8dILX)7b~#iPu@ z&D&W7YY2M3v`s(lNm2#^dCRFd;UYMUw1Rh2mto8laH1m`n0u;>okp5XmbsShOhQwo z@EYOehg-KNab)Rieib?m&NXls+&31)MB&H-zj_WmJsGjc1sCSOz0!2Cm1vV?y@kkQ z<1k6O$hvTQnGD*esux*aD3lEm$mUi0td0NiOtz3?7}h;Bt*vIC{tDBr@D)9rjhP^< zY*uKu^BiuSO%)&FL>C?Ng!HYZHLy`R>`rgq+lJhdXfo|df zmkzpQf{6o9%^|7Yb5v{Tu& zsP*Y~<#jK$S_}uEisRC;=y{zbq`4Owc@JyvB->nPzb#&vcMKi5n66PVV{Aub>*>q8 z=@u7jYA4Ziw2{fSED#t4QLD7Rt`au^y(Ggp3y(UcwIKtI(OMi@GHxs!bj$v~j(FZK zbdcP^gExtXQqQ8^Q#rHy1&W8q!@^aL>g1v2R45T(KErWB)1rB@rU`#n&-?g2Ti~xXCrexrLgajgzNy=N9|A6K=RZ zc3yk>w5sz1zsg~tO~-Ie?%Aplh#)l3`s632mi#CCl^75%i6IY;dzpuxu+2fliEjQn z&=~U+@fV4>{Fp=kk0oQIvBdqS#yY`Z+>Z|T&K{d;v3}=JqzKx05XU3M&@D5!uPTGydasyeZ5=1~IX-?HlM@AGB9|Mzb{{Dt@bUU8{KUPU@EX zv0fpQNvG~nD2WiOe{Vn=hE^rQD(5m+!$rs%s{w9;yg9oxRhqi0)rwsd245)igLmv* zJb@Xlet$+)oS1Ra#qTB@U|lix{Y4lGW-$5*4xOLY{9v9&RK<|K!fTd0wCKYZ)h&2f zEMcTCd+bj&YVmc#>&|?F!3?br3ChoMPTA{RH@NF(jmGMB2fMyW(<0jUT=8QFYD7-% zS0ydgp%;?W=>{V9>BOf=p$q5U511~Q0-|C!85)W0ov7eb35%XV;3mdUI@f5|x5C)R z$t?xLFZOv}A(ZjjSbF+8&%@RChpRvo>)sy>-IO8A@>i1A+8bZd^5J#(lgNH&A=V4V z*HUa0{zT{u-_FF$978RziwA@@*XkV{<-CE1N=Z!_!7;wq*xt3t((m+^$SZKaPim3K zO|Gq*w5r&7iqiQ!03SY{@*LKDkzhkHe*TzQaYAkz&jNxf^&A_-40(aGs53&}$dlKz zsel3=FvHqdeIf!UYwL&Mg3w_H?utbE_(PL9B|VAyaOo8k4qb>EvNYHrVmj^ocJQTf zL%4vl{qgmJf#@uWL@)WiB>Lm>?ivwB%uO|)i~;#--nFx4Kr6{TruZU0N_t_zqkg`? 
zwPFK|WiC4sI%o1H%$!1ANyq6_0OSPQJybh^vFriV=`S;kSsYkExZwB{68$dTODWJQ z@N57kBhwN(y~OHW_M}rX2W13cl@*i_tjW`TMfa~Y;I}1hzApXgWqag@(*@(|EMOg- z^qMk(s~dL#ps>>`oWZD=i1XI3(;gs7q#^Uj&L`gVu#4zn$i!BIHMoOZG!YoPO^=Gu z5`X-(KoSsHL77c<7^Y*IM2bI!dzg5j>;I@2-EeB$LgW|;csQTM&Z|R)q>yEjk@Sw% z6FQk*&zHWzcXalUJSoa&pgH24n`wKkg=2^ta$b1`(BBpBT2Ah9yQF&Kh+3jTaSE|=vChGz2_R^{$C;D`Ua(_=|OO11uLm;+3k%kO19EA`U065i;fRBoH z{Hq$cgHKRFPf0#%L?$*KeS@FDD;_TfJ#dwP7zzO5F>xntH(ONK{4)#jYUDQr6N(N< zp+fAS9l9)^c4Ss8628Zq5AzMq4zc(In_yJSXAT57Dtl}@= zvZoD7iq0cx7*#I{{r9m{%~g6@Hdr|*njKBb_5}mobCv=&X^`D9?;x6cHwRcwnlO^h zl;MiKr#LaoB*PELm8+8%btnC)b^E12!^ zMmVA!z>59e7n+^!P{PA?f9M^2FjKVw1%x~<`RY5FcXJE)AE}MTopGFDkyEjGiE|C6 z(ad%<3?v*?p;LJGopSEY18HPu2*}U!Nm|rfewc6(&y(&}B#j85d-5PeQ{}zg>>Rvl zDQ3H4E%q_P&kjuAQ>!0bqgAj){vzHpnn+h(AjQ6GO9v**l0|aCsCyXVE@uh?DU;Em zE*+7EU9tDH````D`|rM6WUlzBf1e{ht8$62#ilA6Dcw)qAzSRwu{czZJAcKv8w(Q6 zx)b$aq*=E=b5(UH-5*u)3iFlD;XQyklZrwHy}+=h6=aKtTriguHP@Inf+H@q32_LL z2tX|+X}4dMYB;*EW9~^5bydv)_!<%q#%Ocyh=1>FwL{rtZ?#2Scp{Q55%Fd-LgLU$ zM2u#|F{%vi%+O2^~uK3)?$6>9cc7_}F zWU72eFrzZ~x3ZIBH;~EMtD%51o*bnW;&QuzwWd$ds=O>Ev807cu%>Ac^ZK&7bCN;Ftk#eeQL4pG0p!W{Ri@tGw>nhIo`rC zi!Z6?70nYrNf92V{Y_i(a4DG=5>RktP=?%GcHEx?aKN$@{w{uj#Cqev$bXefo?yC6KI%Rol z%~$974WCymg;BBhd9Mv}_MeNro_8IB4!evgo*je4h?B-CAkEW-Wr-Q_V9~ef(znU& z{f-OHnj>@lZH(EcUb2TpOkc70@1BPiY0B#++1EPY5|UU?&^Vpw|C`k4ZWiB-3oAQM zgmG%M`2qDw5BMY|tG++34My2fE|^kvMSp(d+~P(Vk*d+RW1833i_bX^RYbg9tDtX` zox?y^YYfs-#fX|y7i(FN7js)66jN!`p9^r7oildEU#6J1(415H3h>W*p(p9@dI|c7 z&c*Aqzksg}o`D@i+o@WIw&jjvL!(`)JglV5zwMn)praO2M05H&CDeps0Wq8(8AkuE zPm|8MB6f0kOzg(gw}k>rzhQyo#<#sVdht~Wdk`y`=%0!jbd1&>Kxed8lS{Xq?Zw>* zU5;dM1tt``JH+A9@>H%-9f=EnW)UkRJe0+e^iqm0C5Z5?iEn#lbp}Xso ztleC}hl&*yPFcoCZ@sgvvjBA_Ew6msFml$cfLQY_(=h03WS_z+Leeh$M3#-?f9YT^Q($z z+pgaEv$rIa*9wST`WHASQio=9IaVS7l<87%;83~X*`{BX#@>>p=k`@FYo ze!K5_h8hOc`m0mK0p}LxsguM}w=9vw6Ku8y@RNrXSRPh&S`t4UQY=e-B8~3YCt1Fc zU$CtRW%hbcy{6K{>v0F*X<`rXVM3a{!muAeG$zBf`a(^l${EA9w3>J{aPwJT?mKVN2ba+v)Mp*~gQ_+Ws6= zy@D?85!U@VY0z9T=E9LMbe$?7_KIg)-R$tD)9NqIt84fb{B;f7C)n+B8)Cvo*F0t! zva6LeeC}AK4gL#d#N_HvvD& z0;mdU3@7%d5>h(xX-NBmJAOChtb(pX-qUtRLF5f$ z`X?Kpu?ENMc88>O&ym_$Jc7LZ> z#73|xJ|aa@l}PawS4Mpt9n)38w#q^P1w2N|rYKdcG;nb!_nHMZA_09L!j)pBK~e+j?tb-_A`wF8 zIyh>&%v=|n?+~h}%i1#^9UqZ?E9W!qJ0d0EHmioSt@%v7FzF`eM$X==#oaPESHBm@ zYzTXVo*y|C0~l_)|NF|F(If~YWJVkQAEMf5IbH{}#>PZpbXZU;+b^P8LWmlmDJ%Zu)4CajvRL!g_Faph`g0hpA2)D0|h zYy0h5+@4T81(s0D=crojdj|dYa{Y=<2zKp@xl&{sHO;#|!uTHtTey25f1U z#=Nyz{rJy#@SPk3_U|aALcg%vEjwIqSO$LZI59^;Mu~Swb53L+>oxWiN7J{;P*(2b@ao*aU~}-_j10 z@fQiaWnb}fRrHhNKrxKmi{aC#34BRP(a#0K>-J8D+v_2!~(V-6J%M@L{s?fU5ChwFfqn)2$siOUKw z?SmIRlbE8ot5P^z0J&G+rQ5}H=JE{FNsg`^jab7g-c}o`s{JS{-#}CRdW@hO`HfEp z1eR0DsN! 
zt5xmsYt{Uu;ZM`CgW)VYk=!$}N;w+Ct$Wf!*Z-7}@pA62F^1e$Ojz9O5H;TyT&rV( zr#IBM8te~-2t2;kv2xm&z%tt3pyt|s#vg2EOx1XkfsB*RM;D>ab$W-D6#Jdf zJ3{yD;P4=pFNk2GL$g~+5x;f9m*U2!ovWMK^U5`mAgBRhGpu)e`?#4vsE1aofu)iT zDm;aQIK6pNd8MMt@}h|t9c$)FT7PLDvu3e)y`otVe1SU4U=o@d!gn(DB9kC>Ac1wJ z?`{Hq$Q!rGb9h&VL#z+BKsLciCttdLJe9EmZF)J)c1MdVCrxg~EM80_b3k{ur=jVjrVhDK1GTjd3&t#ORvC0Q_&m|n>&TF1C_>k^8&ylR7oz#rG?mE%V| zepj0BlD|o?p8~LK_to`GINhGyW{{jZ{xqaO*SPvH)BYy1eH22DL_Kkn28N!0z3fzj z_+xZ3{ph_Tgkd)D$OjREak$O{F~mODA_D`5VsoobVnpxI zV0F_79%JB!?@jPs=cY73FhGuT!?fpVX1W=Wm zK5}i7(Pfh4o|Z{Ur=Y>bM1BDo2OdXBB(4Y#Z!61A8C6;7`6v-(P{ou1mAETEV?Nt< zMY&?ucJcJ$NyK0Zf@b;U#3ad?#dp`>zmNn=H1&-H`Y+)ai-TfyZJX@O&nRB*7j$ zDQF!q#a7VHL3z#Hc?Ca!MRbgL`daF zW#;L$yiQP|5VvgvRLluk3>-1cS+7MQ1)DC&DpYyS9j;!Rt$HdXK1}tG3G_)ZwXvGH zG;PB^f@CFrbEK4>3gTVj73~Tny+~k_pEHt|^eLw{?6NbG&`Ng9diB9XsMr(ztNC!{FhW8Hi!)TI`(Q|F*b z-z;#*c1T~kN67omP(l7)ZuTlxaC_XI(K8$VPfAzj?R**AMb0*p@$^PsN!LB@RYQ4U zA^xYY9sX4+;7gY%$i%ddfvneGfzbE4ZTJT5Vk3&1`?ULTy28&D#A&{dr5ZlZH&NTz zdfZr%Rw*Ukmgu@$C5$}QLOyb|PMA5syQns?iN@F|VFEvFPK321mTW^uv?GGNH6rnM zR9a2vB`}Y++T3Wumy$6`W)_c0PS*L;;0J^(T7<)`s{}lZVp`e)fM^?{$ zLbNw>N&6aw5Hlf_M)h8=)x0$*)V-w-Pw5Kh+EY{^$?#{v)_Y{9p5K{DjLnJ(ZUcyk*y(6D8wHB8=>Y)fb_Pw0v)Xybk`Sw@hNEaHP$-n`DtYP ziJyiauEXtuMpWyQjg$gdJR?e+=8w+=5GO-OT8pRaVFP1k^vI|I&agGjN-O*bJEK!M z`kt^POhUexh+PA&@And|vk-*MirW?>qB(f%y{ux z*d44UXxQOs+C`e-x4KSWhPg-!gO~kavIL8X3?!Ac2ih-dkK~Ua2qlcs1b-AIWg*8u z0QvL~51vS$LnmJSOnV4JUCUzg&4;bSsR5r_=FD@y|)Y2R_--e zMWJ;~*r=vJssF5_*n?wF0DO_>Mja=g+HvT=Yd^uBU|aw zRixHUQJX0Pgt-nFV+8&|;-n>!jNUj!8Y_YzH*%M!-_uWt6& z|Ec+lAD``i^do;u_?<(RpzsYZVJ8~}|NjUFgXltofbjhf!v&208g^#0h-x?`z8cInq!9kfVwJ|HQ;VK>p_-fn@(3q?e51Keq(=U-7C0#as-q z8Or}Ps07>O2@AAXz_%3bTOh{tKm#uRe}Sqr=w6-Wz$FCdfF3qNabEaj`-OfipxaL- zPh2R*l&%ZbcV?lv4C3+t2DAVSFaRo20^W_n4|0t(_*`?KmmUHG2sNZ*CRZlCFIyZbJqLdBCj)~%if)g|4NJr(8!R!E0iBbm$;`m;1n2@(8*E%B zH!g{hK|WK?1jUfM9zX?hlV#l%!6^p$$P+~rg}OdKg|d^Ed4WTY1$1J@WWHr$Os_(L z;-Zu1FJqhR4LrCUl)C~E7gA!^wtA6YIh10In9rX@LGSjnTPtLp+gPGp6u z3}{?J1!yT~?FwqT;O_-1%37f#4ek&DL){N}MX3RbNfRb-T;U^wXhx#De&QssA$lu~ mWkA_K7-+yz9tH*t6hj_Qg(_m7JaeTomk=)l!_+yTk^le-`GmOu delta 34176 zcmX7vV`H6d(}mmEwr$(CZQE$vU^m*aZQE(=WXEZ2+l}qF_w)XN>&rEBu9;)4>7EB0 zo(HR^Mh47P)@z^^pH!4#b(O8!;$>N+S+v5K5f8RrQ+Qv0_oH#e!pI2>yt4ij>fI9l zW&-hsVAQg%dpn3NRy$kb_vbM2sr`>bZ48b35m{D=OqX;p8A${^Dp|W&J5mXvUl#_I zN!~GCBUzj~C%K?<7+UZ_q|L)EGG#_*2Zzko-&Kck)Qd2%CpS3{P1co1?$|Sj1?E;PO z7alI9$X(MDly9AIEZ-vDLhpAKd1x4U#w$OvBtaA{fW9)iD#|AkMrsSaNz(69;h1iM1#_ z?u?O_aKa>vk=j;AR&*V-p3SY`CI}Uo%eRO(Dr-Te<99WQhi>y&l%UiS%W2m(d#woD zW?alFl75!1NiUzVqgqY98fSQNjhX3uZ&orB08Y*DFD;sjIddWoJF;S_@{Lx#SQk+9 zvSQ-620z0D7cy8-u_7u?PqYt?R0m2k%PWj%V(L|MCO(@3%l&pzEy7ijNv(VXU9byn z@6=4zL|qk*7!@QWd9imT9i%y}1#6+%w=s%WmsHbw@{UVc^?nL*GsnACaLnTbr9A>B zK)H-$tB`>jt9LSwaY+4!F1q(YO!E7@?SX3X-Ug4r($QrmJnM8m#;#LN`kE>?<{vbCZbhKOrMpux zTU=02hy${;n&ikcP8PqufhT9nJU>s;dyl;&~|Cs+o{9pCu{cRF+0{iyuH~6=tIZXVd zR~pJBC3Hf-g%Y|bhTuGyd~3-sm}kaX5=T?p$V?48h4{h2;_u{b}8s~Jar{39PnL7DsXpxcX#3zx@f9K zkkrw9s2*>)&=fLY{=xeIYVICff2Id5cc*~l7ztSsU@xuXYdV1(lLGZ5)?mXyIDf1- zA7j3P{C5s?$Y-kg60&XML*y93zrir8CNq*EMx)Kw)XA(N({9t-XAdX;rjxk`OF%4-0x?ne@LlBQMJe5+$Ir{Oj`@#qe+_-z!g5qQ2SxKQy1ex_x^Huj%u+S@EfEPP-70KeL@7@PBfadCUBt%`huTknOCj{ z;v?wZ2&wsL@-iBa(iFd)7duJTY8z-q5^HR-R9d*ex2m^A-~uCvz9B-1C$2xXL#>ow z!O<5&jhbM&@m=l_aW3F>vjJyy27gY}!9PSU3kITbrbs#Gm0gD?~Tub8ZFFK$X?pdv-%EeopaGB#$rDQHELW!8bVt`%?&>0 zrZUQ0!yP(uzVK?jWJ8^n915hO$v1SLV_&$-2y(iDIg}GDFRo!JzQF#gJoWu^UW0#? 
z*OC-SPMEY!LYY*OO95!sv{#-t!3Z!CfomqgzFJld>~CTFKGcr^sUai5s-y^vI5K={ z)cmQthQuKS07e8nLfaIYQ5f}PJQqcmokx?%yzFH*`%k}RyXCt1Chfv5KAeMWbq^2MNft;@`hMyhWg50(!jdAn;Jyx4Yt)^^DVCSu?xRu^$*&&=O6#JVShU_N3?D)|$5pyP8A!f)`| z>t0k&S66T*es5(_cs>0F=twYJUrQMqYa2HQvy)d+XW&rai?m;8nW9tL9Ivp9qi2-` zOQM<}D*g`28wJ54H~1U!+)vQh)(cpuf^&8uteU$G{9BUhOL| zBX{5E1**;hlc0ZAi(r@)IK{Y*ro_UL8Ztf8n{Xnwn=s=qH;fxkK+uL zY)0pvf6-iHfX+{F8&6LzG;&d%^5g`_&GEEx0GU=cJM*}RecV-AqHSK@{TMir1jaFf&R{@?|ieOUnmb?lQxCN!GnAqcii9$ z{a!Y{Vfz)xD!m2VfPH=`bk5m6dG{LfgtA4ITT?Sckn<92rt@pG+sk>3UhTQx9ywF3 z=$|RgTN<=6-B4+UbYWxfQUOe8cmEDY3QL$;mOw&X2;q9x9qNz3J97)3^jb zdlzkDYLKm^5?3IV>t3fdWwNpq3qY;hsj=pk9;P!wVmjP|6Dw^ez7_&DH9X33$T=Q{>Nl zv*a*QMM1-2XQ)O=3n@X+RO~S`N13QM81^ZzljPJIFBh%x<~No?@z_&LAl)ap!AflS zb{yFXU(Uw(dw%NR_l7%eN2VVX;^Ln{I1G+yPQr1AY+0MapBnJ3k1>Zdrw^3aUig*! z?xQe8C0LW;EDY(qe_P!Z#Q^jP3u$Z3hQpy^w7?jI;~XTz0ju$DQNc4LUyX}+S5zh> zGkB%~XU+L?3pw&j!i|x6C+RyP+_XYNm9`rtHpqxvoCdV_MXg847oHhYJqO+{t!xxdbsw4Ugn($Cwkm^+36&goy$vkaFs zrH6F29eMPXyoBha7X^b+N*a!>VZ<&Gf3eeE+Bgz7PB-6X7 z_%2M~{sTwC^iQVjH9#fVa3IO6E4b*S%M;#WhHa^L+=DP%arD_`eW5G0<9Tk=Ci?P@ z6tJXhej{ZWF=idj32x7dp{zmQY;;D2*11&-(~wifGXLmD6C-XR=K3c>S^_+x!3OuB z%D&!EOk;V4Sq6eQcE{UEDsPMtED*;qgcJU^UwLwjE-Ww54d73fQ`9Sv%^H>juEKmxN+*aD=0Q+ZFH1_J(*$~9&JyUJ6!>(Nj zi3Z6zWC%Yz0ZjX>thi~rH+lqv<9nkI3?Ghn7@!u3Ef){G(0Pvwnxc&(YeC=Kg2-7z zr>a^@b_QClXs?Obplq@Lq-l5>W);Y^JbCYk^n8G`8PzCH^rnY5Zk-AN6|7Pn=oF(H zxE#8LkI;;}K7I^UK55Z)c=zn7OX_XVgFlEGSO}~H^y|wd7piw*b1$kA!0*X*DQ~O` z*vFvc5Jy7(fFMRq>XA8Tq`E>EF35{?(_;yAdbO8rrmrlb&LceV%;U3haVV}Koh9C| zTZnR0a(*yN^Hp9u*h+eAdn)d}vPCo3k?GCz1w>OOeme(Mbo*A7)*nEmmUt?eN_vA; z=~2}K_}BtDXJM-y5fn^v>QQo+%*FdZQFNz^j&rYhmZHgDA-TH47#Wjn_@iH4?6R{J z%+C8LYIy>{3~A@|y4kN8YZZp72F8F@dOZWp>N0-DyVb4UQd_t^`P)zsCoygL_>>x| z2Hyu7;n(4G&?wCB4YVUIVg0K!CALjRsb}&4aLS|}0t`C}orYqhFe7N~h9XQ_bIW*f zGlDCIE`&wwyFX1U>}g#P0xRRn2q9%FPRfm{-M7;}6cS(V6;kn@6!$y06lO>8AE_!O z{|W{HEAbI0eD$z9tQvWth7y>qpTKQ0$EDsJkQxAaV2+gE28Al8W%t`Pbh zPl#%_S@a^6Y;lH6BfUfZNRKwS#x_keQ`;Rjg@qj zZRwQXZd-rWngbYC}r6X)VCJ-=D54A+81%(L*8?+&r7(wOxDSNn!t(U}!;5|sjq zc5yF5$V!;%C#T+T3*AD+A({T)#p$H_<$nDd#M)KOLbd*KoW~9E19BBd-UwBX1<0h9 z8lNI&7Z_r4bx;`%5&;ky+y7PD9F^;Qk{`J@z!jJKyJ|s@lY^y!r9p^75D)_TJ6S*T zLA7AA*m}Y|5~)-`cyB+lUE9CS_`iB;MM&0fX**f;$n($fQ1_Zo=u>|n~r$HvkOUK(gv_L&@DE0b4#ya{HN)8bNQMl9hCva zi~j0v&plRsp?_zR zA}uI4n;^_Ko5`N-HCw_1BMLd#OAmmIY#ol4M^UjLL-UAat+xA+zxrFqKc@V5Zqan_ z+LoVX-Ub2mT7Dk_ z<+_3?XWBEM84@J_F}FDe-hl@}x@v-s1AR{_YD!_fMgagH6s9uyi6pW3gdhauG>+H? 
zi<5^{dp*5-9v`|m*ceT&`Hqv77oBQ+Da!=?dDO&9jo;=JkzrQKx^o$RqAgzL{ zjK@n)JW~lzxB>(o(21ibI}i|r3e;17zTjdEl5c`Cn-KAlR7EPp84M@!8~CywES-`mxKJ@Dsf6B18_!XMIq$Q3rTDeIgJ3X zB1)voa#V{iY^ju>*Cdg&UCbx?d3UMArPRHZauE}c@Fdk;z85OcA&Th>ZN%}=VU%3b9={Q(@M4QaeuGE(BbZ{U z?WPDG+sjJSz1OYFpdImKYHUa@ELn%n&PR9&I7B$<-c3e|{tPH*u@hs)Ci>Z@5$M?lP(#d#QIz}~()P7mt`<2PT4oHH}R&#dIx4uq943D8gVbaa2&FygrSk3*whGr~Jn zR4QnS@83UZ_BUGw;?@T zo5jA#potERcBv+dd8V$xTh)COur`TQ^^Yb&cdBcesjHlA3O8SBeKrVj!-D3+_p6%P zP@e{|^-G-C(}g+=bAuAy8)wcS{$XB?I=|r=&=TvbqeyXiuG43RR>R72Ry7d6RS;n^ zO5J-QIc@)sz_l6%Lg5zA8cgNK^GK_b-Z+M{RLYk5=O|6c%!1u6YMm3jJg{TfS*L%2 zA<*7$@wgJ(M*gyTzz8+7{iRP_e~(CCbGB}FN-#`&1ntct@`5gB-u6oUp3#QDxyF8v zOjxr}pS{5RpK1l7+l(bC)0>M;%7L?@6t}S&a zx0gP8^sXi(g2_g8+8-1~hKO;9Nn%_S%9djd*;nCLadHpVx(S0tixw2{Q}vOPCWvZg zjYc6LQ~nIZ*b0m_uN~l{&2df2*ZmBU8dv`#o+^5p>D5l%9@(Y-g%`|$%nQ|SSRm0c zLZV)45DS8d#v(z6gj&6|ay@MP23leodS8-GWIMH8_YCScX#Xr)mbuvXqSHo*)cY9g z#Ea+NvHIA)@`L+)T|f$Etx;-vrE3;Gk^O@IN@1{lpg&XzU5Eh3!w;6l=Q$k|%7nj^ z|HGu}c59-Ilzu^w<93il$cRf@C(4Cr2S!!E&7#)GgUH@py?O;Vl&joXrep=2A|3Vn zH+e$Ctmdy3B^fh%12D$nQk^j|v=>_3JAdKPt2YVusbNW&CL?M*?`K1mK*!&-9Ecp~>V1w{EK(429OT>DJAV21fG z=XP=%m+0vV4LdIi#(~XpaUY$~fQ=xA#5?V%xGRr_|5WWV=uoG_Z&{fae)`2~u{6-p zG>E>8j({w7njU-5Lai|2HhDPntQ(X@yB z9l?NGoKB5N98fWrkdN3g8ox7Vic|gfTF~jIfXkm|9Yuu-p>v3d{5&hC+ZD%mh|_=* zD5v*u(SuLxzX~owH!mJQi%Z=ALvdjyt9U6baVY<88B>{HApAJ~>`buHVGQd%KUu(d z5#{NEKk6Vy08_8*E(?hqZe2L?P2$>!0~26N(rVzB9KbF&JQOIaU{SumX!TsYzR%wB z<5EgJXDJ=1L_SNCNZcBWBNeN+Y`)B%R(wEA?}Wi@mp(jcw9&^1EMSM58?68gwnXF` zzT0_7>)ep%6hid-*DZ42eU)tFcFz7@bo=<~CrLXpNDM}tv*-B(ZF`(9^RiM9W4xC%@ZHv=>w(&~$Wta%)Z;d!{J;e@z zX1Gkw^XrHOfYHR#hAU=G`v43E$Iq}*gwqm@-mPac0HOZ0 zVtfu7>CQYS_F@n6n#CGcC5R%4{+P4m7uVlg3axX}B(_kf((>W?EhIO&rQ{iUO$16X zv{Abj3ZApUrcar7Ck}B1%RvnR%uocMlKsRxV9Qqe^Y_5C$xQW@9QdCcF%W#!zj;!xWc+0#VQ*}u&rJ7)zc+{vpw+nV?{tdd&Xs`NV zKUp|dV98WbWl*_MoyzM0xv8tTNJChwifP!9WM^GD|Mkc75$F;j$K%Y8K@7?uJjq-w zz*|>EH5jH&oTKlIzueAN2926Uo1OryC|CmkyoQZABt#FtHz)QmQvSX35o`f z<^*5XXxexj+Q-a#2h4(?_*|!5Pjph@?Na8Z>K%AAjNr3T!7RN;7c)1SqAJfHY|xAV z1f;p%lSdE8I}E4~tRH(l*rK?OZ>mB4C{3e%E-bUng2ymerg8?M$rXC!D?3O}_mka? 
zm*Y~JMu+_F7O4T;#nFv)?Ru6 z92r|old*4ZB$*6M40B;V&2w->#>4DEu0;#vHSgXdEzm{+VS48 z7U1tVn#AnQ3z#gP26$!dmS5&JsXsrR>~rWA}%qd{92+j zu+wYAqrJYOA%WC9nZ>BKH&;9vMSW_59z5LtzS4Q@o5vcrWjg+28#&$*8SMYP z!l5=|p@x6YnmNq>23sQ(^du5K)TB&K8t{P`@T4J5cEFL@qwtsCmn~p>>*b=37y!kB zn6x{#KjM{S9O_otGQub*K)iIjtE2NfiV~zD2x{4r)IUD(Y8%r`n;#)ujIrl8Sa+L{ z>ixGoZJ1K@;wTUbRRFgnltN_U*^EOJS zRo4Y+S`cP}e-zNtdl^S5#%oN#HLjmq$W^(Y6=5tM#RBK-M14RO7X(8Gliy3+&9fO; zXn{60%0sWh1_g1Z2r0MuGwSGUE;l4TI*M!$5dm&v9pO7@KlW@j_QboeDd1k9!7S)jIwBza-V#1)(7ht|sjY}a19sO!T z2VEW7nB0!zP=Sx17-6S$r=A)MZikCjlQHE)%_Ka|OY4+jgGOw=I3CM`3ui^=o0p7u z?xujpg#dRVZCg|{%!^DvoR*~;QBH8ia6%4pOh<#t+e_u!8gjuk_Aic=|*H24Yq~Wup1dTRQs0nlZOy+30f16;f7EYh*^*i9hTZ`h`015%{i|4 z?$7qC3&kt#(jI#<76Biz=bl=k=&qyaH>foM#zA7}N`Ji~)-f-t&tR4^do)-5t?Hz_Q+X~S2bZx{t+MEjwy3kGfbv(ij^@;=?H_^FIIu*HP_7mpV)NS{MY-Rr7&rvWo@Wd~{Lt!8|66rq`GdGu% z@<(<7bYcZKCt%_RmTpAjx=TNvdh+ZiLkMN+hT;=tC?%vQQGc7WrCPIYZwYTW`;x|N zrlEz1yf95FiloUU^(onr3A3>+96;;6aL?($@!JwiQ2hO|^i)b4pCJ7-y&a~B#J`#FO!3uBp{5GBvM2U@K85&o0q~6#LtppE&cVY z3Bv{xQ-;i}LN-60B2*1suMd=Fi%Y|7@52axZ|b=Wiwk^5eg{9X4}(q%4D5N5_Gm)` zg~VyFCwfkIKW(@@ZGAlTra6CO$RA_b*yz#){B82N7AYpQ9)sLQfhOAOMUV7$0|d$=_y&jl>va$3u-H z_+H*|UXBPLe%N2Ukwu1*)kt!$Y>(IH3`YbEt; znb1uB*{UgwG{pQnh>h@vyCE!6B~!k}NxEai#iY{$!_w54s5!6jG9%pr=S~3Km^EEA z)sCnnau+ZY)(}IK#(3jGGADw8V7#v~<&y5cF=5_Ypkrs3&7{}%(4KM7) zuSHVqo~g#1kzNwXc39%hL8atpa1Wd#V^uL=W^&E)fvGivt)B!M)?)Y#Ze&zU6O_I?1wj)*M;b*dE zqlcwgX#eVuZj2GKgBu@QB(#LHMd`qk<08i$hG1@g1;zD*#(9PHjVWl*5!;ER{Q#A9 zyQ%fu<$U?dOW=&_#~{nrq{RRyD8upRi}c-m!n)DZw9P>WGs>o1vefI}ujt_`O@l#Z z%xnOt4&e}LlM1-0*dd?|EvrAO-$fX8i{aTP^2wsmSDd!Xc9DxJB=x1}6|yM~QQPbl z0xrJcQNtWHgt*MdGmtj%x6SWYd?uGnrx4{m{6A9bYx`m z$*UAs@9?3s;@Jl19%$!3TxPlCkawEk12FADYJClt0N@O@Pxxhj+Kk(1jK~laR0*KGAc7%C4nI^v2NShTc4#?!p{0@p0T#HSIRndH;#Ts0YECtlSR}~{Uck+keoJq6iH)(Zc~C!fBe2~4(Wd> zR<4I1zMeW$<0xww(@09!l?;oDiq zk8qjS9Lxv$<5m#j(?4VLDgLz;8b$B%XO|9i7^1M;V{aGC#JT)c+L=BgCfO5k>CTlI zOlf~DzcopV29Dajzt*OcYvaUH{UJPaD$;spv%>{y8goE+bDD$~HQbON>W*~JD`;`- zZEcCPSdlCvANe z=?|+e{6AW$f(H;BND>uy1MvQ`pri>SafK5bK!YAE>0URAW9RS8#LWUHBOc&BNQ9T+ zJpg~Eky!u!9WBk)!$Z?!^3M~o_VPERYnk1NmzVYaGH;1h+;st==-;jzF~2LTn+x*k zvywHZg7~=aiJe=OhS@U>1fYGvT1+jsAaiaM;) zay2xsMKhO+FIeK?|K{G4SJOEt*eX?!>K8jpsZWW8c!X|JR#v(1+Ey5NM^TB1n|_40 z@Db2gH}PNT+3YEyqXP8U@)`E|Xat<{K5K;eK7O0yV72m|b!o43!e-!P>iW>7-9HN7 zmmc7)JX0^lPzF#>$#D~nU^3f!~Q zQWly&oZEb1847&czU;dg?=dS>z3lJkADL1innNtE(f?~OxM`%A_PBp?Lj;zDDomf$ z;|P=FTmqX|!sHO6uIfCmh4Fbgw@`DOn#`qAPEsYUiBvUlw zevH{)YWQu>FPXU$%1!h*2rtk_J}qNkkq+StX8Wc*KgG$yH#p-kcD&)%>)Yctb^JDB zJe>=!)5nc~?6hrE_3n^_BE<^;2{}&Z>Dr)bX>H{?kK{@R)`R5lnlO6yU&UmWy=d03 z*(jJIwU3l0HRW1PvReOb|MyZT^700rg8eFp#p<3Et%9msiCxR+jefK%x81+iN0=hG z;<`^RUVU+S)Iv-*5y^MqD@=cp{_cP4`s=z)Ti3!Bf@zCmfpZTwf|>|0t^E8R^s`ad z5~tA?0x7OM{*D;zb6bvPu|F5XpF11`U5;b*$p zNAq7E6c=aUnq>}$JAYsO&=L^`M|DdSSp5O4LA{|tO5^8%Hf1lqqo)sj=!aLNKn9(3 zvKk($N`p`f&u+8e^Z-?uc2GZ_6-HDQs@l%+pWh!|S9+y3!jrr3V%cr{FNe&U6(tYs zLto$0D+2}K_9kuxgFSeQ!EOXjJtZ$Pyl_|$mPQ9#fES=Sw8L% zO7Jij9cscU)@W+$jeGpx&vWP9ZN3fLDTp zaYM$gJD8ccf&g>n?a56X=y zec%nLN`(dVCpSl9&pJLf2BN;cR5F0Nn{(LjGe7RjFe7efp3R_2JmHOY#nWEc2TMhMSj5tBf-L zlxP3sV`!?@!mRnDTac{35I7h@WTfRjRiFw*Q*aD8)n)jdkJC@)jD-&mzAdK6Kqdct8P}~dqixq;n zjnX!pb^;5*Rr?5ycT7>AB9)RED^x+DVDmIbHKjcDv2lHK;apZOc=O@`4nJ;k|iikKk66v4{zN#lmSn$lh z_-Y3FC)iV$rFJH!#mNqWHF-DtSNbI)84+VLDWg$ph_tkKn_6+M1RZ!)EKaRhY={el zG-i@H!fvpH&4~$5Q+zHU(Ub=;Lzcrc3;4Cqqbr$O`c5M#UMtslK$3r+Cuz>xKl+xW?`t2o=q`1djXC=Q6`3C${*>dm~I{ z(aQH&Qd{{X+&+-4{epSL;q%n$)NOQ7kM}ea9bA++*F+t$2$%F!U!U}(&y7Sd0jQMV zkOhuJ$+g7^kb<`jqFiq(y1-~JjP13J&uB=hfjH5yAArMZx?VzW1~>tln~d5pt$uWR~TM!lIg+D)prR 
zocU0N2}_WTYpU`@Bsi1z{$le`dO{-pHFQr{M}%iEkX@0fv!AGCTcB90@e|slf#unz z*w4Cf>(^XI64l|MmWih1g!kwMJiifdt4C<5BHtaS%Ra>~3IFwjdu;_v*7BL|fPu+c zNp687`{}e@|%)5g4U*i=0zlSWXzz=YcZ*&Bg zr$r(SH0V5a%oHh*t&0y%R8&jDI=6VTWS_kJ!^WN!ET@XfEHYG-T1jJsDd`yEgh!^* z+!P62=v`R2=TBVjt=h}|JIg7N^RevZuyxyS+jsk>=iLA52Ak+7L?2$ZDUaWdi1PgB z_;*Uae_n&7o27ewV*y(wwK~8~tU<#Np6UUIx}zW6fR&dKiPq|$A{BwG_-wVfkm+EP zxHU@m`im3cD#fH63>_X`Il-HjZN_hqOVMG;(#7RmI13D-s_>41l|vDH1BglPsNJ+p zTniY{Hwoief+h%C^|@Syep#722=wmcTR7awIzimAcye?@F~f|n<$%=rM+Jkz9m>PF70$)AK@|h_^(zn?!;={;9Zo7{ zBI7O?6!J2Ixxk;XzS~ScO9{K1U9swGvR_d+SkromF040|Slk%$)M;9O_8h0@WPe4= z%iWM^ust8w$(NhO)7*8uq+9CycO$3m-l}O70sBi<4=j0CeE_&3iRUWJkDM$FIfrkR zHG2|hVh3?Nt$fdI$W?<|Qq@#hjDijk@7eUr1&JHYI>(_Q4^3$+Zz&R)Z`WqhBIvjo zX#EbA8P0Qla-yACvt)%oAVHa#kZi3Y8|(IOp_Z6J-t{)98*OXQ#8^>vTENsV@(M}^ z(>8BXw`{+)BfyZB!&85hT0!$>7$uLgp9hP9M7v=5@H`atsri1^{1VDxDqizj46-2^ z?&eA9udH#BD|QY2B7Zr$l;NJ-$L!u8G{MZoX)~bua5J=0p_JnM`$(D4S!uF}4smWq zVo%kQ~C~X?cWCH zo4s#FqJ)k|D{c_ok+sZ8`m2#-Uk8*o)io`B+WTD0PDA!G`DjtibftJXhPVjLZj~g& z=MM9nF$7}xvILx}BhM;J-Xnz0=^m1N2`Mhn6@ct+-!ijIcgi6FZ*oIPH(tGYJ2EQ0 z{;cjcc>_GkAlWEZ2zZLA_oa-(vYBp7XLPbHCBcGH$K9AK6nx}}ya%QB2=r$A;11*~ z_wfru1SkIQ0&QUqd)%eAY^FL!G;t@7-prQ|drDn#yDf%Uz8&kGtrPxKv?*TqkC(}g zUx10<;3Vhnx{gpWXM8H zKc0kkM~gIAts$E!X-?3DWG&^knj4h(q5(L;V81VWyC@_71oIpXfsb0S(^Js#N_0E} zJ%|XX&EeVPyu}? zz~(%slTw+tcY3ZMG$+diC8zed=CTN}1fB`RXD_v2;{evY z@MCG$l9Az+F()8*SqFyrg3jrN7k^x3?;A?L&>y{ZUi$T8!F7Dv8s}}4r9+Wo0h^m= zAob@CnJ;IR-{|_D;_w)? zcH@~&V^(}Ag}%A90);X2AhDj(-YB>$>GrW1F4C*1S5`u@N{T|;pYX1;E?gtBbPvS* zlv3r#rw2KCmLqX0kGT8&%#A6Sc(S>apOHtfn+UdYiN4qPawcL{Sb$>&I)Ie>Xs~ej z7)a=-92!sv-A{-7sqiG-ysG0k&beq6^nX1L!Fs$JU#fsV*CbsZqBQ|y z{)}zvtEwO%(&mIG|L?qs2Ou1rqTZHV@H+sm8Nth(+#dp0DW4VXG;;tCh`{BpY)THY z_10NNWpJuzCG%Q@#Aj>!v7Eq8eI6_JK3g2CsB2jz)2^bWiM{&U8clnV7<2?Qx5*k_ zl9B$P@LV7Sani>Xum{^yJ6uYxM4UHnw4zbPdM|PeppudXe}+OcX z!nr!xaUA|xYtA~jE|436iL&L={H3e}H`M1;2|pLG)Z~~Ug9X%_#D!DW>w}Es!D{=4 zxRPBf5UWm2{}D>Em;v43miQ~2{>%>O*`wA{7j;yh;*DV=C-bs;3p{AD;>VPcn>E;V zLgtw|Y{|Beo+_ABz`lofH+cdf33LjIf!RdcW~wWgmsE%2yCQGbst4TS_t%6nS8a+m zFEr<|9TQzQC@<(yNN9GR4S$H-SA?xiLIK2O2>*w-?cdzNPsG4D3&%$QOK{w)@Dk}W z|3_Z>U`XBu7j6Vc=es(tz}c7k4al1$cqDW4a~|xgE9zPX(C`IsN(QwNomzsBOHqjd zi{D|jYSv5 zC>6#uB~%#!!*?zXW`!yHWjbjwm!#eo3hm;>nJ!<`ZkJamE6i>>WqkoTpbm(~b%G_v z`t3Z#ERips;EoA_0c?r@WjEP|ulD+hue5r8946Sd0kuBD$A!=dxigTZn)u3>U;Y8l zX9j(R*(;;i&HrB&M|Xnitzf@><3#)aKy=bFCf5Hz@_);{nlL?J!U>%fL$Fk~Ocs3& zB@-Ek%W>h9#$QIYg07&lS_CG3d~LrygXclO!Ws-|PxMsn@n{?77wCaq?uj`dd7lllDCGd?ed&%5k{RqUhiN1u&?uz@Fq zNkv_4xmFcl?vs>;emR1R<$tg;*Ayp@rl=ik z=x2Hk zJqsM%++e|*+#camAiem6f;3-khtIgjYmNL0x|Mz|y{r{6<@_&a7^1XDyE>v*uo!qF zBq^I8PiF#w<-lFvFx9xKoi&0j)4LX~rWsK$%3hr@ebDv^($$T^4m4h#Q-(u*Mbt6F zE%y0Fvozv=WAaTj6EWZ)cX{|9=AZDvPQuq>2fUkU(!j1GmdgeYLX`B0BbGK(331ME zu3yZ3jQ@2)WW5!C#~y}=q5Av=_;+hNi!%gmY;}~~e!S&&^{4eJuNQ2kud%Olf8TRI zW-Dze987Il<^!hCO{AR5tLW{F1WLuZ>nhPjke@CSnN zzoW{m!+PSCb7byUf-1b;`{0GU^zg7b9c!7ueJF`>L;|akVzb&IzoLNNEfxp7b7xMN zKs9QG6v@t7X)yYN9}3d4>*ROMiK-Ig8(Do$3UI&E}z!vcH2t(VIk-cLyC-Y%`)~>Ce23A=dQsc<( ziy;8MmHki+5-(CR8$=lRt{(9B9W59Pz|z0^;`C!q<^PyE$KXt!KibFH*xcB9V%xTD zn;YlZ*tTukwr$(mWMka@|8CW-J8!zCXI{P1-&=wSvZf&%9SZ7m`1&2^nV#D z6T*)`Mz3wGUC69Fg0Xk!hwY}ykk!TE%mr57TLX*U4ygwvM^!#G`HYKLIN>gT;?mo% zAxGgzSnm{}vRG}K)8n(XjG#d+IyAFnozhk|uwiey(p@ zu>j#n4C|Mhtd=0G?Qn5OGh{{^MWR)V*geNY8d)py)@5a85G&_&OSCx4ASW8g&AEXa zC}^ET`eORgG*$$Q1L=9_8MCUO4Mr^1IA{^nsB$>#Bi(vN$l8+p(U^0dvN_{Cu-UUm zQyJc!8>RWp;C3*2dGp49QVW`CRR@no(t+D|@nl138lu@%c1VCy3|v4VoKZ4AwnnjF z__8f$usTzF)TQ$sQ^|#(M}-#0^3Ag%A0%5vA=KK$37I`RY({kF-z$(P50pf3_20YTr%G@w+bxE_V+Tt^YHgrlu$#wjp7igF!=o8e2rqCs|>XM9+M7~TqI&fcx 
[GIT binary patch data omitted: unreadable base85 delta, presumably gradle/wrapper/gradle-wrapper.jar]

diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index 865f1ba80d1e6..fcbbad6dd644c 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,7 +1,7 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionSha256Sum=85719317abd2112f021d4f41f09ec370534ba288432065f4b477b6a3b652910d
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-all.zip
+distributionSha256Sum=194717442575a6f96e1c1befa2c30e9a4fc90f701d7aee33eb879b79e7ff05c0
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.7-all.zip
 networkTimeout=10000
 validateDistributionUrl=true
 zipStoreBase=GRADLE_USER_HOME
diff --git a/plugins/examples/gradle/wrapper/gradle-wrapper.properties b/plugins/examples/gradle/wrapper/gradle-wrapper.properties
index 865f1ba80d1e6..fcbbad6dd644c 100644
--- a/plugins/examples/gradle/wrapper/gradle-wrapper.properties
+++ b/plugins/examples/gradle/wrapper/gradle-wrapper.properties
@@ -1,7 +1,7 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionSha256Sum=85719317abd2112f021d4f41f09ec370534ba288432065f4b477b6a3b652910d
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-all.zip
+distributionSha256Sum=194717442575a6f96e1c1befa2c30e9a4fc90f701d7aee33eb879b79e7ff05c0
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.7-all.zip
 networkTimeout=10000
 validateDistributionUrl=true
 zipStoreBase=GRADLE_USER_HOME
diff --git a/x-pack/plugin/ent-search/build.gradle b/x-pack/plugin/ent-search/build.gradle
index 4551011b03ca1..efd33cd163fdc 100644
--- a/x-pack/plugin/ent-search/build.gradle
+++ b/x-pack/plugin/ent-search/build.gradle
@@ -34,8 +34,6 @@ dependencies {
   javaRestTestImplementation(project(path: xpackModule('core')))
   javaRestTestImplementation(testArtifact(project(xpackModule('core'))))
   javaRestTestImplementation(project(':modules:lang-mustache'))
-
-  module ':modules:search-business-rules'
 }
 
 testClusters.configureEach {

From d6d1edd52959f358ce1df438d01ddf03ef8b5c21 Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Mon, 25 Mar 2024 09:22:20 -0400
Subject: [PATCH 38/79] ESQL: Fix typo in docs readme s/and/are/

---
 docs/reference/esql/functions/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/esql/functions/README.md b/docs/reference/esql/functions/README.md
index fd310ebacfe7e..0f0f3b6e3cbb8 100644
--- a/docs/reference/esql/functions/README.md
+++ b/docs/reference/esql/functions/README.md
@@ -1,4 +1,4 @@
-The files in these subdirectories and generated by ESQL's test suite:
+The files in these subdirectories are generated by ESQL's test suite:
 * `description` - description of each function scraped from `@FunctionInfo#description`
 * `examples` - examples of each function scraped from `@FunctionInfo#examples`
 * `parameters` - description of each function's parameters scraped from `@Param`

From 8010b4e8e5dd1a78db6350248f68eee19f92422c Mon Sep 17 00:00:00 2001
From: Tanguy Leroux
Date: Mon, 25 Mar 2024 14:45:13 +0100
Subject: [PATCH 39/79] SharedBlobCacheService.maybeFetchRegion should use computeCacheFileRegionSize (#106685)

This method computes the exact ranges to fetch using the length of the blob,
but that does not work for SharedBlobCacheService implementations that use a
specific computeCacheFileRegionSize which is not based on blob length.
---
 docs/changelog/106685.yaml                        | 5 +++++
 .../blobcache/shared/SharedBlobCacheService.java | 6 +-----
 2 files changed, 6 insertions(+), 5 deletions(-)
 create mode 100644 docs/changelog/106685.yaml

diff --git a/docs/changelog/106685.yaml b/docs/changelog/106685.yaml
new file mode 100644
index 0000000000000..ed4a16ba0666c
--- /dev/null
+++ b/docs/changelog/106685.yaml
@@ -0,0 +1,5 @@
+pr: 106685
+summary: '`SharedBlobCacheService.maybeFetchRegion` should use `computeCacheFileRegionSize`'
+area: Snapshot/Restore
+type: bug
+issues: []
diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java
index 934aeef26843f..0d51a4271e85b 100644
--- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java
+++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java
@@ -561,12 +561,8 @@ public void maybeFetchRegion(
             listener.onResponse(false);
             return;
         }
-        long regionLength = regionSize;
         try {
-            if (region == getEndingRegion(blobLength)) {
-                regionLength = blobLength - getRegionStart(region);
-            }
-            ByteRange regionRange = ByteRange.of(0, regionLength);
+            ByteRange regionRange = ByteRange.of(0, computeCacheFileRegionSize(blobLength, region));
             if (regionRange.isEmpty()) {
                 listener.onResponse(false);
                 return;

From 1cc61107221a71890ad534567e31d4ef6007eb99 Mon Sep 17 00:00:00 2001
From: Simon Cooper
Date: Mon, 25 Mar 2024 13:50:32 +0000
Subject: [PATCH 40/79] Add isPatchFrom method to check for future patch versions (#106712)

---
 .../org/elasticsearch/TransportVersion.java   | 24 +++++++++++++++++++
 .../elasticsearch/TransportVersionTests.java  | 12 ++++++++++
 2 files changed, 36 insertions(+)

diff --git a/server/src/main/java/org/elasticsearch/TransportVersion.java b/server/src/main/java/org/elasticsearch/TransportVersion.java
index 22e02652e9f68..48970b97e480e 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersion.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersion.java
@@ -101,6 +101,30 @@ public static TransportVersion fromString(String str) {
         return TransportVersion.fromId(Integer.parseInt(str));
     }
 
+    /**
+     * Returns {@code true} if this version is a patch version at or after {@code version}.
+     * <p>
+     * This should not be used normally. It is used for matching patch versions of the same base version,
+     * using the standard version number format specified in {@link TransportVersions}.
+     * When a patch version of an existing transport version is created, {@code transportVersion.isPatchFrom(patchVersion)}
+     * will match any transport version at or above {@code patchVersion} that is also of the same base version.
+     * <p>
+     * For example, {@code version.isPatchFrom(8_800_00_4)} will return the following for the given {@code version}:
+     * <ul>
+     *     <li>{@code 8_799_00_0.isPatchFrom(8_800_00_4)}: {@code false}</li>
+     *     <li>{@code 8_799_00_9.isPatchFrom(8_800_00_4)}: {@code false}</li>
+     *     <li>{@code 8_800_00_0.isPatchFrom(8_800_00_4)}: {@code false}</li>
+     *     <li>{@code 8_800_00_3.isPatchFrom(8_800_00_4)}: {@code false}</li>
+     *     <li>{@code 8_800_00_4.isPatchFrom(8_800_00_4)}: {@code true}</li>
+     *     <li>{@code 8_800_00_9.isPatchFrom(8_800_00_4)}: {@code true}</li>
+     *     <li>{@code 8_800_01_0.isPatchFrom(8_800_00_4)}: {@code false}</li>
+     *     <li>{@code 8_801_00_0.isPatchFrom(8_800_00_4)}: {@code false}</li>
+     * </ul>
    + */ + public boolean isPatchFrom(TransportVersion version) { + return onOrAfter(version) && id < version.id + 10 - (version.id % 10); + } + /** * Returns a string representing the Elasticsearch release version of this transport version, * if applicable for this deployment, otherwise the raw version number. diff --git a/server/src/test/java/org/elasticsearch/TransportVersionTests.java b/server/src/test/java/org/elasticsearch/TransportVersionTests.java index b8b8380ee4a96..2de973622248b 100644 --- a/server/src/test/java/org/elasticsearch/TransportVersionTests.java +++ b/server/src/test/java/org/elasticsearch/TransportVersionTests.java @@ -159,6 +159,18 @@ public void testMax() { } } + public void testIsPatchFrom() { + TransportVersion patchVersion = TransportVersion.fromId(8_800_00_4); + assertThat(TransportVersion.fromId(8_799_00_0).isPatchFrom(patchVersion), is(false)); + assertThat(TransportVersion.fromId(8_799_00_9).isPatchFrom(patchVersion), is(false)); + assertThat(TransportVersion.fromId(8_800_00_0).isPatchFrom(patchVersion), is(false)); + assertThat(TransportVersion.fromId(8_800_00_3).isPatchFrom(patchVersion), is(false)); + assertThat(TransportVersion.fromId(8_800_00_4).isPatchFrom(patchVersion), is(true)); + assertThat(TransportVersion.fromId(8_800_00_9).isPatchFrom(patchVersion), is(true)); + assertThat(TransportVersion.fromId(8_800_01_0).isPatchFrom(patchVersion), is(false)); + assertThat(TransportVersion.fromId(8_801_00_0).isPatchFrom(patchVersion), is(false)); + } + public void testVersionConstantPresent() { Set ignore = Set.of(TransportVersions.ZERO, TransportVersion.current(), TransportVersions.MINIMUM_COMPATIBLE); assertThat(TransportVersion.current(), sameInstance(TransportVersion.fromId(TransportVersion.current().id()))); From d44b9b64844a6ff6928f8b9f10803340d8cb3184 Mon Sep 17 00:00:00 2001 From: Tommaso Teofili Date: Mon, 25 Mar 2024 15:08:54 +0100 Subject: [PATCH 41/79] Test knn with qvb assert ids and result size (#106686) yamlRestTest for knn with query_vector_builder assert document ids directly and check result size --- .../ml/search_knn_query_vector_builder.yml | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml index 50f687f704994..4cab2c7908748 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml @@ -75,17 +75,17 @@ setup: index: index-with-embedded-text refresh: true body: | - {"index": {}} + {"index": {"_id": "0"}} {"source_text": "the octopus comforter smells", "embedding":[0.3925197124481201, 0.9145996570587158, 0.01372915506362915, 0.9987854957580566, 0.3240084648132324, 0.6188188195228577, 0.926924467086792, 0.12143599987030029, 0.175662100315094, 0.16076070070266724, 0.7671306133270264, 0.9518267512321472, 0.4557478427886963, 0.5410670638084412, 0.7594802975654602, 0.5035953521728516, 0.4115469455718994, 0.038427770137786865, 0.5419668555259705, 0.6362232565879822, 0.17554593086242676, 0.01821446418762207, 0.2931918501853943, 0.294437050819397, 0.6901726722717285, 0.1679999828338623, 0.7995195984840393, 0.8781598210334778, 0.18507736921310425, 0.8614458441734314, 0.690071702003479, 0.7859554886817932, 0.803643524646759, 0.0048452019691467285, 
0.19700628519058228, 0.22210919857025146, 0.7043975591659546, 0.6320799589157104, 0.542057991027832, 0.8704766035079956, 0.32195907831192017, 0.7272325158119202, 0.4066658020019531, 0.89588862657547, 0.7947880029678345, 0.06543421745300293, 0.2873639464378357, 0.8773637413978577, 0.36480581760406494, 0.692948043346405, 0.19171112775802612, 0.14275449514389038, 0.17054951190948486, 0.8969640135765076, 0.39838290214538574, 0.26756417751312256, 0.5369327664375305, 0.4736328721046448, 0.21181154251098633, 0.2695402503013611, 0.8651300072669983, 0.8051849603652954, 0.7073134779930115, 0.5963589549064636, 0.09601861238479614, 0.5362404584884644, 0.23020631074905396, 0.8515381813049316, 0.5730932354927063, 0.7235705256462097, 0.08228331804275513, 0.5840849280357361, 0.6030013561248779, 0.2084050178527832, 0.7312950491905212, 0.6159517168998718, 0.6482340693473816, 0.07220339775085449, 0.5136227607727051, 0.9152160286903381, 0.8169018030166626, 0.15515869855880737, 0.7978536486625671, 0.564482569694519, 0.4757157564163208, 0.2718064785003662, 0.6910138726234436, 0.5675734877586365, 0.702862024307251, 0.19079893827438354, 0.8995556235313416, 0.4988499879837036, 0.6378234028816223, 0.2683940529823303, 0.21990180015563965, 0.8442690372467041, 0.8502047061920166, 0.9857811331748962, 0.3549607992172241, 0.7605474591255188]} - {"index": {}} + {"index": {"_id": "1"}} {"source_text": "the machine is leaking", "embedding":[0.09775793552398682, 0.9594467282295227, 0.7915146946907043, 0.9140479564666748, 0.5148435235023499, 0.8556410670280457, 0.6022665500640869, 0.05222177505493164, 0.9821935296058655, 0.49276530742645264, 0.23147249221801758, 0.2428399920463562, 0.3865380883216858, 0.5778483748435974, 0.5600519776344299, 0.9427472352981567, 0.48832541704177856, 0.8807493448257446, 0.32909590005874634, 0.8452557325363159, 0.811530590057373, 0.13344216346740723, 0.15256845951080322, 0.5025331974029541, 0.4288772940635681, 0.6590417623519897, 0.9282752871513367, 0.8842046856880188, 0.7873250842094421, 0.356731653213501, 0.9959152936935425, 0.07572609186172485, 0.5062583088874817, 0.36245888471603394, 0.6189196705818176, 0.7766605019569397, 0.5198523998260498, 0.7379586100578308, 0.0553441047668457, 0.5035901665687561, 0.24139636754989624, 0.10798943042755127, 0.272808313369751, 0.38171595335006714, 0.24275553226470947, 0.956981897354126, 0.8182021379470825, 0.9383817315101624, 0.06551980972290039, 0.6892690658569336, 0.7068917751312256, 0.5184322595596313, 0.6103079319000244, 0.7020677328109741, 0.7181660532951355, 0.6477184295654297, 0.26282840967178345, 0.9316624402999878, 0.8318467140197754, 0.1487215757369995, 0.39937925338745117, 0.6842989921569824, 0.3496543765068054, 0.6008991003036499, 0.9530165791511536, 0.4209877848625183, 0.5675879716873169, 0.7883706092834473, 0.9547191858291626, 0.6292906403541565, 0.49566715955734253, 0.6907342672348022, 0.0834314227104187, 0.19785481691360474, 0.4896165728569031, 0.8460168838500977, 0.9680339097976685, 0.43386441469192505, 0.7068926095962524, 0.19123870134353638, 0.5661664009094238, 0.610595166683197, 0.23599380254745483, 0.2831611633300781, 0.7919651865959167, 0.0018386244773864746, 0.15559959411621094, 0.4622604250907898, 0.02038663625717163, 0.42241227626800537, 0.4200526475906372, 0.1223069429397583, 0.7035380005836487, 0.09902423620223999, 0.7804107666015625, 0.05339455604553223, 0.6485095024108887, 0.29347676038742065, 0.9716366529464722, 0.30257928371429443]} - {"index": {}} + {"index": {"_id": "2"}} {"source_text": "these are my words", 
"embedding":[0.7000167369842529, 0.590781033039093, 0.009879708290100098, 0.7874260544776917, 0.797156572341919, 0.1791083812713623, 0.07826781272888184, 0.25102007389068604, 0.09334254264831543, 0.3819708824157715, 0.7312374711036682, 0.02819347381591797, 0.20099765062332153, 0.7702597975730896, 0.9443559050559998, 0.35520339012145996, 0.25699591636657715, 0.5596823692321777, 0.23947590589523315, 0.47478222846984863, 0.23411548137664795, 0.9809996485710144, 0.3806597590446472, 0.5006771087646484, 0.5724453926086426, 0.21510547399520874, 0.07062828540802002, 0.9858258962631226, 0.9636645317077637, 0.36034029722213745, 0.07260054349899292, 0.06882566213607788, 0.18354403972625732, 0.06756395101547241, 0.5749042630195618, 0.05275309085845947, 0.1865217685699463, 0.5852730870246887, 0.1086682677268982, 0.10090464353561401, 0.32582908868789673, 0.5494027733802795, 0.873362123966217, 0.02236837148666382, 0.37973177433013916, 0.5556552410125732, 0.5083678364753723, 0.8081125020980835, 0.09164196252822876, 0.2207810878753662, 0.8086426258087158, 0.271828293800354, 0.5981417298316956, 0.7745779156684875, 0.40872830152511597, 0.6035888195037842, 0.5598325133323669, 0.19086670875549316, 0.02406853437423706, 0.8299782872200012, 0.4994274377822876, 0.0300295352935791, 0.47190529108047485, 0.8889331817626953, 0.34195321798324585, 0.9380808472633362, 0.4418332576751709, 0.5789303779602051, 0.0526617169380188, 0.7349719405174255, 0.44571834802627563, 0.6602563261985779, 0.3819742202758789, 0.16881734132766724, 0.45588219165802, 0.028081774711608887, 0.6681976914405823, 0.8183007836341858, 0.7887755632400513, 0.4506028890609741, 0.8040162324905396, 0.431918203830719, 0.7408918738365173, 0.39756304025650024, 0.7438145875930786, 0.6120601892471313, 0.5724676251411438, 0.08701330423355103, 0.18344634771347046, 0.7226220369338989, 0.3648560643196106, 0.9813777208328247, 0.2615315318107605, 0.9847549796104431, 0.32967478036880493, 0.47099196910858154, 0.3591546416282654, 0.4132147431373596, 0.48631107807159424, 0.04420149326324463]} - {"index": {}} + {"index": {"_id": "3"}} {"source_text": "washing machine", "embedding":[0.7044712901115417, 0.12284207344055176, 0.5008929967880249, 0.04643195867538452, 0.3666788339614868, 0.26660799980163574, 0.24114298820495605, 0.0761682391166687, 0.5294214487075806, 0.16935181617736816, 0.6257967948913574, 0.2804388999938965, 0.6417903900146484, 0.169958233833313, 0.4216839075088501, 0.6773303747177124, 0.9472144842147827, 0.21874648332595825, 0.5095921754837036, 0.839306116104126, 0.6176233291625977, 0.5847064852714539, 0.6748610734939575, 0.3264034390449524, 0.4112023115158081, 0.13818275928497314, 0.08356589078903198, 0.4147903323173523, 0.5626787543296814, 0.7167286276817322, 0.6314535737037659, 0.23092854022979736, 0.34547603130340576, 0.7425565719604492, 0.2837678790092468, 0.47037917375564575, 0.1555209755897522, 0.5618507266044617, 0.2076261043548584, 0.3026384711265564, 0.04561811685562134, 0.1691250205039978, 0.2504339814186096, 0.5350574851036072, 0.26857447624206543, 0.23607933521270752, 0.16938960552215576, 0.23708534240722656, 0.026302993297576904, 0.16901731491088867, 0.2847784757614136, 0.944273829460144, 0.28171658515930176, 0.9864799380302429, 0.6811433434486389, 0.9383156895637512, 0.5682582259178162, 0.14361613988876343, 0.7900274395942688, 0.27808505296707153, 0.05677521228790283, 0.08594226837158203, 0.6450491547584534, 0.06500720977783203, 0.36045730113983154, 0.1987738013267517, 0.07287931442260742, 0.5315744280815125, 0.04742676019668579, 
0.7842378616333008, 0.0881078839302063, 0.7612627744674683, 0.2528950572013855, 0.27305954694747925, 0.03027820587158203, 0.4686838984489441, 0.13311690092086792, 0.048372089862823486, 0.808062732219696, 0.44010263681411743, 0.5726178288459778, 0.15828031301498413, 0.4597446322441101, 0.6375324130058289, 0.8452948927879333, 0.9763500690460205, 0.5094607472419739, 0.3535742163658142, 0.664739191532135, 0.40749913454055786, 0.8537857532501221, 0.5830079913139343, 0.7949922680854797, 0.6309236288070679, 0.07258343696594238, 0.1224660873413086, 0.24250483512878418, 0.36189037561416626, 0.5156043171882629, 0.1819135546684265]} - {"index": {}} + {"index": {"_id": "4"}} {"source_text": "washing machine smells", "embedding":[0.7249823808670044, 0.3981819152832031, 0.4572623372077942, 0.7442894577980042, 0.15898281335830688, 0.6481881737709045, 0.1513708233833313, 0.8945682644844055, 0.7708938121795654, 0.5494217276573181, 0.48253726959228516, 0.39402270317077637, 0.6369197368621826, 0.7152248024940491, 0.6326345205307007, 0.7362181544303894, 0.350342333316803, 0.16101288795471191, 0.4180338382720947, 0.04114532470703125, 0.002633512020111084, 0.20396709442138672, 0.8963556885719299, 0.1552276611328125, 0.7476853728294373, 0.9651047587394714, 0.7527561187744141, 0.7041972279548645, 0.12461084127426147, 0.6282403469085693, 0.9631509184837341, 0.16590750217437744, 0.4101366400718689, 0.31320667266845703, 0.13579899072647095, 0.2895740270614624, 0.9905323386192322, 0.02118372917175293, 0.637545645236969, 0.5133231282234192, 0.679695188999176, 0.04641437530517578, 0.21913814544677734, 0.16534924507141113, 0.02987360954284668, 0.14805591106414795, 0.16874665021896362, 0.9378783702850342, 0.8607399463653564, 0.7287217974662781, 0.5402306318283081, 0.9973209500312805, 0.26169413328170776, 0.3835873603820801, 0.1874808669090271, 0.8038567304611206, 0.18557673692703247, 0.8631893992424011, 0.7676172256469727, 0.3599127531051636, 0.48698097467422485, 0.926689088344574, 0.6542723774909973, 0.49722349643707275, 0.7027173638343811, 0.13385021686553955, 0.9873734712600708, 0.17187494039535522, 0.7995050549507141, 0.5259199142456055, 0.33804380893707275, 0.21665722131729126, 0.952264130115509, 0.8337767720222473, 0.879487156867981, 0.5553549528121948, 0.6160674095153809, 0.1315295696258545, 0.8010737895965576, 0.834412693977356, 0.20340144634246826, 0.8993185758590698, 0.6493895649909973, 0.9454924464225769, 0.38529330492019653, 0.6891772150993347, 0.5530646443367004, 0.18555349111557007, 0.8361382484436035, 0.11815804243087769, 0.38942235708236694, 0.945141613483429, 0.6417409181594849, 0.39776402711868286, 0.5133314728736877, 0.5431299805641174, 0.2615429759025574, 0.8987119793891907, 0.023733675479888916, 0.4941052794456482]} - {"index": {}} + {"index": {"_id": "5"}} {"source_text": "my words", "embedding":[0.19087255001068115, 0.5498749017715454, 0.9536173939704895, 0.25011056661605835, 0.37642204761505127, 0.18271470069885254, 0.670674741268158, 0.5553990006446838, 0.3306507468223572, 0.3368762731552124, 0.053364574909210205, 0.047215282917022705, 0.4221981167793274, 0.7591024041175842, 0.998794436454773, 0.6113318204879761, 0.8178470730781555, 0.8554672598838806, 0.40100908279418945, 0.6486459374427795, 0.804382860660553, 0.6775466799736023, 0.2916865944862366, 0.7019925117492676, 0.9812073707580566, 0.4414554834365845, 0.08203905820846558, 0.9167835116386414, 0.3082762360572815, 0.5454868674278259, 0.6665160655975342, 0.06828844547271729, 0.36014634370803833, 0.01810687780380249, 0.2640475630760193, 
0.1856365203857422, 0.4734996557235718, 0.8153479695320129, 0.9614933133125305, 0.4851576089859009, 0.003343045711517334, 0.17352384328842163, 0.26423048973083496, 0.24217921495437622, 0.5694647431373596, 0.8538861274719238, 0.06464511156082153, 0.038984060287475586, 0.7695011496543884, 0.008188009262084961, 0.3858819007873535, 0.7950196862220764, 0.7225212454795837, 0.3982154130935669, 0.4996080994606018, 0.28709208965301514, 0.6753579378128052, 0.6779837608337402, 0.4815831184387207, 0.27917128801345825, 0.8400004506111145, 0.9022405743598938, 0.8253144025802612, 0.6251398324966431, 0.25444501638412476, 0.7694959044456482, 0.006821691989898682, 0.7958594560623169, 0.9144708514213562, 0.8688076138496399, 0.9641174077987671, 0.44437146186828613, 0.06135892868041992, 0.2638128399848938, 0.05436718463897705, 0.9926314353942871, 0.8661795854568481, 0.9176243543624878, 0.5521496534347534, 0.6017677783966064, 0.22096896171569824, 0.7030748128890991, 0.16923701763153076, 0.8178754448890686, 0.47008246183395386, 0.28875309228897095, 0.14314061403274536, 0.3431167006492615, 0.9301973581314087, 0.5416158437728882, 0.563427209854126, 0.7897542119026184, 0.2761036157608032, 0.16855067014694214, 0.42684781551361084, 0.7562968730926514, 0.2551668882369995, 0.7754542827606201, 0.218039870262146, 0.7080662846565247]} - do: headers: @@ -106,23 +106,22 @@ setup: --- "Test vector search with query_vector_builder": - skip: - version: all - reason: AwaitsFix https://github.com/elastic/elasticsearch/issues/106650 -# version: " - 8.13.99" -# reason: "introduced after 8.13" + version: " - 8.13.99" + reason: "introduced after 8.13" - do: search: index: index-with-embedded-text body: + size: 3 query: knn: field: embedding - num_candidates: 3 query_vector_builder: text_embedding: model_id: text_embedding_model model_text: "the octopus comforter smells" - - match: { hits.total.value: 3 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._id: "0" } --- "nested kNN search with inner_hits size": From 12441f505ac108230e5e06a94d29e1f89278f7b9 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 25 Mar 2024 15:47:54 +0100 Subject: [PATCH 42/79] Reduce InternalComposite in a streaming fashion (#106566) Use a priority queue with a hashmap to keep track of the competitive buckets. We still delayed the merging of the child aggregations by introducing a DelayedMultiBucketAggregatorsReducer. --- .../DelayedMultiBucketAggregatorsReducer.java | 73 ++++++++ .../bucket/MultiBucketAggregatorsReducer.java | 2 +- .../bucket/composite/InternalComposite.java | 157 ++++++++++-------- 3 files changed, 159 insertions(+), 73 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/DelayedMultiBucketAggregatorsReducer.java diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DelayedMultiBucketAggregatorsReducer.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DelayedMultiBucketAggregatorsReducer.java new file mode 100644 index 0000000000000..7fc7c96badaaa --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DelayedMultiBucketAggregatorsReducer.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.aggregations.bucket; + +import org.elasticsearch.search.aggregations.AggregationReduceContext; +import org.elasticsearch.search.aggregations.AggregatorsReducer; +import org.elasticsearch.search.aggregations.InternalAggregations; + +import java.util.ArrayList; +import java.util.List; + +/** + * Class for reducing a list of {@link MultiBucketsAggregation.Bucket} to a single + * {@link InternalAggregations} and the number of documents in a delayable fashion. + * + * This class can be reused by calling {@link #reset()}. + * + * @see MultiBucketAggregatorsReducer + */ +public final class DelayedMultiBucketAggregatorsReducer { + + private final AggregationReduceContext context; + // the maximum size of this array is the number of shards to be reduced. We currently do it in a batches of 256 + // if we expect bigger batches, we might consider to use ObjectArray. + private final List internalAggregations; + private long count = 0; + + public DelayedMultiBucketAggregatorsReducer(AggregationReduceContext context) { + this.context = context; + this.internalAggregations = new ArrayList<>(); + } + + /** + * Adds a {@link MultiBucketsAggregation.Bucket} for reduction. + */ + public void accept(MultiBucketsAggregation.Bucket bucket) { + count += bucket.getDocCount(); + internalAggregations.add(bucket.getAggregations()); + } + + /** + * Reset the content of this reducer. + */ + public void reset() { + count = 0L; + internalAggregations.clear(); + } + + /** + * returns the reduced {@link InternalAggregations}. + */ + public InternalAggregations get() { + try (AggregatorsReducer aggregatorsReducer = new AggregatorsReducer(context, internalAggregations.size())) { + for (InternalAggregations agg : internalAggregations) { + aggregatorsReducer.accept(agg); + } + return aggregatorsReducer.get(); + } + } + + /** + * returns the number of docs + */ + public long getDocCount() { + return count; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketAggregatorsReducer.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketAggregatorsReducer.java index 176ca2f918fff..e7d0e6a17e4c6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketAggregatorsReducer.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketAggregatorsReducer.java @@ -21,7 +21,7 @@ public final class MultiBucketAggregatorsReducer implements Releasable { private final AggregatorsReducer aggregatorsReducer; - long count = 0; + private long count = 0; public MultiBucketAggregatorsReducer(AggregationReduceContext context, int size) { this.aggregatorsReducer = new AggregatorsReducer(context, size); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index 31cd5c9426755..fc13dcb6a22ee 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -12,6 +12,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ObjectArrayPriorityQueue; +import org.elasticsearch.common.util.ObjectObjectPagedHashMap; +import org.elasticsearch.core.Releasable; import 
org.elasticsearch.core.Releasables; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationReduceContext; @@ -20,6 +22,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.KeyComparable; +import org.elasticsearch.search.aggregations.bucket.DelayedMultiBucketAggregatorsReducer; import org.elasticsearch.search.aggregations.support.SamplingContext; import org.elasticsearch.xcontent.XContentBuilder; @@ -201,56 +204,29 @@ int[] getReverseMuls() { @Override protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) { return new AggregatorReducer() { - final ObjectArrayPriorityQueue pq = new ObjectArrayPriorityQueue<>(size, reduceContext.bigArrays()) { - @Override - protected boolean lessThan(BucketIterator a, BucketIterator b) { - return a.compareTo(b) < 0; - } - }; + final BucketsQueue queue = new BucketsQueue(reduceContext); boolean earlyTerminated = false; @Override public void accept(InternalAggregation aggregation) { InternalComposite sortedAgg = (InternalComposite) aggregation; earlyTerminated |= sortedAgg.earlyTerminated; - BucketIterator it = new BucketIterator(sortedAgg.buckets); - if (it.next() != null) { - pq.add(it); + for (InternalBucket bucket : sortedAgg.getBuckets()) { + if (queue.add(bucket) == false) { + // if the bucket is not competitive, we can break + // because incoming buckets are sorted + break; + } } } @Override public InternalAggregation get() { - InternalBucket lastBucket = null; - List buckets = new ArrayList<>(); - List result = new ArrayList<>(); - while (pq.size() > 0) { - BucketIterator bucketIt = pq.top(); - if (lastBucket != null && bucketIt.current.compareKey(lastBucket) != 0) { - InternalBucket reduceBucket = reduceBucket(buckets, reduceContext); - buckets.clear(); - result.add(reduceBucket); - if (result.size() >= getSize()) { - break; - } - } - lastBucket = bucketIt.current; - buckets.add(bucketIt.current); - if (bucketIt.next() != null) { - pq.updateTop(); - } else { - pq.pop(); - } - } - if (buckets.size() > 0) { - InternalBucket reduceBucket = reduceBucket(buckets, reduceContext); - result.add(reduceBucket); - } - + final List result = queue.get(); List reducedFormats = formats; CompositeKey lastKey = null; - if (result.size() > 0) { - lastBucket = result.get(result.size() - 1); + if (result.isEmpty() == false) { + InternalBucket lastBucket = result.get(result.size() - 1); /* Attach the formats from the last bucket to the reduced composite * so that we can properly format the after key. */ reducedFormats = lastBucket.formats; @@ -275,11 +251,82 @@ public InternalAggregation get() { @Override public void close() { - Releasables.close(pq); + Releasables.close(queue); } }; } + private class BucketsQueue implements Releasable { + private final ObjectObjectPagedHashMap bucketReducers; + private final ObjectArrayPriorityQueue queue; + private final AggregationReduceContext reduceContext; + + private BucketsQueue(AggregationReduceContext reduceContext) { + this.reduceContext = reduceContext; + bucketReducers = new ObjectObjectPagedHashMap<>(getSize(), reduceContext.bigArrays()); + queue = new ObjectArrayPriorityQueue<>(getSize(), reduceContext.bigArrays()) { + @Override + protected boolean lessThan(InternalBucket a, InternalBucket b) { + return b.compareKey(a) < 0; + } + }; + } + + /** adds a bucket to the queue. 
Return false if the bucket is not competitive, otherwise true.*/ + boolean add(InternalBucket bucket) { + DelayedMultiBucketAggregatorsReducer delayed = bucketReducers.get(bucket.key); + if (delayed == null) { + final InternalBucket out = queue.insertWithOverflow(bucket); + if (out == null) { + // bucket is added + delayed = new DelayedMultiBucketAggregatorsReducer(reduceContext); + } else if (out == bucket) { + // bucket is not competitive + return false; + } else { + // bucket replaces existing bucket + delayed = bucketReducers.remove(out.key); + assert delayed != null; + delayed.reset(); + } + bucketReducers.put(bucket.key, delayed); + } + delayed.accept(bucket); + return true; + } + + /** Return the list of reduced buckets */ + List get() { + final int bucketsSize = (int) bucketReducers.size(); + final InternalBucket[] result = new InternalBucket[bucketsSize]; + for (int i = bucketsSize - 1; i >= 0; i--) { + final InternalBucket bucket = queue.pop(); + assert bucket != null; + /* Use the formats from the bucket because they'll be right to format + * the key. The formats on the InternalComposite doing the reducing are + * just whatever formats make sense for *its* index. This can be real + * trouble when the index doing the reducing is unmapped. */ + final var reducedFormats = bucket.formats; + final DelayedMultiBucketAggregatorsReducer reducer = Objects.requireNonNull(bucketReducers.get(bucket.key)); + result[i] = new InternalBucket( + sourceNames, + reducedFormats, + bucket.key, + reverseMuls, + missingOrders, + reducer.getDocCount(), + reducer.get() + ); + } + return List.of(result); + } + + @Override + public void close() { + Releasables.close(bucketReducers, queue); + } + } + @Override public InternalAggregation finalizeSampling(SamplingContext samplingContext) { return new InternalComposite( @@ -296,22 +343,6 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) { ); } - private InternalBucket reduceBucket(List buckets, AggregationReduceContext context) { - assert buckets.isEmpty() == false; - long docCount = 0; - for (InternalBucket bucket : buckets) { - docCount += bucket.docCount; - } - final List aggregations = new BucketAggregationList<>(buckets); - final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context); - /* Use the formats from the bucket because they'll be right to format - * the key. The formats on the InternalComposite doing the reducing are - * just whatever formats make sense for *its* index. This can be real - * trouble when the index doing the reducing is unmapped. */ - final var reducedFormats = buckets.get(0).formats; - return new InternalBucket(sourceNames, reducedFormats, buckets.get(0).key, reverseMuls, missingOrders, docCount, aggs); - } - @Override public boolean equals(Object obj) { if (this == obj) return true; @@ -331,24 +362,6 @@ public int hashCode() { return Objects.hash(super.hashCode(), size, buckets, afterKey, Arrays.hashCode(reverseMuls), Arrays.hashCode(missingOrders)); } - private static class BucketIterator implements Comparable { - final Iterator it; - InternalBucket current; - - private BucketIterator(List buckets) { - this.it = buckets.iterator(); - } - - @Override - public int compareTo(BucketIterator other) { - return current.compareKey(other.current); - } - - InternalBucket next() { - return current = it.hasNext() ? 
it.next() : null; - } - } - public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket implements CompositeAggregation.Bucket, From a85599f125292e28aa966433a47dedae8edb9ef5 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 25 Mar 2024 09:04:29 -0700 Subject: [PATCH 43/79] Fix missing docs in time series source reader (#106705) While working on rate aggregation, I noticed that the time series source might miss documents. Specifically, when we reach the max_page_size limit and the tsid changes, we skip that document because we call nextDoc again when resuming reading. Also, I think this operator should honor the max_page_size limit and avoid emitting pages that exceed this threshold. --- ...TimeSeriesSortedSourceOperatorFactory.java | 45 ++-- .../TimeSeriesSortedSourceOperatorTests.java | 201 +++++++----------- 2 files changed, 92 insertions(+), 154 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java index b1211c8ea5ff4..f9df90da6aa2d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java @@ -228,15 +228,10 @@ protected boolean lessThan(Leaf a, Leaf b) { void consume() throws IOException { if (queue != null) { currentTsid = BytesRef.deepCopyOf(queue.top().timeSeriesHash); - boolean breakOnNextTsidChange = false; while (queue.size() > 0) { - if (remainingDocs <= 0) { + if (remainingDocs <= 0 || currentPagePos >= maxPageSize) { break; } - if (currentPagePos > maxPageSize) { - breakOnNextTsidChange = true; - } - currentPagePos++; remainingDocs--; Leaf leaf = queue.top(); @@ -244,46 +239,32 @@ void consume() throws IOException { docsBuilder.appendInt(leaf.iterator.docID()); timestampIntervalBuilder.appendLong(leaf.timestamp); tsOrdBuilder.appendInt(globalTsidOrd); + final Leaf newTop; if (leaf.nextDoc()) { // TODO: updating the top is one of the most expensive parts of this operation. // Ideally we would do this a less as possible. Maybe the top can be updated every N docs? - Leaf newTop = queue.updateTop(); - if (newTop.timeSeriesHash.equals(currentTsid) == false) { - globalTsidOrd++; - currentTsid = BytesRef.deepCopyOf(newTop.timeSeriesHash); - if (breakOnNextTsidChange) { - break; - } - } + newTop = queue.updateTop(); } else { queue.pop(); + newTop = queue.size() > 0 ? queue.top() : null; + } + if (newTop != null && newTop.timeSeriesHash.equals(currentTsid) == false) { + globalTsidOrd++; + currentTsid = BytesRef.deepCopyOf(newTop.timeSeriesHash); } } } else { - int previousTsidOrd = leaf.timeSeriesHashOrd; - boolean breakOnNextTsidChange = false; // Only one segment, so no need to use priority queue and use segment ordinals as tsid ord. while (leaf.nextDoc()) { - if (remainingDocs <= 0) { - break; - } - if (currentPagePos > maxPageSize) { - breakOnNextTsidChange = true; - } - if (breakOnNextTsidChange) { - if (previousTsidOrd != leaf.timeSeriesHashOrd) { - break; - } - } - - currentPagePos++; - remainingDocs--; - tsOrdBuilder.appendInt(leaf.timeSeriesHashOrd); timestampIntervalBuilder.appendLong(leaf.timestamp); // Don't append segment ord, because there is only one segment. 
docsBuilder.appendInt(leaf.iterator.docID()); - previousTsidOrd = leaf.timeSeriesHashOrd; + currentPagePos++; + remainingDocs--; + if (remainingDocs <= 0 || currentPagePos >= maxPageSize) { + break; + } } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java index 3b47597d6ea2f..9a5150bdf4fff 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java @@ -25,6 +25,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.Randomness; import org.elasticsearch.compute.data.BytesRefVector; import org.elasticsearch.compute.data.DocVector; import org.elasticsearch.compute.data.ElementType; @@ -33,6 +34,7 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.AnyOperatorTestCase; import org.elasticsearch.compute.operator.Driver; +import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OperatorTestCase; import org.elasticsearch.compute.operator.TestResultPageSinkOperator; @@ -48,15 +50,15 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.function.Function; -import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; public class TimeSeriesSortedSourceOperatorTests extends AnyOperatorTestCase { @@ -73,81 +75,28 @@ public void testSimple() { int numTimeSeries = 3; int numSamplesPerTS = 10; long timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); - List results = runDriver(1024, 1024, randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); - assertThat(results, hasSize(1)); - Page page = results.get(0); - assertThat(page.getBlockCount(), equalTo(5)); - - DocVector docVector = (DocVector) page.getBlock(0).asVector(); - assertThat(docVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); - - IntVector tsidVector = (IntVector) page.getBlock(1).asVector(); - assertThat(tsidVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); - - LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); - assertThat(timestampVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); - - LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); - assertThat(voltageVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); - - BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(4).asVector(); - assertThat(hostnameVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); - + int maxPageSize = between(1, 1024); + List results = runDriver(1024, maxPageSize, randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); + // for now 
we emit at most one time series each page int offset = 0; - for (int expectedTsidOrd = 0; expectedTsidOrd < numTimeSeries; expectedTsidOrd++) { - String expectedHostname = String.format(Locale.ROOT, "host-%02d", expectedTsidOrd); - long expectedVoltage = 5L + expectedTsidOrd; - for (int j = 0; j < numSamplesPerTS; j++) { - long expectedTimestamp = timestampStart + ((numSamplesPerTS - j - 1) * 10_000L); - - assertThat(docVector.shards().getInt(offset), equalTo(0)); - assertThat(voltageVector.getLong(offset), equalTo(expectedVoltage)); - assertThat(hostnameVector.getBytesRef(offset, new BytesRef()).utf8ToString(), equalTo(expectedHostname)); - assertThat(tsidVector.getInt(offset), equalTo(expectedTsidOrd)); - assertThat(timestampVector.getLong(offset), equalTo(expectedTimestamp)); - offset++; - } - } - } - - public void testMaxPageSize() { - int numTimeSeries = 3; - int numSamplesPerTS = 10; - long timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); - List results = runDriver(1024, 1, randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); - // A time series shouldn't be split over multiple pages. - assertThat(results, hasSize(numTimeSeries)); - for (int i = 0; i < numTimeSeries; i++) { - Page page = results.get(i); + for (Page page : results) { assertThat(page.getBlockCount(), equalTo(5)); - DocVector docVector = (DocVector) page.getBlock(0).asVector(); - assertThat(docVector.getPositionCount(), equalTo(numSamplesPerTS)); - IntVector tsidVector = (IntVector) page.getBlock(1).asVector(); - assertThat(tsidVector.getPositionCount(), equalTo(numSamplesPerTS)); - LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); - assertThat(timestampVector.getPositionCount(), equalTo(numSamplesPerTS)); - LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); - assertThat(voltageVector.getPositionCount(), equalTo(numSamplesPerTS)); - BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(4).asVector(); - assertThat(hostnameVector.getPositionCount(), equalTo(numSamplesPerTS)); - - int offset = 0; - int expectedTsidOrd = i; - String expectedHostname = String.format(Locale.ROOT, "host-%02d", expectedTsidOrd); - long expectedVoltage = 5L + expectedTsidOrd; - for (int j = 0; j < numSamplesPerTS; j++) { - long expectedTimestamp = timestampStart + ((numSamplesPerTS - j - 1) * 10_000L); - - assertThat(docVector.shards().getInt(offset), equalTo(0)); - assertThat(voltageVector.getLong(offset), equalTo(expectedVoltage)); - assertThat(hostnameVector.getBytesRef(offset, new BytesRef()).utf8ToString(), equalTo(expectedHostname)); - assertThat(tsidVector.getInt(offset), equalTo(expectedTsidOrd)); - assertThat(timestampVector.getLong(offset), equalTo(expectedTimestamp)); + for (int i = 0; i < page.getPositionCount(); i++) { + int expectedTsidOrd = offset / numSamplesPerTS; + String expectedHostname = String.format(Locale.ROOT, "host-%02d", expectedTsidOrd); + long expectedVoltage = 5L + expectedTsidOrd; + int sampleIndex = offset - expectedTsidOrd * numSamplesPerTS; + long expectedTimestamp = timestampStart + ((numSamplesPerTS - sampleIndex - 1) * 10_000L); + assertThat(docVector.shards().getInt(i), equalTo(0)); + assertThat(voltageVector.getLong(i), equalTo(expectedVoltage)); + assertThat(hostnameVector.getBytesRef(i, new BytesRef()).utf8ToString(), equalTo(expectedHostname)); + assertThat(tsidVector.getInt(i), equalTo(expectedTsidOrd)); + assertThat(timestampVector.getLong(i), equalTo(expectedTimestamp)); offset++; } } @@ -158,7 
+107,7 @@ public void testLimit() { int numSamplesPerTS = 10; int limit = 1; long timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); - List results = runDriver(limit, 1024, randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); + List results = runDriver(limit, randomIntBetween(1, 1024), randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); assertThat(results, hasSize(1)); Page page = results.get(0); assertThat(page.getBlockCount(), equalTo(5)); @@ -186,57 +135,67 @@ public void testLimit() { } public void testRandom() { - int numDocs = 1024; - var ctx = driverContext(); - long timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); - var timeSeriesFactory = createTimeSeriesSourceOperator(Integer.MAX_VALUE, Integer.MAX_VALUE, randomBoolean(), writer -> { - int commitEvery = 64; - long timestamp = timestampStart; - for (int i = 0; i < numDocs; i++) { - String hostname = String.format(Locale.ROOT, "host-%02d", i % 20); - int voltage = i % 5; - writeTS(writer, timestamp, new Object[] { "hostname", hostname }, new Object[] { "voltage", voltage }); - if (i % commitEvery == 0) { - writer.commit(); - } - timestamp += 10_000; + record Doc(int host, long timestamp, long metric) {} + int numDocs = between(1, 1000); + List docs = new ArrayList<>(); + Map timestamps = new HashMap<>(); + long t0 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); + for (int i = 0; i < numDocs; i++) { + int tsid = randomIntBetween(0, 9); + long timestamp = timestamps.compute(tsid, (k, curr) -> { + long t = curr != null ? curr : t0; + return t + randomIntBetween(1, 5000); + }); + docs.add(new Doc(tsid, timestamp, randomIntBetween(1, 10000))); + } + int maxPageSize = between(1, 1024); + int limit = randomBoolean() ? 
between(1, 100000) : Integer.MAX_VALUE; + var timeSeriesFactory = createTimeSeriesSourceOperator(limit, maxPageSize, randomBoolean(), writer -> { + Randomness.shuffle(docs); + for (Doc doc : docs) { + writeTS(writer, doc.timestamp, new Object[] { "hostname", "h" + doc.host }, new Object[] { "metric", doc.metric }); } - return numDocs; + return docs.size(); }); + DriverContext driverContext = driverContext(); List results = new ArrayList<>(); - - var voltageField = new NumberFieldMapper.NumberFieldType("voltage", NumberFieldMapper.NumberType.LONG); + var metricField = new NumberFieldMapper.NumberFieldType("metric", NumberFieldMapper.NumberType.LONG); OperatorTestCase.runDriver( new Driver( - ctx, - timeSeriesFactory.get(ctx), - List.of(ValuesSourceReaderOperatorTests.factory(reader, voltageField, ElementType.LONG).get(ctx)), + driverContext, + timeSeriesFactory.get(driverContext), + List.of(ValuesSourceReaderOperatorTests.factory(reader, metricField, ElementType.LONG).get(driverContext)), new TestResultPageSinkOperator(results::add), () -> {} ) ); - OperatorTestCase.assertDriverContext(ctx); - assertThat(results, hasSize(1)); - Page page = results.get(0); - assertThat(page.getBlockCount(), equalTo(4)); - - DocVector docVector = (DocVector) page.getBlock(0).asVector(); - assertThat(docVector.getPositionCount(), equalTo(numDocs)); - - IntVector tsidVector = (IntVector) page.getBlock(1).asVector(); - assertThat(tsidVector.getPositionCount(), equalTo(numDocs)); - - LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); - assertThat(timestampVector.getPositionCount(), equalTo(numDocs)); - - LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); - assertThat(voltageVector.getPositionCount(), equalTo(numDocs)); - for (int i = 0; i < page.getBlockCount(); i++) { - assertThat(docVector.shards().getInt(0), equalTo(0)); - assertThat(voltageVector.getLong(i), either(greaterThanOrEqualTo(0L)).or(lessThanOrEqualTo(4L))); - assertThat(tsidVector.getInt(i), either(greaterThanOrEqualTo(0)).or(lessThan(20))); - assertThat(timestampVector.getLong(i), greaterThanOrEqualTo(timestampStart)); + docs.sort(Comparator.comparing(Doc::host).thenComparing(Comparator.comparingLong(Doc::timestamp).reversed())); + Map hostToTsidOrd = new HashMap<>(); + timestamps.keySet().stream().sorted().forEach(n -> hostToTsidOrd.put(n, hostToTsidOrd.size())); + int offset = 0; + for (int p = 0; p < results.size(); p++) { + Page page = results.get(p); + if (p < results.size() - 1) { + assertThat(page.getPositionCount(), equalTo(maxPageSize)); + } else { + assertThat(page.getPositionCount(), lessThanOrEqualTo(limit)); + assertThat(page.getPositionCount(), lessThanOrEqualTo(maxPageSize)); + } + assertThat(page.getBlockCount(), equalTo(4)); + DocVector docVector = (DocVector) page.getBlock(0).asVector(); + IntVector tsidVector = (IntVector) page.getBlock(1).asVector(); + LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); + LongVector metricVector = (LongVector) page.getBlock(3).asVector(); + for (int i = 0; i < page.getPositionCount(); i++) { + Doc doc = docs.get(offset); + offset++; + assertThat(docVector.shards().getInt(0), equalTo(0)); + assertThat(tsidVector.getInt(i), equalTo(hostToTsidOrd.get(doc.host))); + assertThat(timestampVector.getLong(i), equalTo(doc.timestamp)); + assertThat(metricVector.getLong(i), equalTo(doc.metric)); + } } + assertThat(offset, equalTo(Math.min(limit, numDocs))); } @Override @@ -289,6 +248,10 @@ List runDriver(int limit, int maxPageSize, boolean 
forceMerge, int numTime ) ); OperatorTestCase.assertDriverContext(ctx); + for (Page result : results) { + assertThat(result.getPositionCount(), lessThanOrEqualTo(maxPageSize)); + assertThat(result.getPositionCount(), lessThanOrEqualTo(limit)); + } return results; } @@ -298,7 +261,6 @@ TimeSeriesSortedSourceOperatorFactory createTimeSeriesSourceOperator( boolean forceMerge, CheckedFunction indexingLogic ) { - int numDocs; Sort sort = new Sort( new SortField(TimeSeriesIdFieldMapper.NAME, SortField.Type.STRING, false), new SortedNumericSortField(DataStreamTimestampFieldMapper.DEFAULT_PATH, SortField.Type.LONG, true) @@ -311,23 +273,18 @@ TimeSeriesSortedSourceOperatorFactory createTimeSeriesSourceOperator( ) ) { - numDocs = indexingLogic.apply(writer); + int numDocs = indexingLogic.apply(writer); if (forceMerge) { writer.forceMerge(1); } reader = writer.getReader(); + assertThat(reader.numDocs(), equalTo(numDocs)); } catch (IOException e) { throw new UncheckedIOException(e); } var ctx = new LuceneSourceOperatorTests.MockShardContext(reader, 0); Function queryFunction = c -> new MatchAllDocsQuery(); - return TimeSeriesSortedSourceOperatorFactory.create( - Math.min(numDocs, limit), - Math.min(numDocs, maxPageSize), - 1, - List.of(ctx), - queryFunction - ); + return TimeSeriesSortedSourceOperatorFactory.create(limit, maxPageSize, 1, List.of(ctx), queryFunction); } static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimensions, Object[] metrics) throws IOException { From 2196576aed458889c9fd6c0c04aed891cec3a8d9 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 25 Mar 2024 10:26:45 -0700 Subject: [PATCH 44/79] Use confined arena for CloseableByteBuffer (#106723) The jdk implementation of CloseableByteBuffer currently uses a shared arena. The assumption was that a buffer might be shared across threads. However, in practice for compression/decompression that is not true, and the shared arena has a noticeable impact on deallocation when the buffer is closed. This commit switches to a confined arena, limtting buffer creation and compress/decompress calls to a single thread. relates #103374 --- .../elasticsearch/nativeaccess/CloseableByteBuffer.java | 9 +++++++++ .../org/elasticsearch/nativeaccess/NativeAccess.java | 6 ++++++ .../nativeaccess/jdk/JdkCloseableByteBuffer.java | 2 +- 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/CloseableByteBuffer.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/CloseableByteBuffer.java index aa5d94080afa9..6590aff307cc6 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/CloseableByteBuffer.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/CloseableByteBuffer.java @@ -10,7 +10,16 @@ import java.nio.ByteBuffer; +/** + * A wrapper around a native {@link ByteBuffer} which allows that buffer to be + * closed synchronously. This is in contrast to JDK created native buffers + * which are deallocated only after GC has cleaned up references to + * the buffer. + */ public interface CloseableByteBuffer extends AutoCloseable { + /** + * Returns the wrapped {@link ByteBuffer}. 
+ */ ByteBuffer buffer(); @Override diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java index 5b2be93dadc1f..f7019a4fd7a96 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java @@ -35,5 +35,11 @@ static NativeAccess instance() { */ Zstd getZstd(); + /** + * Creates a new {@link CloseableByteBuffer}. The buffer must be used within the same thread + * that it is created. + * @param len the number of bytes the buffer should allocate + * @return the buffer + */ CloseableByteBuffer newBuffer(int len); } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkCloseableByteBuffer.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkCloseableByteBuffer.java index d802fd8be7a67..97e6bf2f5580a 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkCloseableByteBuffer.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkCloseableByteBuffer.java @@ -18,7 +18,7 @@ class JdkCloseableByteBuffer implements CloseableByteBuffer { private final ByteBuffer bufferView; JdkCloseableByteBuffer(int len) { - this.arena = Arena.ofShared(); + this.arena = Arena.ofConfined(); this.bufferView = this.arena.allocate(len).asByteBuffer(); } From 092f95e06e7b6ad123dc45f3ae31e7a46703a010 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Mon, 25 Mar 2024 18:25:24 +0000 Subject: [PATCH 45/79] [ML] Always update tokenisation options for chunked inference (#106718) Fixes an issue where if the defaults were used the input was truncated --- .../ElasticsearchInternalService.java | 4 +- .../services/elser/ElserInternalService.java | 7 ++- .../ElasticsearchInternalServiceTests.java | 60 ++++++++++++++++++ .../elser/ElserInternalServiceTests.java | 61 +++++++++++++++++++ 4 files changed, 127 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index a07ebe56a9258..02090ee84e708 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -251,9 +251,9 @@ public void chunkedInfer( return; } - var configUpdate = chunkingOptions.settingsArePresent() + var configUpdate = chunkingOptions != null ? 
new TokenizationConfigUpdate(chunkingOptions.windowSize(), chunkingOptions.span()) - : TextEmbeddingConfigUpdate.EMPTY_INSTANCE; + : new TokenizationConfigUpdate(null, null); var request = InferTrainedModelDeploymentAction.Request.forTextInput( model.getConfigurations().getInferenceEntityId(), diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalService.java index 5069724697818..bb88193612ff4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalService.java @@ -15,6 +15,7 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.OriginSettingClient; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; @@ -288,7 +289,7 @@ public void chunkedInfer( List input, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, + @Nullable ChunkingOptions chunkingOptions, ActionListener> listener ) { try { @@ -298,9 +299,9 @@ public void chunkedInfer( return; } - var configUpdate = chunkingOptions.settingsArePresent() + var configUpdate = chunkingOptions != null ? new TokenizationConfigUpdate(chunkingOptions.windowSize(), chunkingOptions.span()) - : TextExpansionConfigUpdate.EMPTY_UPDATE; + : new TokenizationConfigUpdate(null, null); var request = InferTrainedModelDeploymentAction.Request.forTextInput( model.getConfigurations().getInferenceEntityId(), diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java index 0757012b234bd..073712beb8050 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.core.ml.action.InferTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.inference.results.ChunkedTextEmbeddingResultsTests; import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TokenizationConfigUpdate; import org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings; import java.util.ArrayList; @@ -38,6 +39,8 @@ import java.util.Random; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasSize; @@ -410,6 +413,63 @@ public void testChunkInfer() { assertTrue("Listener not called", gotResults.get()); } + @SuppressWarnings("unchecked") + public void testChunkInferSetsTokenization() { + var expectedSpan = new AtomicInteger(); + var expectedWindowSize = new AtomicReference(); + + Client client = 
mock(Client.class); + ThreadPool threadpool = new TestThreadPool("test"); + try { + when(client.threadPool()).thenReturn(threadpool); + doAnswer(invocationOnMock -> { + var request = (InferTrainedModelDeploymentAction.Request) invocationOnMock.getArguments()[1]; + assertThat(request.getUpdate(), instanceOf(TokenizationConfigUpdate.class)); + var update = (TokenizationConfigUpdate) request.getUpdate(); + assertEquals(update.getSpanSettings().span(), expectedSpan.get()); + assertEquals(update.getSpanSettings().maxSequenceLength(), expectedWindowSize.get()); + return null; + }).when(client) + .execute( + same(InferTrainedModelDeploymentAction.INSTANCE), + any(InferTrainedModelDeploymentAction.Request.class), + any(ActionListener.class) + ); + + var model = new MultilingualE5SmallModel( + "foo", + TaskType.TEXT_EMBEDDING, + "e5", + new MultilingualE5SmallInternalServiceSettings(1, 1, "cross-platform") + ); + var service = createService(client); + + expectedSpan.set(-1); + expectedWindowSize.set(null); + service.chunkedInfer( + model, + List.of("foo", "bar"), + Map.of(), + InputType.SEARCH, + null, + ActionListener.wrap(r -> fail("unexpected result"), e -> fail(e.getMessage())) + ); + + expectedSpan.set(-1); + expectedWindowSize.set(256); + service.chunkedInfer( + model, + List.of("foo", "bar"), + Map.of(), + InputType.SEARCH, + new ChunkingOptions(256, null), + ActionListener.wrap(r -> fail("unexpected result"), e -> fail(e.getMessage())) + ); + } finally { + terminate(threadpool); + } + } + private ElasticsearchInternalService createService(Client client) { var context = new InferenceServiceExtension.InferenceServiceFactoryContext(client); return new ElasticsearchInternalService(context); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceTests.java index f2fd195ab8c5a..dbb50260edaf1 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.core.ml.action.InferTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.inference.results.ChunkedTextExpansionResultsTests; import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TokenizationConfigUpdate; import java.util.ArrayList; import java.util.Collections; @@ -35,6 +36,8 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasSize; @@ -394,6 +397,64 @@ public void testChunkInfer() { assertTrue("Listener not called", gotResults.get()); } + @SuppressWarnings("unchecked") + public void testChunkInferSetsTokenization() { + var expectedSpan = new AtomicInteger(); + var expectedWindowSize = new AtomicReference(); + + ThreadPool threadpool = new TestThreadPool("test"); + Client client = mock(Client.class); + try { + when(client.threadPool()).thenReturn(threadpool); + doAnswer(invocationOnMock -> { + var request = (InferTrainedModelDeploymentAction.Request) invocationOnMock.getArguments()[1]; + 
assertThat(request.getUpdate(), instanceOf(TokenizationConfigUpdate.class)); + var update = (TokenizationConfigUpdate) request.getUpdate(); + assertEquals(update.getSpanSettings().span(), expectedSpan.get()); + assertEquals(update.getSpanSettings().maxSequenceLength(), expectedWindowSize.get()); + return null; + }).when(client) + .execute( + same(InferTrainedModelDeploymentAction.INSTANCE), + any(InferTrainedModelDeploymentAction.Request.class), + any(ActionListener.class) + ); + + var model = new ElserInternalModel( + "foo", + TaskType.SPARSE_EMBEDDING, + "elser", + new ElserInternalServiceSettings(1, 1, "elser"), + new ElserMlNodeTaskSettings() + ); + var service = createService(client); + + expectedSpan.set(-1); + expectedWindowSize.set(null); + service.chunkedInfer( + model, + List.of("foo", "bar"), + Map.of(), + InputType.SEARCH, + null, + ActionListener.wrap(r -> fail("unexpected result"), e -> fail(e.getMessage())) + ); + + expectedSpan.set(-1); + expectedWindowSize.set(256); + service.chunkedInfer( + model, + List.of("foo", "bar"), + Map.of(), + InputType.SEARCH, + new ChunkingOptions(256, null), + ActionListener.wrap(r -> fail("unexpected result"), e -> fail(e.getMessage())) + ); + } finally { + terminate(threadpool); + } + } + private ElserInternalService createService(Client client) { var context = new InferenceServiceExtension.InferenceServiceFactoryContext(client); return new ElserInternalService(context);
From 78115fbc9e9049ec99df9595eb2bb53db1365833 Mon Sep 17 00:00:00 2001
From: Mark Tozzi
Date: Mon, 25 Mar 2024 14:27:43 -0400
Subject: [PATCH 46/79] [ESQL] Migrate BooleanFunctionEqualsElimination optimization (#106692)
Relates to #105217 This copies the BooleanFunctionEqualsElimination logical optimization into ESQL, following the pattern established in #106499. I've copied the optimization rule into the ESQL version of OptimizerRules, and the tests into OptimizerRulesTests, and changed the imports &c to point to the appropriate ESQL classes instead of their QL counterparts. I only saw two tests for this one.
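As a rough illustration of the rewrite (the expressions here are only a sketch, not taken from the change itself): when the left-hand side of an equality is a boolean-valued function, fn(x) == true simplifies to fn(x) and fn(x) == false to NOT fn(x), with the mirrored result for !=; bare field references are left untouched because the rule only fires when the left-hand side is a Function.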
--- .../esql/optimizer/LogicalPlanOptimizer.java | 3 +- .../xpack/esql/optimizer/OptimizerRules.java | 32 ++++++++++++++++ .../esql/optimizer/OptimizerRulesTests.java | 38 ++++++++++++++++++- 3 files changed, 69 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index af8ad7a1fc435..93505fa4f20fc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -46,7 +46,6 @@ import org.elasticsearch.xpack.ql.expression.predicate.logical.Or; import org.elasticsearch.xpack.ql.expression.predicate.regex.RegexMatch; import org.elasticsearch.xpack.ql.optimizer.OptimizerRules; -import org.elasticsearch.xpack.ql.optimizer.OptimizerRules.BooleanFunctionEqualsElimination; import org.elasticsearch.xpack.ql.optimizer.OptimizerRules.ConstantFolding; import org.elasticsearch.xpack.ql.optimizer.OptimizerRules.LiteralsOnTheRight; import org.elasticsearch.xpack.ql.optimizer.OptimizerRules.PruneLiteralsInOrderBy; @@ -127,7 +126,7 @@ protected static Batch operators() { // needs to occur before BinaryComparison combinations (see class) new org.elasticsearch.xpack.esql.optimizer.OptimizerRules.PropagateEquals(), new PropagateNullable(), - new BooleanFunctionEqualsElimination(), + new org.elasticsearch.xpack.esql.optimizer.OptimizerRules.BooleanFunctionEqualsElimination(), new org.elasticsearch.xpack.esql.optimizer.OptimizerRules.CombineDisjunctionsToIn(), new SimplifyComparisonsArithmetics(EsqlDataTypes::areCompatible), // prune/elimination diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java index 3ae662580a200..38ac596135abb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java @@ -41,10 +41,12 @@ import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Expressions; import org.elasticsearch.xpack.ql.expression.Literal; +import org.elasticsearch.xpack.ql.expression.function.Function; import org.elasticsearch.xpack.ql.expression.predicate.Predicates; import org.elasticsearch.xpack.ql.expression.predicate.Range; import org.elasticsearch.xpack.ql.expression.predicate.logical.And; import org.elasticsearch.xpack.ql.expression.predicate.logical.BinaryLogic; +import org.elasticsearch.xpack.ql.expression.predicate.logical.Not; import org.elasticsearch.xpack.ql.expression.predicate.logical.Or; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.ql.plan.QueryPlan; @@ -65,6 +67,7 @@ import java.util.Set; import static org.elasticsearch.xpack.ql.common.Failure.fail; +import static org.elasticsearch.xpack.ql.expression.Literal.FALSE; import static org.elasticsearch.xpack.ql.expression.Literal.TRUE; import static org.elasticsearch.xpack.ql.expression.predicate.Predicates.combineOr; import static org.elasticsearch.xpack.ql.expression.predicate.Predicates.splitOr; @@ -257,6 +260,35 @@ protected Expression rule(Or or) { } } + /** + * This rule must always be placed after {@link 
org.elasticsearch.xpack.ql.optimizer.OptimizerRules.LiteralsOnTheRight}, since it looks + * at TRUE/FALSE literals' existence on the right hand-side of the {@link Equals}/{@link NotEquals} expressions. + */ + public static final class BooleanFunctionEqualsElimination extends + org.elasticsearch.xpack.ql.optimizer.OptimizerRules.OptimizerExpressionRule { + + BooleanFunctionEqualsElimination() { + super(org.elasticsearch.xpack.ql.optimizer.OptimizerRules.TransformDirection.UP); + } + + @Override + protected Expression rule(BinaryComparison bc) { + if ((bc instanceof Equals || bc instanceof NotEquals) && bc.left() instanceof Function) { + // for expression "==" or "!=" TRUE/FALSE, return the expression itself or its negated variant + + // TODO: Replace use of QL Not with ESQL Not + if (TRUE.equals(bc.right())) { + return bc instanceof Equals ? bc.left() : new Not(bc.left().source(), bc.left()); + } + if (FALSE.equals(bc.right())) { + return bc instanceof Equals ? new Not(bc.left().source(), bc.left()) : bc.left(); + } + } + + return bc; + } + } + /** * Propagate Equals to eliminate conjuncted Ranges or BinaryComparisons. * When encountering a different Equals, non-containing {@link Range} or {@link BinaryComparison}, the conjunction becomes false. diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java index 1aac8efbe6f65..01fcd222a5141 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java @@ -23,7 +23,9 @@ import org.elasticsearch.xpack.ql.expression.predicate.Predicates; import org.elasticsearch.xpack.ql.expression.predicate.Range; import org.elasticsearch.xpack.ql.expression.predicate.logical.And; +import org.elasticsearch.xpack.ql.expression.predicate.logical.Not; import org.elasticsearch.xpack.ql.expression.predicate.logical.Or; +import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.ql.plan.logical.Filter; import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.ql.tree.Source; @@ -32,11 +34,10 @@ import java.util.List; import static java.util.Arrays.asList; -import static org.elasticsearch.xpack.ql.TestUtils.equalsOf; -import static org.elasticsearch.xpack.ql.TestUtils.nullEqualsOf; import static org.elasticsearch.xpack.ql.TestUtils.rangeOf; import static org.elasticsearch.xpack.ql.TestUtils.relation; import static org.elasticsearch.xpack.ql.expression.Literal.FALSE; +import static org.elasticsearch.xpack.ql.expression.Literal.NULL; import static org.elasticsearch.xpack.ql.expression.Literal.TRUE; import static org.elasticsearch.xpack.ql.tree.Source.EMPTY; import static org.hamcrest.Matchers.contains; @@ -182,6 +183,39 @@ public void testOrWithNonCombinableExpressions() { assertThat(in.list(), contains(ONE, THREE)); } + // Test BooleanFunctionEqualsElimination + public void testBoolEqualsSimplificationOnExpressions() { + OptimizerRules.BooleanFunctionEqualsElimination s = new OptimizerRules.BooleanFunctionEqualsElimination(); + Expression exp = new GreaterThan(EMPTY, getFieldAttribute(), new Literal(EMPTY, 0, DataTypes.INTEGER), null); + + assertEquals(exp, s.rule(new Equals(EMPTY, exp, TRUE))); + // TODO: Replace use of QL Not with ESQL Not + assertEquals(new Not(EMPTY, exp), 
s.rule(new Equals(EMPTY, exp, FALSE))); + } + + public void testBoolEqualsSimplificationOnFields() { + OptimizerRules.BooleanFunctionEqualsElimination s = new OptimizerRules.BooleanFunctionEqualsElimination(); + + FieldAttribute field = getFieldAttribute(); + + List comparisons = asList( + new Equals(EMPTY, field, TRUE), + new Equals(EMPTY, field, FALSE), + notEqualsOf(field, TRUE), + notEqualsOf(field, FALSE), + new Equals(EMPTY, NULL, TRUE), + new Equals(EMPTY, NULL, FALSE), + notEqualsOf(NULL, TRUE), + notEqualsOf(NULL, FALSE) + ); + + for (BinaryComparison comparison : comparisons) { + assertEquals(comparison, s.rule(comparison)); + } + } + + // Test Propagate Equals + // a == 1 AND a == 2 -> FALSE public void testDualEqualsConjunction() { FieldAttribute fa = getFieldAttribute(); From 96230f7a7d65c7dca9d86e6e65f3f961bc59cff1 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 25 Mar 2024 11:30:27 -0700 Subject: [PATCH 47/79] Use CloseableByteBuffer in compress/decompress signatures (#106724) CloseableByteBuffer is backed by native memory segments, but the interfaces for compress and decompress methods of zstd take ByteBuffer. Although both Jna and the Jdk can deal with turning the native ByteBuffer back into an address to pass to the native method, the jdk may have a more significant cost to that action. This commit changes the signature of compress and decompress to take in CloseableByteBuffer so that each implementation can do its own unwrapping to get the appropriate native address. relates #103374 --- .../jna/JnaCloseableByteBuffer.java | 2 +- .../nativeaccess/jna/JnaZstdLibrary.java | 35 ++++++++++++++----- .../org/elasticsearch/nativeaccess/Zstd.java | 12 ++----- .../nativeaccess/lib/ZstdLibrary.java | 6 ++-- .../jdk/JdkCloseableByteBuffer.java | 5 ++- .../nativeaccess/jdk/JdkZstdLibrary.java | 30 +++++++++++----- .../elasticsearch/nativeaccess/ZstdTests.java | 26 +++++++------- 7 files changed, 71 insertions(+), 45 deletions(-) diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaCloseableByteBuffer.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaCloseableByteBuffer.java index e47b17e234705..e987f8042691b 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaCloseableByteBuffer.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaCloseableByteBuffer.java @@ -15,7 +15,7 @@ import java.nio.ByteBuffer; class JnaCloseableByteBuffer implements CloseableByteBuffer { - private final Memory memory; + final Memory memory; private final ByteBuffer bufferView; JnaCloseableByteBuffer(int len) { diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaZstdLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaZstdLibrary.java index f0581633ea969..f2c4a85c8f2bc 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaZstdLibrary.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaZstdLibrary.java @@ -10,23 +10,23 @@ import com.sun.jna.Library; import com.sun.jna.Native; +import com.sun.jna.Pointer; +import org.elasticsearch.nativeaccess.CloseableByteBuffer; import org.elasticsearch.nativeaccess.lib.ZstdLibrary; -import java.nio.ByteBuffer; - class JnaZstdLibrary implements ZstdLibrary { private interface NativeFunctions extends Library { long ZSTD_compressBound(int scrLen); - long ZSTD_compress(ByteBuffer dst, int dstLen, ByteBuffer src, int srcLen, int compressionLevel); + long 
ZSTD_compress(Pointer dst, int dstLen, Pointer src, int srcLen, int compressionLevel); boolean ZSTD_isError(long code); String ZSTD_getErrorName(long code); - long ZSTD_decompress(ByteBuffer dst, int dstLen, ByteBuffer src, int srcLen); + long ZSTD_decompress(Pointer dst, int dstLen, Pointer src, int srcLen); } private final NativeFunctions functions; @@ -41,8 +41,18 @@ public long compressBound(int scrLen) { } @Override - public long compress(ByteBuffer dst, ByteBuffer src, int compressionLevel) { - return functions.ZSTD_compress(dst, dst.remaining(), src, src.remaining(), compressionLevel); + public long compress(CloseableByteBuffer dst, CloseableByteBuffer src, int compressionLevel) { + assert dst instanceof JnaCloseableByteBuffer; + assert src instanceof JnaCloseableByteBuffer; + var nativeDst = (JnaCloseableByteBuffer) dst; + var nativeSrc = (JnaCloseableByteBuffer) src; + return functions.ZSTD_compress( + nativeDst.memory.share(dst.buffer().position()), + dst.buffer().remaining(), + nativeSrc.memory.share(src.buffer().position()), + src.buffer().remaining(), + compressionLevel + ); } @Override @@ -56,7 +66,16 @@ public String getErrorName(long code) { } @Override - public long decompress(ByteBuffer dst, ByteBuffer src) { - return functions.ZSTD_decompress(dst, dst.remaining(), src, src.remaining()); + public long decompress(CloseableByteBuffer dst, CloseableByteBuffer src) { + assert dst instanceof JnaCloseableByteBuffer; + assert src instanceof JnaCloseableByteBuffer; + var nativeDst = (JnaCloseableByteBuffer) dst; + var nativeSrc = (JnaCloseableByteBuffer) src; + return functions.ZSTD_decompress( + nativeDst.memory.share(dst.buffer().position()), + dst.buffer().remaining(), + nativeSrc.memory.share(src.buffer().position()), + src.buffer().remaining() + ); } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/Zstd.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/Zstd.java index 6a0d348d5251b..60e65383bf9a2 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/Zstd.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/Zstd.java @@ -25,13 +25,9 @@ public final class Zstd { * Compress the content of {@code src} into {@code dst} at compression level {@code level}, and return the number of compressed bytes. * {@link ByteBuffer#position()} and {@link ByteBuffer#limit()} of both {@link ByteBuffer}s are left unmodified. */ - public int compress(ByteBuffer dst, ByteBuffer src, int level) { + public int compress(CloseableByteBuffer dst, CloseableByteBuffer src, int level) { Objects.requireNonNull(dst, "Null destination buffer"); Objects.requireNonNull(src, "Null source buffer"); - assert dst.isDirect(); - assert dst.isReadOnly() == false; - assert src.isDirect(); - assert src.isReadOnly() == false; long ret = zstdLib.compress(dst, src, level); if (zstdLib.isError(ret)) { throw new IllegalArgumentException(zstdLib.getErrorName(ret)); @@ -45,13 +41,9 @@ public int compress(ByteBuffer dst, ByteBuffer src, int level) { * Compress the content of {@code src} into {@code dst}, and return the number of decompressed bytes. {@link ByteBuffer#position()} and * {@link ByteBuffer#limit()} of both {@link ByteBuffer}s are left unmodified. 
*/ - public int decompress(ByteBuffer dst, ByteBuffer src) { + public int decompress(CloseableByteBuffer dst, CloseableByteBuffer src) { Objects.requireNonNull(dst, "Null destination buffer"); Objects.requireNonNull(src, "Null source buffer"); - assert dst.isDirect(); - assert dst.isReadOnly() == false; - assert src.isDirect(); - assert src.isReadOnly() == false; long ret = zstdLib.decompress(dst, src); if (zstdLib.isError(ret)) { throw new IllegalArgumentException(zstdLib.getErrorName(ret)); diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/ZstdLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/ZstdLibrary.java index feb1dbe8e3d61..ea4c8efa5318a 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/ZstdLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/ZstdLibrary.java @@ -8,17 +8,17 @@ package org.elasticsearch.nativeaccess.lib; -import java.nio.ByteBuffer; +import org.elasticsearch.nativeaccess.CloseableByteBuffer; public non-sealed interface ZstdLibrary extends NativeLibrary { long compressBound(int scrLen); - long compress(ByteBuffer dst, ByteBuffer src, int compressionLevel); + long compress(CloseableByteBuffer dst, CloseableByteBuffer src, int compressionLevel); boolean isError(long code); String getErrorName(long code); - long decompress(ByteBuffer dst, ByteBuffer src); + long decompress(CloseableByteBuffer dst, CloseableByteBuffer src); } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkCloseableByteBuffer.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkCloseableByteBuffer.java index 97e6bf2f5580a..daa012d35598e 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkCloseableByteBuffer.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkCloseableByteBuffer.java @@ -11,15 +11,18 @@ import org.elasticsearch.nativeaccess.CloseableByteBuffer; import java.lang.foreign.Arena; +import java.lang.foreign.MemorySegment; import java.nio.ByteBuffer; class JdkCloseableByteBuffer implements CloseableByteBuffer { private final Arena arena; + final MemorySegment segment; private final ByteBuffer bufferView; JdkCloseableByteBuffer(int len) { this.arena = Arena.ofConfined(); - this.bufferView = this.arena.allocate(len).asByteBuffer(); + this.segment = arena.allocate(len); + this.bufferView = segment.asByteBuffer(); } @Override diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java index d193750939b23..e3e972bc19d72 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java @@ -8,12 +8,12 @@ package org.elasticsearch.nativeaccess.jdk; +import org.elasticsearch.nativeaccess.CloseableByteBuffer; import org.elasticsearch.nativeaccess.lib.ZstdLibrary; import java.lang.foreign.FunctionDescriptor; import java.lang.foreign.MemorySegment; import java.lang.invoke.MethodHandle; -import java.nio.ByteBuffer; import static java.lang.foreign.ValueLayout.ADDRESS; import static java.lang.foreign.ValueLayout.JAVA_BOOLEAN; @@ -49,11 +49,17 @@ public long compressBound(int srcLen) { } @Override - public long compress(ByteBuffer dst, ByteBuffer src, int compressionLevel) { - var nativeDst = MemorySegment.ofBuffer(dst); - var nativeSrc = MemorySegment.ofBuffer(src); + public 
long compress(CloseableByteBuffer dst, CloseableByteBuffer src, int compressionLevel) { + assert dst instanceof JdkCloseableByteBuffer; + assert src instanceof JdkCloseableByteBuffer; + var nativeDst = (JdkCloseableByteBuffer) dst; + var nativeSrc = (JdkCloseableByteBuffer) src; + var dstSize = dst.buffer().remaining(); + var srcSize = src.buffer().remaining(); + var segmentDst = nativeDst.segment.asSlice(dst.buffer().position(), dstSize); + var segmentSrc = nativeSrc.segment.asSlice(src.buffer().position(), srcSize); try { - return (long) compress$mh.invokeExact(nativeDst, dst.remaining(), nativeSrc, src.remaining(), compressionLevel); + return (long) compress$mh.invokeExact(segmentDst, dstSize, segmentSrc, srcSize, compressionLevel); } catch (Throwable t) { throw new AssertionError(t); } @@ -79,11 +85,17 @@ public String getErrorName(long code) { } @Override - public long decompress(ByteBuffer dst, ByteBuffer src) { - var nativeDst = MemorySegment.ofBuffer(dst); - var nativeSrc = MemorySegment.ofBuffer(src); + public long decompress(CloseableByteBuffer dst, CloseableByteBuffer src) { + assert dst instanceof JdkCloseableByteBuffer; + assert src instanceof JdkCloseableByteBuffer; + var nativeDst = (JdkCloseableByteBuffer) dst; + var nativeSrc = (JdkCloseableByteBuffer) src; + var dstSize = dst.buffer().remaining(); + var srcSize = src.buffer().remaining(); + var segmentDst = nativeDst.segment.asSlice(dst.buffer().position(), dstSize); + var segmentSrc = nativeSrc.segment.asSlice(src.buffer().position(), srcSize); try { - return (long) decompress$mh.invokeExact(nativeDst, dst.remaining(), nativeSrc, src.remaining()); + return (long) decompress$mh.invokeExact(segmentDst, dstSize, segmentSrc, srcSize); } catch (Throwable t) { throw new AssertionError(t); } diff --git a/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java b/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java index d051961b06c5f..1282b1fee9206 100644 --- a/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java +++ b/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java @@ -41,16 +41,16 @@ public void testCompressValidation() { var srcBuf = src.buffer(); var dstBuf = dst.buffer(); - var npe1 = expectThrows(NullPointerException.class, () -> zstd.compress(null, srcBuf, 0)); + var npe1 = expectThrows(NullPointerException.class, () -> zstd.compress(null, src, 0)); assertThat(npe1.getMessage(), equalTo("Null destination buffer")); - var npe2 = expectThrows(NullPointerException.class, () -> zstd.compress(dstBuf, null, 0)); + var npe2 = expectThrows(NullPointerException.class, () -> zstd.compress(dst, null, 0)); assertThat(npe2.getMessage(), equalTo("Null source buffer")); // dst capacity too low for (int i = 0; i < srcBuf.remaining(); ++i) { srcBuf.put(i, randomByte()); } - var e = expectThrows(IllegalArgumentException.class, () -> zstd.compress(dstBuf, srcBuf, 0)); + var e = expectThrows(IllegalArgumentException.class, () -> zstd.compress(dst, src, 0)); assertThat(e.getMessage(), equalTo("Destination buffer is too small")); } } @@ -64,21 +64,21 @@ public void testDecompressValidation() { var originalBuf = original.buffer(); var compressedBuf = compressed.buffer(); - var npe1 = expectThrows(NullPointerException.class, () -> zstd.decompress(null, originalBuf)); + var npe1 = expectThrows(NullPointerException.class, () -> zstd.decompress(null, original)); assertThat(npe1.getMessage(), equalTo("Null destination buffer")); - var npe2 = 
expectThrows(NullPointerException.class, () -> zstd.decompress(compressedBuf, null)); + var npe2 = expectThrows(NullPointerException.class, () -> zstd.decompress(compressed, null)); assertThat(npe2.getMessage(), equalTo("Null source buffer")); // Invalid compressed format for (int i = 0; i < originalBuf.remaining(); ++i) { originalBuf.put(i, (byte) i); } - var e = expectThrows(IllegalArgumentException.class, () -> zstd.decompress(compressedBuf, originalBuf)); + var e = expectThrows(IllegalArgumentException.class, () -> zstd.decompress(compressed, original)); assertThat(e.getMessage(), equalTo("Unknown frame descriptor")); - int compressedLength = zstd.compress(compressedBuf, originalBuf, 0); + int compressedLength = zstd.compress(compressed, original, 0); compressedBuf.limit(compressedLength); - e = expectThrows(IllegalArgumentException.class, () -> zstd.decompress(restored.buffer(), compressedBuf)); + e = expectThrows(IllegalArgumentException.class, () -> zstd.decompress(restored, compressed)); assertThat(e.getMessage(), equalTo("Destination buffer is too small")); } @@ -109,9 +109,9 @@ private void doTestRoundtrip(byte[] data) { var restored = nativeAccess.newBuffer(data.length) ) { original.buffer().put(0, data); - int compressedLength = zstd.compress(compressed.buffer(), original.buffer(), randomIntBetween(-3, 9)); + int compressedLength = zstd.compress(compressed, original, randomIntBetween(-3, 9)); compressed.buffer().limit(compressedLength); - int decompressedLength = zstd.decompress(restored.buffer(), compressed.buffer()); + int decompressedLength = zstd.decompress(restored, compressed); assertThat(restored.buffer(), equalTo(original.buffer())); assertThat(decompressedLength, equalTo(data.length)); } @@ -127,15 +127,15 @@ private void doTestRoundtrip(byte[] data) { original.buffer().put(decompressedOffset, data); original.buffer().position(decompressedOffset); compressed.buffer().position(compressedOffset); - int compressedLength = zstd.compress(compressed.buffer(), original.buffer(), randomIntBetween(-3, 9)); + int compressedLength = zstd.compress(compressed, original, randomIntBetween(-3, 9)); compressed.buffer().limit(compressedOffset + compressedLength); restored.buffer().position(decompressedOffset); - int decompressedLength = zstd.decompress(restored.buffer(), compressed.buffer()); + int decompressedLength = zstd.decompress(restored, compressed); + assertThat(decompressedLength, equalTo(data.length)); assertThat( restored.buffer().slice(decompressedOffset, data.length), equalTo(original.buffer().slice(decompressedOffset, data.length)) ); - assertThat(decompressedLength, equalTo(data.length)); } } } From 49cf3cb37b352ff8d50bf893f492e041d87cf6f6 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 25 Mar 2024 12:24:45 -0700 Subject: [PATCH 48/79] Add rate aggregation function (#106703) This PR introduces a rate aggregation function to ESQL, with two main changes: 1. Define of the grouping state for the rate aggregation function: - For raw input, the function expects data to arrive in descending timestamp order without gaps; hence, perform a reduction with each incoming entry. Each grouping should consist of at most two entries: one for the starting time and one for the ending time. - For intermediate input, the function buffers data as they can arrive out of order, although non-overlapping. This shouldn't have significant issues, as we expect at most two entries per participating pipeline. - The intermediate output consists of three blocks: timestamps, values, and resets. 
Both timestamps and values can contain multiple entries sorted in descending order by timestamp. - This rate function does not support non-grouping aggregation. However, I can enable it if we think otherwise. 2. Modifies the GroupingAggregatorImplementer code generator to include the timestamp vector block. I explored several options to generate multiple input blocks. However, both generated and code generator are much more complicated in a generic solution. And it's unlikely that we will need another function requires multiple input blocks. Hence, I decided to tweak this class to append the timestamps long vector block when specified. --- .../compute/ann/GroupingAggregator.java | 5 + x-pack/plugin/esql/compute/build.gradle | 20 +- ...AggregatorFunctionSupplierImplementer.java | 23 +- .../compute/gen/AggregatorProcessor.java | 16 +- .../gen/GroupingAggregatorImplementer.java | 47 ++- .../aggregation/RateDoubleAggregator.java | 277 +++++++++++++++++ .../aggregation/RateIntAggregator.java | 278 +++++++++++++++++ .../aggregation/RateLongAggregator.java | 277 +++++++++++++++++ .../RateDoubleAggregatorFunctionSupplier.java | 41 +++ .../RateDoubleGroupingAggregatorFunction.java | 225 ++++++++++++++ .../RateIntAggregatorFunctionSupplier.java | 41 +++ .../RateIntGroupingAggregatorFunction.java | 225 ++++++++++++++ .../RateLongAggregatorFunctionSupplier.java | 41 +++ .../RateLongGroupingAggregatorFunction.java | 225 ++++++++++++++ .../aggregation/X-RateAggregator.java.st | 280 ++++++++++++++++++ .../TimeSeriesSortedSourceOperatorTests.java | 137 +++++++++ 16 files changed, 2145 insertions(+), 13 deletions(-) create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st diff --git a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java index bb7b2cc888c2c..7e92fc5c2734e 100644 --- a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java +++ b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java @@ -17,4 +17,9 @@ public @interface GroupingAggregator { IntermediateState[] value() 
default {}; + + /** + * If {@code true} then the @timestamp LongVector will be appended to the input blocks of the aggregation function. + */ + boolean includeTimestamps() default false; } diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index e5d076aa0e041..d04daf6631447 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -386,7 +386,25 @@ tasks.named('stringTemplates').configure { it.inputFile = valuesAggregatorInputFile it.outputFile = "org/elasticsearch/compute/aggregation/ValuesBytesRefAggregator.java" } - File multivalueDedupeInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st") + + File rateAggregatorInputFile = file("src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st") + template { + it.properties = intProperties + it.inputFile = rateAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/RateIntAggregator.java" + } + template { + it.properties = longProperties + it.inputFile = rateAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/RateLongAggregator.java" + } + template { + it.properties = doubleProperties + it.inputFile = rateAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/RateDoubleAggregator.java" + } + + File multivalueDedupeInputFile = file("src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st") template { it.properties = intProperties it.inputFile = multivalueDedupeInputFile diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java index a9bea3105ee10..3f031db2978f9 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java @@ -54,8 +54,12 @@ public AggregatorFunctionSupplierImplementer( this.groupingAggregatorImplementer = groupingAggregatorImplementer; Set createParameters = new LinkedHashSet<>(); - createParameters.addAll(aggregatorImplementer.createParameters()); - createParameters.addAll(groupingAggregatorImplementer.createParameters()); + if (aggregatorImplementer != null) { + createParameters.addAll(aggregatorImplementer.createParameters()); + } + if (groupingAggregatorImplementer != null) { + createParameters.addAll(groupingAggregatorImplementer.createParameters()); + } this.createParameters = new ArrayList<>(createParameters); this.createParameters.add(0, new Parameter(LIST_INTEGER, "channels")); @@ -84,7 +88,11 @@ private TypeSpec type() { createParameters.stream().forEach(p -> p.declareField(builder)); builder.addMethod(ctor()); - builder.addMethod(aggregator()); + if (aggregatorImplementer != null) { + builder.addMethod(aggregator()); + } else { + builder.addMethod(unsupportedNonGroupingAggregator()); + } builder.addMethod(groupingAggregator()); builder.addMethod(describe()); return builder.build(); @@ -96,6 +104,15 @@ private MethodSpec ctor() { return builder.build(); } + private MethodSpec unsupportedNonGroupingAggregator() { + MethodSpec.Builder builder = MethodSpec.methodBuilder("aggregator") + .addParameter(DRIVER_CONTEXT, "driverContext") + .returns(Types.AGGREGATOR_FUNCTION); + 
builder.addAnnotation(Override.class).addModifiers(Modifier.PUBLIC); + builder.addStatement("throw new UnsupportedOperationException($S)", "non-grouping aggregator is not supported"); + return builder.build(); + } + private MethodSpec aggregator() { MethodSpec.Builder builder = MethodSpec.methodBuilder("aggregator") .addParameter(DRIVER_CONTEXT, "driverContext") diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorProcessor.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorProcessor.java index b724ee9152ca8..d07b24047b7e2 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorProcessor.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorProcessor.java @@ -86,17 +86,21 @@ public boolean process(Set set, RoundEnvironment roundEnv write(aggClass, "aggregator", implementer.sourceFile(), env); } GroupingAggregatorImplementer groupingAggregatorImplementer = null; - if (aggClass.getAnnotation(Aggregator.class) != null) { - assert aggClass.getAnnotation(GroupingAggregator.class) != null; + if (aggClass.getAnnotation(GroupingAggregator.class) != null) { IntermediateState[] intermediateState = aggClass.getAnnotation(GroupingAggregator.class).value(); - if (intermediateState.length == 0) { + if (intermediateState.length == 0 && aggClass.getAnnotation(Aggregator.class) != null) { intermediateState = aggClass.getAnnotation(Aggregator.class).value(); } - - groupingAggregatorImplementer = new GroupingAggregatorImplementer(env.getElementUtils(), aggClass, intermediateState); + boolean includeTimestamps = aggClass.getAnnotation(GroupingAggregator.class).includeTimestamps(); + groupingAggregatorImplementer = new GroupingAggregatorImplementer( + env.getElementUtils(), + aggClass, + intermediateState, + includeTimestamps + ); write(aggClass, "grouping aggregator", groupingAggregatorImplementer.sourceFile(), env); } - if (implementer != null && groupingAggregatorImplementer != null) { + if (implementer != null || groupingAggregatorImplementer != null) { write( aggClass, "aggregator function supplier", diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java index cc55e19b7d421..1be01f445691d 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java @@ -47,6 +47,8 @@ import static org.elasticsearch.compute.gen.Types.INT_VECTOR; import static org.elasticsearch.compute.gen.Types.LIST_AGG_FUNC_DESC; import static org.elasticsearch.compute.gen.Types.LIST_INTEGER; +import static org.elasticsearch.compute.gen.Types.LONG_BLOCK; +import static org.elasticsearch.compute.gen.Types.LONG_VECTOR; import static org.elasticsearch.compute.gen.Types.PAGE; import static org.elasticsearch.compute.gen.Types.SEEN_GROUP_IDS; @@ -71,8 +73,14 @@ public class GroupingAggregatorImplementer { private final List createParameters; private final ClassName implementation; private final List intermediateState; + private final boolean includeTimestampVector; - public GroupingAggregatorImplementer(Elements elements, TypeElement declarationType, IntermediateState[] interStateAnno) { + public GroupingAggregatorImplementer( 
+ Elements elements, + TypeElement declarationType, + IntermediateState[] interStateAnno, + boolean includeTimestampVector + ) { this.declarationType = declarationType; this.init = findRequiredMethod(declarationType, new String[] { "init", "initGrouping" }, e -> true); @@ -103,6 +111,7 @@ public GroupingAggregatorImplementer(Elements elements, TypeElement declarationT intermediateState = Arrays.stream(interStateAnno) .map(AggregatorImplementer.IntermediateStateDesc::newIntermediateStateDesc) .toList(); + this.includeTimestampVector = includeTimestampVector; } public ClassName implementation() { @@ -264,15 +273,24 @@ private MethodSpec prepareProcessPage() { builder.addStatement("$T valuesBlock = page.getBlock(channels.get(0))", valueBlockType(init, combine)); builder.addStatement("$T valuesVector = valuesBlock.asVector()", valueVectorType(init, combine)); + if (includeTimestampVector) { + builder.addStatement("$T timestampsBlock = page.getBlock(channels.get(1))", LONG_BLOCK); + builder.addStatement("$T timestampsVector = timestampsBlock.asVector()", LONG_VECTOR); + + builder.beginControlFlow("if (timestampsVector == null) "); + builder.addStatement("throw new IllegalStateException($S)", "expected @timestamp vector; but got a block"); + builder.endControlFlow(); + } builder.beginControlFlow("if (valuesVector == null)"); + String extra = includeTimestampVector ? ", timestampsVector" : ""; { builder.beginControlFlow("if (valuesBlock.mayHaveNulls())"); builder.addStatement("state.enableGroupIdTracking(seenGroupIds)"); builder.endControlFlow(); - builder.addStatement("return $L", addInput(b -> b.addStatement("addRawInput(positionOffset, groupIds, valuesBlock)"))); + builder.addStatement("return $L", addInput(b -> b.addStatement("addRawInput(positionOffset, groupIds, valuesBlock$L)", extra))); } builder.endControlFlow(); - builder.addStatement("return $L", addInput(b -> b.addStatement("addRawInput(positionOffset, groupIds, valuesVector)"))); + builder.addStatement("return $L", addInput(b -> b.addStatement("addRawInput(positionOffset, groupIds, valuesVector$L)", extra))); return builder.build(); } @@ -308,6 +326,9 @@ private MethodSpec addRawInputLoop(TypeName groupsType, TypeName valuesType) { MethodSpec.Builder builder = MethodSpec.methodBuilder(methodName); builder.addModifiers(Modifier.PRIVATE); builder.addParameter(TypeName.INT, "positionOffset").addParameter(groupsType, "groups").addParameter(valuesType, "values"); + if (includeTimestampVector) { + builder.addParameter(LONG_VECTOR, "timestamps"); + } if (valuesIsBytesRef) { // Add bytes_ref scratch var that will be used for bytes_ref blocks/vectors builder.addStatement("$T scratch = new $T()", BYTES_REF, BYTES_REF); @@ -354,6 +375,10 @@ private void combineRawInput(MethodSpec.Builder builder, String blockVariable, S combineRawInputForBytesRef(builder, blockVariable, offsetVariable); return; } + if (includeTimestampVector) { + combineRawInputWithTimestamp(builder, offsetVariable); + return; + } TypeName valueType = TypeName.get(combine.getParameters().get(combine.getParameters().size() - 1).asType()); if (valueType.isPrimitive() == false) { throw new IllegalArgumentException("second parameter to combine must be a primitive"); @@ -403,6 +428,22 @@ private void combineRawInputForVoid( ); } + private void combineRawInputWithTimestamp(MethodSpec.Builder builder, String offsetVariable) { + TypeName valueType = TypeName.get(combine.getParameters().get(combine.getParameters().size() - 1).asType()); + String blockType = 
valueType.toString().substring(0, 1).toUpperCase(Locale.ROOT) + valueType.toString().substring(1); + if (offsetVariable.contains(" + ")) { + builder.addStatement("var valuePosition = $L", offsetVariable); + offsetVariable = "valuePosition"; + } + builder.addStatement( + "$T.combine(state, groupId, timestamps.getLong($L), values.get$L($L))", + declarationType, + offsetVariable, + blockType, + offsetVariable + ); + } + private void combineRawInputForBytesRef(MethodSpec.Builder builder, String blockVariable, String offsetVariable) { // scratch is a BytesRef var that must have been defined before the iteration starts builder.addStatement("$T.combine(state, groupId, $L.getBytesRef($L, scratch))", declarationType, blockVariable, offsetVariable); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java new file mode 100644 index 0000000000000..016bf9387ca4b --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java @@ -0,0 +1,277 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ObjectArray; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +/** + * A rate grouping aggregation definition for double. + * This class is generated. Edit `X-RateAggregator.java.st` instead. 
+ */ +@GroupingAggregator( + includeTimestamps = true, + value = { + @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), + @IntermediateState(name = "values", type = "DOUBLE_BLOCK"), + @IntermediateState(name = "resets", type = "DOUBLE") } +) +public class RateDoubleAggregator { + public static DoubleRateGroupingState initGrouping(BigArrays bigArrays, long unitInMillis) { + // TODO: pass BlockFactory instead bigArrays so we can use the breaker + return new DoubleRateGroupingState(bigArrays, unitInMillis); + } + + public static void combine(DoubleRateGroupingState current, int groupId, long timestamp, double value) { + current.append(groupId, timestamp, value); + } + + public static void combineIntermediate( + DoubleRateGroupingState current, + int groupId, + LongBlock timestamps, + DoubleBlock values, + double reset, + int otherPosition + ) { + current.combine(groupId, timestamps, values, reset, otherPosition); + } + + public static void combineStates( + DoubleRateGroupingState current, + int currentGroupId, // make the stylecheck happy + DoubleRateGroupingState state, + int statePosition + ) { + throw new UnsupportedOperationException("ordinals grouping is not supported yet"); + } + + public static Block evaluateFinal(DoubleRateGroupingState state, IntVector selected, DriverContext driverContext) { + return state.evaluateFinal(selected, driverContext.blockFactory()); + } + + private static class DoubleRateState implements Accountable { + static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(DoubleRateState.class); + final long[] timestamps; // descending order + final double[] values; + double reset = 0; + + DoubleRateState(int initialSize) { + this.timestamps = new long[initialSize]; + this.values = new double[initialSize]; + } + + DoubleRateState(long[] ts, double[] vs) { + this.timestamps = ts; + this.values = vs; + } + + private double dv(double v0, double v1) { + // counter reset detection + return v0 > v1 ? 
v1 : v1 - v0; + } + + void append(long t, double v) { + assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length; + assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1]; + reset += dv(v, values[1]) + dv(values[1], values[0]) - dv(v, values[0]); + timestamps[1] = t; + values[1] = v; + } + + int entries() { + return timestamps.length; + } + + @Override + public long ramBytesUsed() { + return BASE_RAM_USAGE; + } + } + + public static final class DoubleRateGroupingState implements Releasable, Accountable, GroupingAggregatorState { + private ObjectArray states; + private final long unitInMillis; + private final BigArrays bigArrays; + + DoubleRateGroupingState(BigArrays bigArrays, long unitInMillis) { + this.bigArrays = bigArrays; + this.states = bigArrays.newObjectArray(1); + this.unitInMillis = unitInMillis; + } + + void ensureCapacity(int groupId) { + states = bigArrays.grow(states, groupId + 1); + } + + void append(int groupId, long timestamp, double value) { + ensureCapacity(groupId); + var state = states.get(groupId); + if (state == null) { + state = new DoubleRateState(new long[] { timestamp }, new double[] { value }); + states.set(groupId, state); + } else { + if (state.entries() == 1) { + state = new DoubleRateState(new long[] { state.timestamps[0], timestamp }, new double[] { state.values[0], value }); + states.set(groupId, state); + } else { + state.append(timestamp, value); + } + } + } + + void combine(int groupId, LongBlock timestamps, DoubleBlock values, double reset, int otherPosition) { + final int valueCount = timestamps.getValueCount(otherPosition); + if (valueCount == 0) { + return; + } + final int firstIndex = timestamps.getFirstValueIndex(otherPosition); + ensureCapacity(groupId); + var state = states.get(groupId); + if (state == null) { + state = new DoubleRateState(valueCount); + states.set(groupId, state); + // TODO: add bulk_copy to Block + for (int i = 0; i < valueCount; i++) { + state.timestamps[i] = timestamps.getLong(firstIndex + i); + state.values[i] = values.getDouble(firstIndex + i); + } + } else { + var newState = new DoubleRateState(state.entries() + valueCount); + states.set(groupId, newState); + merge(state, newState, firstIndex, valueCount, timestamps, values); + } + state.reset += reset; + } + + void merge(DoubleRateState curr, DoubleRateState dst, int firstIndex, int rightCount, LongBlock timestamps, DoubleBlock values) { + int i = 0, j = 0, k = 0; + final int leftCount = curr.entries(); + while (i < leftCount && j < rightCount) { + final var t1 = curr.timestamps[i]; + final var t2 = timestamps.getLong(firstIndex + j); + if (t1 > t2) { + dst.timestamps[k] = t1; + dst.values[k] = curr.values[i]; + ++i; + } else { + dst.timestamps[k] = t2; + dst.values[k] = values.getDouble(firstIndex + j); + ++j; + } + ++k; + } + if (i < leftCount) { + System.arraycopy(curr.timestamps, i, dst.timestamps, k, leftCount - i); + System.arraycopy(curr.values, i, dst.values, k, leftCount - i); + } + while (j < rightCount) { + dst.timestamps[k] = timestamps.getLong(firstIndex + j); + dst.values[k] = values.getDouble(firstIndex + j); + ++k; + ++j; + } + } + + @Override + public long ramBytesUsed() { + return states.ramBytesUsed(); + } + + @Override + public void close() { + Releasables.close(states); + } + + @Override + public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + assert blocks.length >= offset + 3 : "blocks=" + blocks.length + ",offset=" + offset; + final 
BlockFactory blockFactory = driverContext.blockFactory(); + final int positionCount = selected.getPositionCount(); + try ( + LongBlock.Builder timestamps = blockFactory.newLongBlockBuilder(positionCount * 2); + DoubleBlock.Builder values = blockFactory.newDoubleBlockBuilder(positionCount * 2); + DoubleVector.Builder resets = blockFactory.newDoubleVectorFixedBuilder(positionCount) + ) { + for (int i = 0; i < positionCount; i++) { + final var groupId = selected.getInt(i); + final var state = groupId < states.size() ? states.get(groupId) : null; + if (state != null) { + timestamps.beginPositionEntry(); + for (long t : state.timestamps) { + timestamps.appendLong(t); + } + timestamps.endPositionEntry(); + + values.beginPositionEntry(); + for (double v : state.values) { + values.appendDouble(v); + } + values.endPositionEntry(); + + resets.appendDouble(state.reset); + } else { + timestamps.appendNull(); + values.appendNull(); + resets.appendDouble(0); + } + } + blocks[offset] = timestamps.build(); + blocks[offset + 1] = values.build(); + blocks[offset + 2] = resets.build().asBlock(); + } + } + + Block evaluateFinal(IntVector selected, BlockFactory blockFactory) { + int positionCount = selected.getPositionCount(); + try (DoubleBlock.Builder rates = blockFactory.newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + final var groupId = selected.getInt(p); + final var state = groupId < states.size() ? states.get(groupId) : null; + if (state == null) { + rates.appendNull(); + continue; + } + int len = state.entries(); + long dt = state.timestamps[0] - state.timestamps[len - 1]; + if (dt == 0) { + // TODO: maybe issue warning when we don't have enough sample? + rates.appendNull(); + } else { + double reset = state.reset; + for (int i = 1; i < len; i++) { + if (state.values[i - 1] < state.values[i]) { + reset += state.values[i]; + } + } + double dv = state.values[0] - state.values[len - 1] + reset; + rates.appendDouble(dv * unitInMillis / dt); + } + } + return rates.build(); + } + } + + void enableGroupIdTracking(SeenGroupIds seenGroupIds) { + // noop - we handle the null states inside `toIntermediate` and `evaluateFinal` + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java new file mode 100644 index 0000000000000..fbf43f7d72c46 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java @@ -0,0 +1,278 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ObjectArray; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +/** + * A rate grouping aggregation definition for int. + * This class is generated. Edit `X-RateAggregator.java.st` instead. + */ +@GroupingAggregator( + includeTimestamps = true, + value = { + @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), + @IntermediateState(name = "values", type = "INT_BLOCK"), + @IntermediateState(name = "resets", type = "DOUBLE") } +) +public class RateIntAggregator { + public static IntRateGroupingState initGrouping(BigArrays bigArrays, long unitInMillis) { + // TODO: pass BlockFactory instead bigArrays so we can use the breaker + return new IntRateGroupingState(bigArrays, unitInMillis); + } + + public static void combine(IntRateGroupingState current, int groupId, long timestamp, int value) { + current.append(groupId, timestamp, value); + } + + public static void combineIntermediate( + IntRateGroupingState current, + int groupId, + LongBlock timestamps, + IntBlock values, + double reset, + int otherPosition + ) { + current.combine(groupId, timestamps, values, reset, otherPosition); + } + + public static void combineStates( + IntRateGroupingState current, + int currentGroupId, // make the stylecheck happy + IntRateGroupingState state, + int statePosition + ) { + throw new UnsupportedOperationException("ordinals grouping is not supported yet"); + } + + public static Block evaluateFinal(IntRateGroupingState state, IntVector selected, DriverContext driverContext) { + return state.evaluateFinal(selected, driverContext.blockFactory()); + } + + private static class IntRateState implements Accountable { + static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(IntRateState.class); + final long[] timestamps; // descending order + final int[] values; + double reset = 0; + + IntRateState(int initialSize) { + this.timestamps = new long[initialSize]; + this.values = new int[initialSize]; + } + + IntRateState(long[] ts, int[] vs) { + this.timestamps = ts; + this.values = vs; + } + + private int dv(int v0, int v1) { + // counter reset detection + return v0 > v1 ? 
v1 : v1 - v0; + } + + void append(long t, int v) { + assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length; + assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1]; + reset += dv(v, values[1]) + dv(values[1], values[0]) - dv(v, values[0]); + timestamps[1] = t; + values[1] = v; + } + + int entries() { + return timestamps.length; + } + + @Override + public long ramBytesUsed() { + return BASE_RAM_USAGE; + } + } + + public static final class IntRateGroupingState implements Releasable, Accountable, GroupingAggregatorState { + private ObjectArray states; + private final long unitInMillis; + private final BigArrays bigArrays; + + IntRateGroupingState(BigArrays bigArrays, long unitInMillis) { + this.bigArrays = bigArrays; + this.states = bigArrays.newObjectArray(1); + this.unitInMillis = unitInMillis; + } + + void ensureCapacity(int groupId) { + states = bigArrays.grow(states, groupId + 1); + } + + void append(int groupId, long timestamp, int value) { + ensureCapacity(groupId); + var state = states.get(groupId); + if (state == null) { + state = new IntRateState(new long[] { timestamp }, new int[] { value }); + states.set(groupId, state); + } else { + if (state.entries() == 1) { + state = new IntRateState(new long[] { state.timestamps[0], timestamp }, new int[] { state.values[0], value }); + states.set(groupId, state); + } else { + state.append(timestamp, value); + } + } + } + + void combine(int groupId, LongBlock timestamps, IntBlock values, double reset, int otherPosition) { + final int valueCount = timestamps.getValueCount(otherPosition); + if (valueCount == 0) { + return; + } + final int firstIndex = timestamps.getFirstValueIndex(otherPosition); + ensureCapacity(groupId); + var state = states.get(groupId); + if (state == null) { + state = new IntRateState(valueCount); + states.set(groupId, state); + // TODO: add bulk_copy to Block + for (int i = 0; i < valueCount; i++) { + state.timestamps[i] = timestamps.getLong(firstIndex + i); + state.values[i] = values.getInt(firstIndex + i); + } + } else { + var newState = new IntRateState(state.entries() + valueCount); + states.set(groupId, newState); + merge(state, newState, firstIndex, valueCount, timestamps, values); + } + state.reset += reset; + } + + void merge(IntRateState curr, IntRateState dst, int firstIndex, int rightCount, LongBlock timestamps, IntBlock values) { + int i = 0, j = 0, k = 0; + final int leftCount = curr.entries(); + while (i < leftCount && j < rightCount) { + final var t1 = curr.timestamps[i]; + final var t2 = timestamps.getLong(firstIndex + j); + if (t1 > t2) { + dst.timestamps[k] = t1; + dst.values[k] = curr.values[i]; + ++i; + } else { + dst.timestamps[k] = t2; + dst.values[k] = values.getInt(firstIndex + j); + ++j; + } + ++k; + } + if (i < leftCount) { + System.arraycopy(curr.timestamps, i, dst.timestamps, k, leftCount - i); + System.arraycopy(curr.values, i, dst.values, k, leftCount - i); + } + while (j < rightCount) { + dst.timestamps[k] = timestamps.getLong(firstIndex + j); + dst.values[k] = values.getInt(firstIndex + j); + ++k; + ++j; + } + } + + @Override + public long ramBytesUsed() { + return states.ramBytesUsed(); + } + + @Override + public void close() { + Releasables.close(states); + } + + @Override + public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + assert blocks.length >= offset + 3 : "blocks=" + blocks.length + ",offset=" + offset; + final BlockFactory blockFactory = driverContext.blockFactory(); 
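+            // write one position per selected group: a multi-valued timestamps block, a parallel values block, and the group's accumulated reset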
+ final int positionCount = selected.getPositionCount(); + try ( + LongBlock.Builder timestamps = blockFactory.newLongBlockBuilder(positionCount * 2); + IntBlock.Builder values = blockFactory.newIntBlockBuilder(positionCount * 2); + DoubleVector.Builder resets = blockFactory.newDoubleVectorFixedBuilder(positionCount) + ) { + for (int i = 0; i < positionCount; i++) { + final var groupId = selected.getInt(i); + final var state = groupId < states.size() ? states.get(groupId) : null; + if (state != null) { + timestamps.beginPositionEntry(); + for (long t : state.timestamps) { + timestamps.appendLong(t); + } + timestamps.endPositionEntry(); + + values.beginPositionEntry(); + for (int v : state.values) { + values.appendInt(v); + } + values.endPositionEntry(); + + resets.appendDouble(state.reset); + } else { + timestamps.appendNull(); + values.appendNull(); + resets.appendDouble(0); + } + } + blocks[offset] = timestamps.build(); + blocks[offset + 1] = values.build(); + blocks[offset + 2] = resets.build().asBlock(); + } + } + + Block evaluateFinal(IntVector selected, BlockFactory blockFactory) { + int positionCount = selected.getPositionCount(); + try (DoubleBlock.Builder rates = blockFactory.newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + final var groupId = selected.getInt(p); + final var state = groupId < states.size() ? states.get(groupId) : null; + if (state == null) { + rates.appendNull(); + continue; + } + int len = state.entries(); + long dt = state.timestamps[0] - state.timestamps[len - 1]; + if (dt == 0) { + // TODO: maybe issue warning when we don't have enough sample? + rates.appendNull(); + } else { + double reset = state.reset; + for (int i = 1; i < len; i++) { + if (state.values[i - 1] < state.values[i]) { + reset += state.values[i]; + } + } + double dv = state.values[0] - state.values[len - 1] + reset; + rates.appendDouble(dv * unitInMillis / dt); + } + } + return rates.build(); + } + } + + void enableGroupIdTracking(SeenGroupIds seenGroupIds) { + // noop - we handle the null states inside `toIntermediate` and `evaluateFinal` + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java new file mode 100644 index 0000000000000..b5d0dfc8aabdb --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java @@ -0,0 +1,277 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ObjectArray; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +/** + * A rate grouping aggregation definition for long. + * This class is generated. Edit `X-RateAggregator.java.st` instead. + */ +@GroupingAggregator( + includeTimestamps = true, + value = { + @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), + @IntermediateState(name = "values", type = "LONG_BLOCK"), + @IntermediateState(name = "resets", type = "DOUBLE") } +) +public class RateLongAggregator { + public static LongRateGroupingState initGrouping(BigArrays bigArrays, long unitInMillis) { + // TODO: pass BlockFactory instead bigArrays so we can use the breaker + return new LongRateGroupingState(bigArrays, unitInMillis); + } + + public static void combine(LongRateGroupingState current, int groupId, long timestamp, long value) { + current.append(groupId, timestamp, value); + } + + public static void combineIntermediate( + LongRateGroupingState current, + int groupId, + LongBlock timestamps, + LongBlock values, + double reset, + int otherPosition + ) { + current.combine(groupId, timestamps, values, reset, otherPosition); + } + + public static void combineStates( + LongRateGroupingState current, + int currentGroupId, // make the stylecheck happy + LongRateGroupingState state, + int statePosition + ) { + throw new UnsupportedOperationException("ordinals grouping is not supported yet"); + } + + public static Block evaluateFinal(LongRateGroupingState state, IntVector selected, DriverContext driverContext) { + return state.evaluateFinal(selected, driverContext.blockFactory()); + } + + private static class LongRateState implements Accountable { + static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(LongRateState.class); + final long[] timestamps; // descending order + final long[] values; + double reset = 0; + + LongRateState(int initialSize) { + this.timestamps = new long[initialSize]; + this.values = new long[initialSize]; + } + + LongRateState(long[] ts, long[] vs) { + this.timestamps = ts; + this.values = vs; + } + + private long dv(long v0, long v1) { + // counter reset detection + return v0 > v1 ? 
v1 : v1 - v0; + } + + void append(long t, long v) { + assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length; + assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1]; + reset += dv(v, values[1]) + dv(values[1], values[0]) - dv(v, values[0]); + timestamps[1] = t; + values[1] = v; + } + + int entries() { + return timestamps.length; + } + + @Override + public long ramBytesUsed() { + return BASE_RAM_USAGE; + } + } + + public static final class LongRateGroupingState implements Releasable, Accountable, GroupingAggregatorState { + private ObjectArray states; + private final long unitInMillis; + private final BigArrays bigArrays; + + LongRateGroupingState(BigArrays bigArrays, long unitInMillis) { + this.bigArrays = bigArrays; + this.states = bigArrays.newObjectArray(1); + this.unitInMillis = unitInMillis; + } + + void ensureCapacity(int groupId) { + states = bigArrays.grow(states, groupId + 1); + } + + void append(int groupId, long timestamp, long value) { + ensureCapacity(groupId); + var state = states.get(groupId); + if (state == null) { + state = new LongRateState(new long[] { timestamp }, new long[] { value }); + states.set(groupId, state); + } else { + if (state.entries() == 1) { + state = new LongRateState(new long[] { state.timestamps[0], timestamp }, new long[] { state.values[0], value }); + states.set(groupId, state); + } else { + state.append(timestamp, value); + } + } + } + + void combine(int groupId, LongBlock timestamps, LongBlock values, double reset, int otherPosition) { + final int valueCount = timestamps.getValueCount(otherPosition); + if (valueCount == 0) { + return; + } + final int firstIndex = timestamps.getFirstValueIndex(otherPosition); + ensureCapacity(groupId); + var state = states.get(groupId); + if (state == null) { + state = new LongRateState(valueCount); + states.set(groupId, state); + // TODO: add bulk_copy to Block + for (int i = 0; i < valueCount; i++) { + state.timestamps[i] = timestamps.getLong(firstIndex + i); + state.values[i] = values.getLong(firstIndex + i); + } + } else { + var newState = new LongRateState(state.entries() + valueCount); + states.set(groupId, newState); + merge(state, newState, firstIndex, valueCount, timestamps, values); + } + state.reset += reset; + } + + void merge(LongRateState curr, LongRateState dst, int firstIndex, int rightCount, LongBlock timestamps, LongBlock values) { + int i = 0, j = 0, k = 0; + final int leftCount = curr.entries(); + while (i < leftCount && j < rightCount) { + final var t1 = curr.timestamps[i]; + final var t2 = timestamps.getLong(firstIndex + j); + if (t1 > t2) { + dst.timestamps[k] = t1; + dst.values[k] = curr.values[i]; + ++i; + } else { + dst.timestamps[k] = t2; + dst.values[k] = values.getLong(firstIndex + j); + ++j; + } + ++k; + } + if (i < leftCount) { + System.arraycopy(curr.timestamps, i, dst.timestamps, k, leftCount - i); + System.arraycopy(curr.values, i, dst.values, k, leftCount - i); + } + while (j < rightCount) { + dst.timestamps[k] = timestamps.getLong(firstIndex + j); + dst.values[k] = values.getLong(firstIndex + j); + ++k; + ++j; + } + } + + @Override + public long ramBytesUsed() { + return states.ramBytesUsed(); + } + + @Override + public void close() { + Releasables.close(states); + } + + @Override + public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + assert blocks.length >= offset + 3 : "blocks=" + blocks.length + ",offset=" + offset; + final BlockFactory blockFactory = 
driverContext.blockFactory(); + final int positionCount = selected.getPositionCount(); + try ( + LongBlock.Builder timestamps = blockFactory.newLongBlockBuilder(positionCount * 2); + LongBlock.Builder values = blockFactory.newLongBlockBuilder(positionCount * 2); + DoubleVector.Builder resets = blockFactory.newDoubleVectorFixedBuilder(positionCount) + ) { + for (int i = 0; i < positionCount; i++) { + final var groupId = selected.getInt(i); + final var state = groupId < states.size() ? states.get(groupId) : null; + if (state != null) { + timestamps.beginPositionEntry(); + for (long t : state.timestamps) { + timestamps.appendLong(t); + } + timestamps.endPositionEntry(); + + values.beginPositionEntry(); + for (long v : state.values) { + values.appendLong(v); + } + values.endPositionEntry(); + + resets.appendDouble(state.reset); + } else { + timestamps.appendNull(); + values.appendNull(); + resets.appendDouble(0); + } + } + blocks[offset] = timestamps.build(); + blocks[offset + 1] = values.build(); + blocks[offset + 2] = resets.build().asBlock(); + } + } + + Block evaluateFinal(IntVector selected, BlockFactory blockFactory) { + int positionCount = selected.getPositionCount(); + try (DoubleBlock.Builder rates = blockFactory.newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + final var groupId = selected.getInt(p); + final var state = groupId < states.size() ? states.get(groupId) : null; + if (state == null) { + rates.appendNull(); + continue; + } + int len = state.entries(); + long dt = state.timestamps[0] - state.timestamps[len - 1]; + if (dt == 0) { + // TODO: maybe issue warning when we don't have enough sample? + rates.appendNull(); + } else { + double reset = state.reset; + for (int i = 1; i < len; i++) { + if (state.values[i - 1] < state.values[i]) { + reset += state.values[i]; + } + } + double dv = state.values[0] - state.values[len - 1] + reset; + rates.appendDouble(dv * unitInMillis / dt); + } + } + return rates.build(); + } + } + + void enableGroupIdTracking(SeenGroupIds seenGroupIds) { + // noop - we handle the null states inside `toIntermediate` and `evaluateFinal` + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..8806e1ed865c2 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleAggregatorFunctionSupplier.java @@ -0,0 +1,41 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link RateDoubleAggregator}. + * This class is generated. Do not edit it. 
+ */
+public final class RateDoubleAggregatorFunctionSupplier implements AggregatorFunctionSupplier {
+  private final List<Integer> channels;
+
+  private final long unitInMillis;
+
+  public RateDoubleAggregatorFunctionSupplier(List<Integer> channels, long unitInMillis) {
+    this.channels = channels;
+    this.unitInMillis = unitInMillis;
+  }
+
+  @Override
+  public AggregatorFunction aggregator(DriverContext driverContext) {
+    throw new UnsupportedOperationException("non-grouping aggregator is not supported");
+  }
+
+  @Override
+  public RateDoubleGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) {
+    return RateDoubleGroupingAggregatorFunction.create(channels, driverContext, unitInMillis);
+  }
+
+  @Override
+  public String describe() {
+    return "rate of doubles";
+  }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java
new file mode 100644
index 0000000000000..608221614c483
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java
@@ -0,0 +1,225 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License
+// 2.0; you may not use this file except in compliance with the Elastic License
+// 2.0.
+package org.elasticsearch.compute.aggregation;
+
+import java.lang.Integer;
+import java.lang.Override;
+import java.lang.String;
+import java.lang.StringBuilder;
+import java.util.List;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.DoubleVector;
+import org.elasticsearch.compute.data.ElementType;
+import org.elasticsearch.compute.data.IntBlock;
+import org.elasticsearch.compute.data.IntVector;
+import org.elasticsearch.compute.data.LongBlock;
+import org.elasticsearch.compute.data.LongVector;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.DriverContext;
+
+/**
+ * {@link GroupingAggregatorFunction} implementation for {@link RateDoubleAggregator}.
+ * This class is generated. Do not edit it.
+ */ +public final class RateDoubleGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("timestamps", ElementType.LONG), + new IntermediateStateDesc("values", ElementType.DOUBLE), + new IntermediateStateDesc("resets", ElementType.DOUBLE) ); + + private final RateDoubleAggregator.DoubleRateGroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + private final long unitInMillis; + + public RateDoubleGroupingAggregatorFunction(List channels, + RateDoubleAggregator.DoubleRateGroupingState state, DriverContext driverContext, + long unitInMillis) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + this.unitInMillis = unitInMillis; + } + + public static RateDoubleGroupingAggregatorFunction create(List channels, + DriverContext driverContext, long unitInMillis) { + return new RateDoubleGroupingAggregatorFunction(channels, RateDoubleAggregator.initGrouping(driverContext.bigArrays(), unitInMillis), driverContext, unitInMillis); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + DoubleBlock valuesBlock = page.getBlock(channels.get(0)); + DoubleVector valuesVector = valuesBlock.asVector(); + LongBlock timestampsBlock = page.getBlock(channels.get(1)); + LongVector timestampsVector = timestampsBlock.asVector(); + if (timestampsVector == null) { + throw new IllegalStateException("expected @timestamp vector; but got a block"); + } + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + RateDoubleAggregator.combine(state, groupId, timestamps.getLong(v), values.getDouble(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, DoubleVector values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); 
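+      // dense vector input: every position holds exactly one value, so add a single (timestamp, value) sample per row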
+ var valuePosition = groupPosition + positionOffset; + RateDoubleAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getDouble(valuePosition)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + RateDoubleAggregator.combine(state, groupId, timestamps.getLong(v), values.getDouble(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, DoubleVector values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + var valuePosition = groupPosition + positionOffset; + RateDoubleAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getDouble(valuePosition)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + DoubleBlock values = (DoubleBlock) valuesUncast; + Block resetsUncast = page.getBlock(channels.get(2)); + if (resetsUncast.areAllValuesNull()) { + return; + } + DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); + assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + RateDoubleAggregator.combineIntermediate(state, groupId, timestamps, values, resets.getDouble(groupPosition + positionOffset), groupPosition + positionOffset); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + RateDoubleAggregator.DoubleRateGroupingState inState = ((RateDoubleGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + RateDoubleAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, 
driverContext);
+  }
+
+  @Override
+  public void evaluateFinal(Block[] blocks, int offset, IntVector selected,
+      DriverContext driverContext) {
+    blocks[offset] = RateDoubleAggregator.evaluateFinal(state, selected, driverContext);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append(getClass().getSimpleName()).append("[");
+    sb.append("channels=").append(channels);
+    sb.append("]");
+    return sb.toString();
+  }
+
+  @Override
+  public void close() {
+    state.close();
+  }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntAggregatorFunctionSupplier.java
new file mode 100644
index 0000000000000..a98f0217ef90e
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntAggregatorFunctionSupplier.java
@@ -0,0 +1,41 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License
+// 2.0; you may not use this file except in compliance with the Elastic License
+// 2.0.
+package org.elasticsearch.compute.aggregation;
+
+import java.lang.Integer;
+import java.lang.Override;
+import java.lang.String;
+import java.util.List;
+import org.elasticsearch.compute.operator.DriverContext;
+
+/**
+ * {@link AggregatorFunctionSupplier} implementation for {@link RateIntAggregator}.
+ * This class is generated. Do not edit it.
+ */
+public final class RateIntAggregatorFunctionSupplier implements AggregatorFunctionSupplier {
+  private final List<Integer> channels;
+
+  private final long unitInMillis;
+
+  public RateIntAggregatorFunctionSupplier(List<Integer> channels, long unitInMillis) {
+    this.channels = channels;
+    this.unitInMillis = unitInMillis;
+  }
+
+  @Override
+  public AggregatorFunction aggregator(DriverContext driverContext) {
+    throw new UnsupportedOperationException("non-grouping aggregator is not supported");
+  }
+
+  @Override
+  public RateIntGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) {
+    return RateIntGroupingAggregatorFunction.create(channels, driverContext, unitInMillis);
+  }
+
+  @Override
+  public String describe() {
+    return "rate of ints";
+  }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java
new file mode 100644
index 0000000000000..df954d92a6d2a
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java
@@ -0,0 +1,225 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License
+// 2.0; you may not use this file except in compliance with the Elastic License
+// 2.0.
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link RateIntAggregator}. + * This class is generated. Do not edit it. + */ +public final class RateIntGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("timestamps", ElementType.LONG), + new IntermediateStateDesc("values", ElementType.INT), + new IntermediateStateDesc("resets", ElementType.DOUBLE) ); + + private final RateIntAggregator.IntRateGroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + private final long unitInMillis; + + public RateIntGroupingAggregatorFunction(List channels, + RateIntAggregator.IntRateGroupingState state, DriverContext driverContext, + long unitInMillis) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + this.unitInMillis = unitInMillis; + } + + public static RateIntGroupingAggregatorFunction create(List channels, + DriverContext driverContext, long unitInMillis) { + return new RateIntGroupingAggregatorFunction(channels, RateIntAggregator.initGrouping(driverContext.bigArrays(), unitInMillis), driverContext, unitInMillis); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + IntBlock valuesBlock = page.getBlock(channels.get(0)); + IntVector valuesVector = valuesBlock.asVector(); + LongBlock timestampsBlock = page.getBlock(channels.get(1)); + LongVector timestampsVector = timestampsBlock.asVector(); + if (timestampsVector == null) { + throw new IllegalStateException("expected @timestamp vector; but got a block"); + } + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, IntBlock values, + LongVector timestamps) { + for (int 
groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + RateIntAggregator.combine(state, groupId, timestamps.getLong(v), values.getInt(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, IntVector values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + var valuePosition = groupPosition + positionOffset; + RateIntAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getInt(valuePosition)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, IntBlock values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + RateIntAggregator.combine(state, groupId, timestamps.getLong(v), values.getInt(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, IntVector values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + var valuePosition = groupPosition + positionOffset; + RateIntAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getInt(valuePosition)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + IntBlock values = (IntBlock) valuesUncast; + Block resetsUncast = page.getBlock(channels.get(2)); + if (resetsUncast.areAllValuesNull()) { + return; + } + DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); + assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + RateIntAggregator.combineIntermediate(state, groupId, 
timestamps, values, resets.getDouble(groupPosition + positionOffset), groupPosition + positionOffset); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + RateIntAggregator.IntRateGroupingState inState = ((RateIntGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + RateIntAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = RateIntAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..b8100dbbe4455 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongAggregatorFunctionSupplier.java @@ -0,0 +1,41 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link RateLongAggregator}. + * This class is generated. Do not edit it. 
+ */
+public final class RateLongAggregatorFunctionSupplier implements AggregatorFunctionSupplier {
+  private final List<Integer> channels;
+
+  private final long unitInMillis;
+
+  public RateLongAggregatorFunctionSupplier(List<Integer> channels, long unitInMillis) {
+    this.channels = channels;
+    this.unitInMillis = unitInMillis;
+  }
+
+  @Override
+  public AggregatorFunction aggregator(DriverContext driverContext) {
+    throw new UnsupportedOperationException("non-grouping aggregator is not supported");
+  }
+
+  @Override
+  public RateLongGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) {
+    return RateLongGroupingAggregatorFunction.create(channels, driverContext, unitInMillis);
+  }
+
+  @Override
+  public String describe() {
+    return "rate of longs";
+  }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java
new file mode 100644
index 0000000000000..fb536465ed973
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java
@@ -0,0 +1,225 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License
+// 2.0; you may not use this file except in compliance with the Elastic License
+// 2.0.
+package org.elasticsearch.compute.aggregation;
+
+import java.lang.Integer;
+import java.lang.Override;
+import java.lang.String;
+import java.lang.StringBuilder;
+import java.util.List;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.DoubleVector;
+import org.elasticsearch.compute.data.ElementType;
+import org.elasticsearch.compute.data.IntBlock;
+import org.elasticsearch.compute.data.IntVector;
+import org.elasticsearch.compute.data.LongBlock;
+import org.elasticsearch.compute.data.LongVector;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.DriverContext;
+
+/**
+ * {@link GroupingAggregatorFunction} implementation for {@link RateLongAggregator}.
+ * This class is generated. Do not edit it.
+ */ +public final class RateLongGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("timestamps", ElementType.LONG), + new IntermediateStateDesc("values", ElementType.LONG), + new IntermediateStateDesc("resets", ElementType.DOUBLE) ); + + private final RateLongAggregator.LongRateGroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + private final long unitInMillis; + + public RateLongGroupingAggregatorFunction(List channels, + RateLongAggregator.LongRateGroupingState state, DriverContext driverContext, + long unitInMillis) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + this.unitInMillis = unitInMillis; + } + + public static RateLongGroupingAggregatorFunction create(List channels, + DriverContext driverContext, long unitInMillis) { + return new RateLongGroupingAggregatorFunction(channels, RateLongAggregator.initGrouping(driverContext.bigArrays(), unitInMillis), driverContext, unitInMillis); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + LongBlock valuesBlock = page.getBlock(channels.get(0)); + LongVector valuesVector = valuesBlock.asVector(); + LongBlock timestampsBlock = page.getBlock(channels.get(1)); + LongVector timestampsVector = timestampsBlock.asVector(); + if (timestampsVector == null) { + throw new IllegalStateException("expected @timestamp vector; but got a block"); + } + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, LongBlock values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + RateLongAggregator.combine(state, groupId, timestamps.getLong(v), values.getLong(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, LongVector values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + var valuePosition = 
groupPosition + positionOffset; + RateLongAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getLong(valuePosition)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, LongBlock values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + RateLongAggregator.combine(state, groupId, timestamps.getLong(v), values.getLong(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, LongVector values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + var valuePosition = groupPosition + positionOffset; + RateLongAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getLong(valuePosition)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + LongBlock values = (LongBlock) valuesUncast; + Block resetsUncast = page.getBlock(channels.get(2)); + if (resetsUncast.areAllValuesNull()) { + return; + } + DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); + assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + RateLongAggregator.combineIntermediate(state, groupId, timestamps, values, resets.getDouble(groupPosition + positionOffset), groupPosition + positionOffset); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + RateLongAggregator.LongRateGroupingState inState = ((RateLongGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + RateLongAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void 
evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = RateLongAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st new file mode 100644 index 0000000000000..9ace663fec990 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st @@ -0,0 +1,280 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ObjectArray; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +$if(int)$ +import org.elasticsearch.compute.data.IntBlock; +$endif$ +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +/** + * A rate grouping aggregation definition for $type$. + * This class is generated. Edit `X-RateAggregator.java.st` instead. 
+ */ +@GroupingAggregator( + includeTimestamps = true, + value = { + @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), + @IntermediateState(name = "values", type = "$TYPE$_BLOCK"), + @IntermediateState(name = "resets", type = "DOUBLE") } +) +public class Rate$Type$Aggregator { + public static $Type$RateGroupingState initGrouping(BigArrays bigArrays, long unitInMillis) { + // TODO: pass BlockFactory instead bigArrays so we can use the breaker + return new $Type$RateGroupingState(bigArrays, unitInMillis); + } + + public static void combine($Type$RateGroupingState current, int groupId, long timestamp, $type$ value) { + current.append(groupId, timestamp, value); + } + + public static void combineIntermediate( + $Type$RateGroupingState current, + int groupId, + LongBlock timestamps, + $Type$Block values, + double reset, + int otherPosition + ) { + current.combine(groupId, timestamps, values, reset, otherPosition); + } + + public static void combineStates( + $Type$RateGroupingState current, + int currentGroupId, // make the stylecheck happy + $Type$RateGroupingState state, + int statePosition + ) { + throw new UnsupportedOperationException("ordinals grouping is not supported yet"); + } + + public static Block evaluateFinal($Type$RateGroupingState state, IntVector selected, DriverContext driverContext) { + return state.evaluateFinal(selected, driverContext.blockFactory()); + } + + private static class $Type$RateState implements Accountable { + static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject($Type$RateState.class); + final long[] timestamps; // descending order + final $type$[] values; + double reset = 0; + + $Type$RateState(int initialSize) { + this.timestamps = new long[initialSize]; + this.values = new $type$[initialSize]; + } + + $Type$RateState(long[] ts, $type$[] vs) { + this.timestamps = ts; + this.values = vs; + } + + private $type$ dv($type$ v0, $type$ v1) { + // counter reset detection + return v0 > v1 ? 
v1 : v1 - v0; + } + + void append(long t, $type$ v) { + assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length; + assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1]; + reset += dv(v, values[1]) + dv(values[1], values[0]) - dv(v, values[0]); + timestamps[1] = t; + values[1] = v; + } + + int entries() { + return timestamps.length; + } + + @Override + public long ramBytesUsed() { + return BASE_RAM_USAGE; + } + } + + public static final class $Type$RateGroupingState implements Releasable, Accountable, GroupingAggregatorState { + private ObjectArray<$Type$RateState> states; + private final long unitInMillis; + private final BigArrays bigArrays; + + $Type$RateGroupingState(BigArrays bigArrays, long unitInMillis) { + this.bigArrays = bigArrays; + this.states = bigArrays.newObjectArray(1); + this.unitInMillis = unitInMillis; + } + + void ensureCapacity(int groupId) { + states = bigArrays.grow(states, groupId + 1); + } + + void append(int groupId, long timestamp, $type$ value) { + ensureCapacity(groupId); + var state = states.get(groupId); + if (state == null) { + state = new $Type$RateState(new long[] { timestamp }, new $type$[] { value }); + states.set(groupId, state); + } else { + if (state.entries() == 1) { + state = new $Type$RateState(new long[] { state.timestamps[0], timestamp }, new $type$[] { state.values[0], value }); + states.set(groupId, state); + } else { + state.append(timestamp, value); + } + } + } + + void combine(int groupId, LongBlock timestamps, $Type$Block values, double reset, int otherPosition) { + final int valueCount = timestamps.getValueCount(otherPosition); + if (valueCount == 0) { + return; + } + final int firstIndex = timestamps.getFirstValueIndex(otherPosition); + ensureCapacity(groupId); + var state = states.get(groupId); + if (state == null) { + state = new $Type$RateState(valueCount); + states.set(groupId, state); + // TODO: add bulk_copy to Block + for (int i = 0; i < valueCount; i++) { + state.timestamps[i] = timestamps.getLong(firstIndex + i); + state.values[i] = values.get$Type$(firstIndex + i); + } + } else { + var newState = new $Type$RateState(state.entries() + valueCount); + states.set(groupId, newState); + merge(state, newState, firstIndex, valueCount, timestamps, values); + } + state.reset += reset; + } + + void merge($Type$RateState curr, $Type$RateState dst, int firstIndex, int rightCount, LongBlock timestamps, $Type$Block values) { + int i = 0, j = 0, k = 0; + final int leftCount = curr.entries(); + while (i < leftCount && j < rightCount) { + final var t1 = curr.timestamps[i]; + final var t2 = timestamps.getLong(firstIndex + j); + if (t1 > t2) { + dst.timestamps[k] = t1; + dst.values[k] = curr.values[i]; + ++i; + } else { + dst.timestamps[k] = t2; + dst.values[k] = values.get$Type$(firstIndex + j); + ++j; + } + ++k; + } + if (i < leftCount) { + System.arraycopy(curr.timestamps, i, dst.timestamps, k, leftCount - i); + System.arraycopy(curr.values, i, dst.values, k, leftCount - i); + } + while (j < rightCount) { + dst.timestamps[k] = timestamps.getLong(firstIndex + j); + dst.values[k] = values.get$Type$(firstIndex + j); + ++k; + ++j; + } + } + + @Override + public long ramBytesUsed() { + return states.ramBytesUsed(); + } + + @Override + public void close() { + Releasables.close(states); + } + + @Override + public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + assert blocks.length >= offset + 3 : "blocks=" + blocks.length + ",offset=" + 
offset; + final BlockFactory blockFactory = driverContext.blockFactory(); + final int positionCount = selected.getPositionCount(); + try ( + LongBlock.Builder timestamps = blockFactory.newLongBlockBuilder(positionCount * 2); + $Type$Block.Builder values = blockFactory.new$Type$BlockBuilder(positionCount * 2); + DoubleVector.Builder resets = blockFactory.newDoubleVectorFixedBuilder(positionCount) + ) { + for (int i = 0; i < positionCount; i++) { + final var groupId = selected.getInt(i); + final var state = groupId < states.size() ? states.get(groupId) : null; + if (state != null) { + timestamps.beginPositionEntry(); + for (long t : state.timestamps) { + timestamps.appendLong(t); + } + timestamps.endPositionEntry(); + + values.beginPositionEntry(); + for ($type$ v : state.values) { + values.append$Type$(v); + } + values.endPositionEntry(); + + resets.appendDouble(state.reset); + } else { + timestamps.appendNull(); + values.appendNull(); + resets.appendDouble(0); + } + } + blocks[offset] = timestamps.build(); + blocks[offset + 1] = values.build(); + blocks[offset + 2] = resets.build().asBlock(); + } + } + + Block evaluateFinal(IntVector selected, BlockFactory blockFactory) { + int positionCount = selected.getPositionCount(); + try (DoubleBlock.Builder rates = blockFactory.newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + final var groupId = selected.getInt(p); + final var state = groupId < states.size() ? states.get(groupId) : null; + if (state == null) { + rates.appendNull(); + continue; + } + int len = state.entries(); + long dt = state.timestamps[0] - state.timestamps[len - 1]; + if (dt == 0) { + // TODO: maybe issue warning when we don't have enough sample? + rates.appendNull(); + } else { + double reset = state.reset; + for (int i = 1; i < len; i++) { + if (state.values[i - 1] < state.values[i]) { + reset += state.values[i]; + } + } + double dv = state.values[0] - state.values[len - 1] + reset; + rates.appendDouble(dv * unitInMillis / dt); + } + } + return rates.build(); + } + } + + void enableGroupIdTracking(SeenGroupIds seenGroupIds) { + // noop - we handle the null states inside `toIntermediate` and `evaluateFinal` + } + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java index 9a5150bdf4fff..7bd3c426ae1c3 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java @@ -26,8 +26,13 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Randomness; +import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.compute.aggregation.RateLongAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.blockhash.BlockHash; +import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; import org.elasticsearch.compute.data.DocVector; +import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongVector; @@ -35,11 +40,13 @@ import org.elasticsearch.compute.operator.AnyOperatorTestCase; import 
org.elasticsearch.compute.operator.Driver; import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.HashAggregationOperator; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OperatorTestCase; import org.elasticsearch.compute.operator.TestResultPageSinkOperator; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; @@ -57,6 +64,7 @@ import java.util.Map; import java.util.function.Function; +import static org.elasticsearch.index.mapper.DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -198,6 +206,135 @@ record Doc(int host, long timestamp, long metric) {} assertThat(offset, equalTo(Math.min(limit, numDocs))); } + public void testBasicRate() { + long[] v1 = { 1, 1, 3, 0, 2, 9, 21, 3, 7, 7, 9, 12 }; + long[] t1 = { 1, 5, 11, 20, 21, 59, 88, 91, 92, 97, 99, 112 }; + + long[] v2 = { 7, 2, 0, 11, 24, 0, 4, 1, 10, 2 }; + long[] t2 = { 1, 2, 4, 5, 6, 8, 10, 11, 12, 14 }; + + long[] v3 = { 0, 1, 0, 1, 1, 4, 2, 2, 2, 2, 3, 5, 5 }; + long[] t3 = { 2, 3, 5, 7, 8, 9, 10, 12, 14, 15, 18, 20, 22 }; + List pods = List.of(new Pod("p1", t1, v1), new Pod("p2", t2, v2), new Pod("p3", t3, v3)); + long unit = between(1, 5); + Map actualRates = runRateTest(pods, TimeValue.timeValueMillis(unit)); + assertThat(actualRates, equalTo(Map.of("p1", 35.0 * unit / 111.0, "p2", 42.0 * unit / 13.0, "p3", 10.0 * unit / 20.0))); + } + + public void testRandomRate() { + int numPods = between(1, 10); + List pods = new ArrayList<>(); + Map expectedRates = new HashMap<>(); + TimeValue unit = TimeValue.timeValueSeconds(1); + for (int p = 0; p < numPods; p++) { + int numValues = between(2, 100); + long[] values = new long[numValues]; + long[] times = new long[numValues]; + long t = DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); + for (int i = 0; i < numValues; i++) { + values[i] = randomIntBetween(0, 100); + t += TimeValue.timeValueSeconds(between(1, 10)).millis(); + times[i] = t; + } + Pod pod = new Pod("p" + p, times, values); + pods.add(pod); + if (numValues == 1) { + expectedRates.put(pod.name, null); + } else { + expectedRates.put(pod.name, pod.expectedRate(unit)); + } + } + Map actualRates = runRateTest(pods, unit); + assertThat(actualRates, equalTo(expectedRates)); + } + + record Pod(String name, long[] times, long[] values) { + Pod { + assert times.length == values.length : times.length + "!=" + values.length; + } + + double expectedRate(TimeValue unit) { + double dv = 0; + for (int i = 0; i < values.length - 1; i++) { + if (values[i + 1] < values[i]) { + dv += values[i]; + } + } + dv += (values[values.length - 1] - values[0]); + long dt = times[times.length - 1] - times[0]; + return (dv * unit.millis()) / dt; + } + } + + Map runRateTest(List pods, TimeValue unit) { + long unitInMillis = unit.millis(); + record Doc(String pod, long timestamp, long requests) { + + } + var sourceOperatorFactory = createTimeSeriesSourceOperator(Integer.MAX_VALUE, between(1, 100), randomBoolean(), writer -> { + List docs = new ArrayList<>(); + for (Pod pod : pods) { + for (int i = 0; i < pod.times.length; i++) { + docs.add(new Doc(pod.name, 
pod.times[i], pod.values[i])); + } + } + Randomness.shuffle(docs); + for (Doc doc : docs) { + writeTS(writer, doc.timestamp, new Object[] { "pod", doc.pod }, new Object[] { "requests", doc.requests }); + } + return docs.size(); + }); + var ctx = driverContext(); + HashAggregationOperator initialHash = new HashAggregationOperator( + List.of(new RateLongAggregatorFunctionSupplier(List.of(4, 2), unitInMillis).groupingAggregatorFactory(AggregatorMode.INITIAL)), + () -> BlockHash.build( + List.of(new HashAggregationOperator.GroupSpec(3, ElementType.BYTES_REF)), + ctx.blockFactory(), + randomIntBetween(1, 1000), + randomBoolean() + ), + ctx + ); + + HashAggregationOperator finalHash = new HashAggregationOperator( + List.of(new RateLongAggregatorFunctionSupplier(List.of(1, 2, 3), unitInMillis).groupingAggregatorFactory(AggregatorMode.FINAL)), + () -> BlockHash.build( + List.of(new HashAggregationOperator.GroupSpec(0, ElementType.BYTES_REF)), + ctx.blockFactory(), + randomIntBetween(1, 1000), + randomBoolean() + ), + ctx + ); + List results = new ArrayList<>(); + var requestsField = new NumberFieldMapper.NumberFieldType("requests", NumberFieldMapper.NumberType.LONG); + var podField = new KeywordFieldMapper.KeywordFieldType("pod"); + OperatorTestCase.runDriver( + new Driver( + ctx, + sourceOperatorFactory.get(ctx), + List.of( + ValuesSourceReaderOperatorTests.factory(reader, podField, ElementType.BYTES_REF).get(ctx), + ValuesSourceReaderOperatorTests.factory(reader, requestsField, ElementType.LONG).get(ctx), + initialHash, + finalHash + ), + new TestResultPageSinkOperator(results::add), + () -> {} + ) + ); + Map rates = new HashMap<>(); + for (Page result : results) { + BytesRefBlock keysBlock = result.getBlock(0); + DoubleBlock ratesBlock = result.getBlock(1); + for (int i = 0; i < result.getPositionCount(); i++) { + rates.put(keysBlock.getBytesRef(i, new BytesRef()).utf8ToString(), ratesBlock.getDouble(i)); + } + result.releaseBlocks(); + } + return rates; + } + @Override protected Operator.OperatorFactory simple() { return createTimeSeriesSourceOperator(1, 1, false, writer -> { From c1467e02c874b2c7128c0c0dafb57cd44dc01608 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 25 Mar 2024 13:02:36 -0700 Subject: [PATCH 49/79] Reload Lucene in time series source when thread changes (#106727) The timeseries source operator should reload Lucene structures, such as doc values and scorers, when the executing thread changes. 
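In short: each per-segment Leaf now remembers the thread that created its Lucene doc values and scorer, and rebuilds them when a different driver thread takes over. Below is a minimal sketch of that pattern, simplified from the diff that follows; the class name PerSegmentState is made up for illustration, while Leaf, consume(), reinitializeIfNeeded() and the "_tsid" field come from the patch itself (the real Leaf also reloads the @timestamp doc values and re-advances the scorer's iterator to the current doc):

    import java.io.IOException;

    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.index.SortedDocValues;

    // Sketch only: a per-segment holder in the spirit of the patch's Leaf class.
    final class PerSegmentState {
        private final LeafReaderContext leaf;
        private SortedDocValues tsids;      // stateful iterator, not safe to reuse across threads
        private Thread createdThread;

        PerSegmentState(LeafReaderContext leaf) throws IOException {
            this.leaf = leaf;
            this.tsids = leaf.reader().getSortedDocValues("_tsid");
            this.createdThread = Thread.currentThread();
        }

        // Called at the start of consume(): if the driver moved to another thread,
        // recreate the doc-values iterator before reading from it again.
        void reinitializeIfNeeded(Thread executingThread) throws IOException {
            if (executingThread != createdThread) {
                tsids = leaf.reader().getSortedDocValues("_tsid");
                createdThread = executingThread;
            }
        }
    }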
--- ...TimeSeriesSortedSourceOperatorFactory.java | 35 +++++++++++++++---- .../TimeSeriesSortedSourceOperatorTests.java | 2 +- 2 files changed, 30 insertions(+), 7 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java index f9df90da6aa2d..ad884538ac85f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java @@ -228,6 +228,9 @@ protected boolean lessThan(Leaf a, Leaf b) { void consume() throws IOException { if (queue != null) { currentTsid = BytesRef.deepCopyOf(queue.top().timeSeriesHash); + if (queue.size() > 0) { + queue.top().reinitializeIfNeeded(Thread.currentThread()); + } while (queue.size() > 0) { if (remainingDocs <= 0 || currentPagePos >= maxPageSize) { break; @@ -249,12 +252,14 @@ void consume() throws IOException { newTop = queue.size() > 0 ? queue.top() : null; } if (newTop != null && newTop.timeSeriesHash.equals(currentTsid) == false) { + newTop.reinitializeIfNeeded(Thread.currentThread()); globalTsidOrd++; currentTsid = BytesRef.deepCopyOf(newTop.timeSeriesHash); } } } else { // Only one segment, so no need to use priority queue and use segment ordinals as tsid ord. + leaf.reinitializeIfNeeded(Thread.currentThread()); while (leaf.nextDoc()) { tsOrdBuilder.appendInt(leaf.timeSeriesHashOrd); timestampIntervalBuilder.appendLong(leaf.timestamp); @@ -280,37 +285,55 @@ boolean completed() { static class Leaf { private final int segmentOrd; - private final SortedDocValues tsids; - private final SortedNumericDocValues timestamps; - private final DocIdSetIterator iterator; + private final Weight weight; + private final LeafReaderContext leaf; + private SortedDocValues tsids; + private SortedNumericDocValues timestamps; + private DocIdSetIterator iterator; + private Thread createdThread; private long timestamp; private int timeSeriesHashOrd; private BytesRef timeSeriesHash; + private int docID = -1; Leaf(Weight weight, LeafReaderContext leaf) throws IOException { this.segmentOrd = leaf.ord; + this.weight = weight; + this.leaf = leaf; + this.createdThread = Thread.currentThread(); tsids = leaf.reader().getSortedDocValues("_tsid"); timestamps = leaf.reader().getSortedNumericDocValues("@timestamp"); iterator = weight.scorer(leaf).iterator(); } boolean nextDoc() throws IOException { - int docID = iterator.nextDoc(); + docID = iterator.nextDoc(); if (docID == DocIdSetIterator.NO_MORE_DOCS) { return false; } - boolean advanced = tsids.advanceExact(iterator.docID()); + boolean advanced = tsids.advanceExact(docID); assert advanced; timeSeriesHashOrd = tsids.ordValue(); timeSeriesHash = tsids.lookupOrd(timeSeriesHashOrd); - advanced = timestamps.advanceExact(iterator.docID()); + advanced = timestamps.advanceExact(docID); assert advanced; timestamp = timestamps.nextValue(); return true; } + void reinitializeIfNeeded(Thread executingThread) throws IOException { + if (executingThread != createdThread) { + tsids = leaf.reader().getSortedDocValues("_tsid"); + timestamps = leaf.reader().getSortedNumericDocValues("@timestamp"); + iterator = weight.scorer(leaf).iterator(); + if (docID != -1) { + iterator.advance(docID); + } + createdThread = executingThread; + } + } } } diff --git 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java index 7bd3c426ae1c3..16340909a4fd3 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java @@ -144,7 +144,7 @@ public void testLimit() { public void testRandom() { record Doc(int host, long timestamp, long metric) {} - int numDocs = between(1, 1000); + int numDocs = between(1, 5000); List docs = new ArrayList<>(); Map timestamps = new HashMap<>(); long t0 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); From 61982c461f995416d004fc482ac843322db45adc Mon Sep 17 00:00:00 2001 From: Larisa Motova Date: Mon, 25 Mar 2024 10:26:00 -1000 Subject: [PATCH 50/79] Improve error message when rolling over DS alias (#106708) Currently a null pointer exception is thrown when trying to execute a rollover on a data stream alias. This commit checks before trying to execute if we're attempting to rollover a data stream alias or not. Fixes #106137 --------- Co-authored-by: James Baiera --- docs/changelog/106708.yaml | 6 +++ .../rollover/TransportRolloverAction.java | 10 +++++ .../TransportRolloverActionTests.java | 41 +++++++++++++++++++ 3 files changed, 57 insertions(+) create mode 100644 docs/changelog/106708.yaml diff --git a/docs/changelog/106708.yaml b/docs/changelog/106708.yaml new file mode 100644 index 0000000000000..b8fdd37e5f03f --- /dev/null +++ b/docs/changelog/106708.yaml @@ -0,0 +1,6 @@ +pr: 106708 +summary: Improve error message when rolling over DS alias +area: Data streams +type: bug +issues: + - 106137 diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index c295ccde01623..774bfae53fb94 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -215,6 +215,16 @@ protected void masterOperation( } } + final IndexAbstraction rolloverTargetAbstraction = clusterState.metadata() + .getIndicesLookup() + .get(rolloverRequest.getRolloverTarget()); + if (rolloverTargetAbstraction.getType() == IndexAbstraction.Type.ALIAS && rolloverTargetAbstraction.isDataStreamRelated()) { + listener.onFailure( + new IllegalStateException("Aliases to data streams cannot be rolled over. 
Please rollover the data stream itself.") + ); + return; + } + IndicesStatsRequest statsRequest = new IndicesStatsRequest().indices(rolloverRequest.getRolloverTarget()) .clear() .indicesOptions(IndicesOptions.fromOptions(true, false, true, true)) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java index 814cff37e0708..db156f983220e 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -552,6 +552,47 @@ public void testLazyRolloverFails() throws Exception { } } + public void testRolloverAliasToDataStreamFails() throws Exception { + final IndexMetadata backingIndexMetadata = IndexMetadata.builder(".ds-logs-ds-000001") + .settings(settings(IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + final DataStream dataStream = new DataStream( + "logs-ds", + List.of(backingIndexMetadata.getIndex()), + 1, + Map.of(), + false, + false, + false, + false, + IndexMode.STANDARD + ); + Metadata.Builder metadataBuilder = Metadata.builder().put(backingIndexMetadata, false).put(dataStream); + metadataBuilder.put("ds-alias", dataStream.getName(), true, null); + final ClusterState stateBefore = ClusterState.builder(ClusterName.DEFAULT).metadata(metadataBuilder).build(); + + final TransportRolloverAction transportRolloverAction = new TransportRolloverAction( + mock(TransportService.class), + mockClusterService, + mockThreadPool, + mockActionFilters, + mockIndexNameExpressionResolver, + rolloverService, + mockClient, + mockAllocationService, + mockMetadataDataStreamService, + dataStreamAutoShardingService + ); + + final PlainActionFuture future = new PlainActionFuture<>(); + RolloverRequest rolloverRequest = new RolloverRequest("ds-alias", null); + transportRolloverAction.masterOperation(mock(CancellableTask.class), rolloverRequest, stateBefore, future); + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, future::actionGet); + assertThat(illegalStateException.getMessage(), containsString("Aliases to data streams cannot be rolled over.")); + } + private IndicesStatsResponse createIndicesStatResponse(String indexName, long totalDocs, long primariesDocs) { final CommonStats primaryStats = mock(CommonStats.class); when(primaryStats.getDocs()).thenReturn(new DocsStats(primariesDocs, 0, between(1, 10000))); From dc40eef38b0eee58ae1ea9572532fbd136413308 Mon Sep 17 00:00:00 2001 From: Tim Grein Date: Mon, 25 Mar 2024 22:16:00 +0100 Subject: [PATCH 51/79] [Inference API] OpenAI Completions API (#106476) --- .../org/elasticsearch/TransportVersions.java | 1 + .../org/elasticsearch/inference/TaskType.java | 1 + .../results/ChatCompletionResults.java | 124 ++++++++ .../action/openai/OpenAiActionCreator.java | 8 + .../action/openai/OpenAiActionVisitor.java | 3 + .../openai/OpenAiChatCompletionAction.java | 60 ++++ ...nAiCompletionExecutableRequestCreator.java | 62 ++++ .../OpenAiChatCompletionResponseHandler.java | 25 ++ .../openai/OpenAiResponseHandler.java | 7 +- .../openai/OpenAiChatCompletionRequest.java | 96 ++++++ .../OpenAiChatCompletionRequestEntity.java | 72 +++++ .../openai/OpenAiEmbeddingsRequest.java | 2 +- .../request/openai/OpenAiRequest.java | 12 + .../external/request/openai/OpenAiUtils.java | 4 + 
.../OpenAiChatCompletionResponseEntity.java | 102 ++++++ .../services/openai/OpenAiService.java | 11 +- .../services/openai/OpenAiServiceFields.java | 14 + .../completion/OpenAiChatCompletionModel.java | 84 +++++ ...enAiChatCompletionRequestTaskSettings.java | 52 +++ .../OpenAiChatCompletionServiceSettings.java | 176 +++++++++++ .../OpenAiChatCompletionTaskSettings.java | 106 +++++++ .../openai/OpenAiActionCreatorTests.java | 264 ++++++++++++++++ .../OpenAiChatCompletionActionTests.java | 297 ++++++++++++++++++ ...nAiChatCompletionResponseHandlerTests.java | 68 ++++ ...penAiChatCompletionRequestEntityTests.java | 53 ++++ .../OpenAiChatCompletionRequestTests.java | 132 ++++++++ ...enAiChatCompletionResponseEntityTests.java | 215 +++++++++++++ .../results/ChatCompletionResultsTests.java | 117 +++++++ .../services/openai/OpenAiServiceTests.java | 63 ++++ .../OpenAiChatCompletionModelTests.java | 66 ++++ ...hatCompletionRequestTaskSettingsTests.java | 47 +++ ...nAiChatCompletionServiceSettingsTests.java | 193 ++++++++++++ ...OpenAiChatCompletionTaskSettingsTests.java | 86 +++++ 33 files changed, 2620 insertions(+), 3 deletions(-) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ChatCompletionResults.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionAction.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiCompletionExecutableRequestCreator.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiChatCompletionResponseHandler.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequest.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestEntity.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiRequest.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiChatCompletionResponseEntity.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModel.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettings.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettings.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionActionTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiChatCompletionResponseHandlerTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestEntityTests.java create mode 100644 
x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiChatCompletionResponseEntityTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/ChatCompletionResultsTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModelTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettingsTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettingsTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettingsTests.java diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index b5070c5cbd065..c23d961119a74 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -151,6 +151,7 @@ static TransportVersion def(int id) { public static final TransportVersion AUTO_SHARDING_ROLLOVER_CONDITION = def(8_611_00_0); public static final TransportVersion KNN_QUERY_VECTOR_BUILDER = def(8_612_00_0); public static final TransportVersion USE_DATA_STREAM_GLOBAL_RETENTION = def(8_613_00_0); + public static final TransportVersion ML_COMPLETION_INFERENCE_SERVICE_ADDED = def(8_614_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/inference/TaskType.java b/server/src/main/java/org/elasticsearch/inference/TaskType.java index 5afedee873145..1e301ad796e90 100644 --- a/server/src/main/java/org/elasticsearch/inference/TaskType.java +++ b/server/src/main/java/org/elasticsearch/inference/TaskType.java @@ -21,6 +21,7 @@ public enum TaskType implements Writeable { TEXT_EMBEDDING, SPARSE_EMBEDDING, + COMPLETION, ANY { @Override public boolean isAnyOrSame(TaskType other) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ChatCompletionResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ChatCompletionResults.java new file mode 100644 index 0000000000000..50ca46d85190f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ChatCompletionResults.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.inference.results; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Writes a chat completion result in the following json format: + * { + * "completion": [ + * { + * "result": "some result 1" + * }, + * { + * "result": "some result 2" + * } + * ] + * } + * + */ +public record ChatCompletionResults(List results) implements InferenceServiceResults { + + public static final String NAME = "chat_completion_service_results"; + public static final String COMPLETION = TaskType.COMPLETION.name().toLowerCase(Locale.ROOT); + + public ChatCompletionResults(StreamInput in) throws IOException { + this(in.readCollectionAsList(Result::new)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray(COMPLETION); + for (Result result : results) { + result.toXContent(builder, params); + } + builder.endArray(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(results); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public List transformToCoordinationFormat() { + throw new UnsupportedOperationException(); + } + + @Override + public List transformToLegacyFormat() { + throw new UnsupportedOperationException(); + } + + public List getResults() { + return results; + } + + @Override + public Map asMap() { + Map map = new LinkedHashMap<>(); + map.put(COMPLETION, results.stream().map(Result::asMap).collect(Collectors.toList())); + + return map; + } + + public record Result(String content) implements Writeable, ToXContentObject { + + public static final String RESULT = "result"; + + public Result(StreamInput in) throws IOException { + this(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(content); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(RESULT, content); + builder.endObject(); + + return builder; + } + + @Override + public String toString() { + return Strings.toString(this); + } + + public Map asMap() { + return Map.of(RESULT, content); + } + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreator.java index 94583c634fb26..dc89240862e6a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreator.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreator.java @@ -10,6 +10,7 @@ import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import 
org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModel; import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; import java.util.Map; @@ -33,4 +34,11 @@ public ExecutableAction create(OpenAiEmbeddingsModel model, Map return new OpenAiEmbeddingsAction(sender, overriddenModel, serviceComponents); } + + @Override + public ExecutableAction create(OpenAiChatCompletionModel model, Map taskSettings) { + var overriddenModel = OpenAiChatCompletionModel.of(model, taskSettings); + + return new OpenAiChatCompletionAction(sender, overriddenModel, serviceComponents); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionVisitor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionVisitor.java index 52d9f2e2132a7..0f26e054d734b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionVisitor.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionVisitor.java @@ -8,10 +8,13 @@ package org.elasticsearch.xpack.inference.external.action.openai; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModel; import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; import java.util.Map; public interface OpenAiActionVisitor { ExecutableAction create(OpenAiEmbeddingsModel model, Map taskSettings); + + ExecutableAction create(OpenAiChatCompletionModel model, Map taskSettings); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionAction.java new file mode 100644 index 0000000000000..31fd6a35ef26b --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionAction.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action.openai; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.OpenAiCompletionExecutableRequestCreator; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModel; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.constructFailedToSendRequestMessage; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; + +public class OpenAiChatCompletionAction implements ExecutableAction { + + private final String errorMessage; + private final OpenAiCompletionExecutableRequestCreator requestCreator; + + private final Sender sender; + + public OpenAiChatCompletionAction(Sender sender, OpenAiChatCompletionModel model, ServiceComponents serviceComponents) { + Objects.requireNonNull(serviceComponents); + Objects.requireNonNull(model); + this.sender = Objects.requireNonNull(sender); + this.requestCreator = new OpenAiCompletionExecutableRequestCreator(model); + this.errorMessage = constructFailedToSendRequestMessage(model.getServiceSettings().uri(), "OpenAI chat completions"); + } + + @Override + public void execute(List input, ActionListener listener) { + if (input.size() > 1) { + listener.onFailure(new ElasticsearchStatusException("OpenAI completions only accepts 1 input", RestStatus.BAD_REQUEST)); + return; + } + + try { + ActionListener wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); + + sender.send(requestCreator, input, wrappedListener); + } catch (ElasticsearchException e) { + listener.onFailure(e); + } catch (Exception e) { + listener.onFailure(createInternalServerError(e, errorMessage)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiCompletionExecutableRequestCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiCompletionExecutableRequestCreator.java new file mode 100644 index 0000000000000..44ab670843335 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiCompletionExecutableRequestCreator.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.openai.OpenAiAccount; +import org.elasticsearch.xpack.inference.external.openai.OpenAiChatCompletionResponseHandler; +import org.elasticsearch.xpack.inference.external.request.openai.OpenAiChatCompletionRequest; +import org.elasticsearch.xpack.inference.external.response.openai.OpenAiChatCompletionResponseEntity; +import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModel; + +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +public class OpenAiCompletionExecutableRequestCreator implements ExecutableRequestCreator { + + private static final Logger logger = LogManager.getLogger(OpenAiCompletionExecutableRequestCreator.class); + + private static final ResponseHandler HANDLER = createCompletionHandler(); + + private final OpenAiChatCompletionModel model; + + private final OpenAiAccount account; + + public OpenAiCompletionExecutableRequestCreator(OpenAiChatCompletionModel model) { + this.model = Objects.requireNonNull(model); + this.account = new OpenAiAccount( + this.model.getServiceSettings().uri(), + this.model.getServiceSettings().organizationId(), + this.model.getSecretSettings().apiKey() + ); + } + + @Override + public Runnable create( + List input, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + HttpClientContext context, + ActionListener listener + ) { + OpenAiChatCompletionRequest request = new OpenAiChatCompletionRequest(account, input, model); + + return new ExecutableInferenceRequest(requestSender, logger, request, context, HANDLER, hasRequestCompletedFunction, listener); + } + + private static ResponseHandler createCompletionHandler() { + return new OpenAiChatCompletionResponseHandler("openai completion", OpenAiChatCompletionResponseEntity::fromResponse); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiChatCompletionResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiChatCompletionResponseHandler.java new file mode 100644 index 0000000000000..5924356e610a3 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiChatCompletionResponseHandler.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.openai; + +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseParser; +import org.elasticsearch.xpack.inference.external.http.retry.RetryException; +import org.elasticsearch.xpack.inference.external.request.Request; + +public class OpenAiChatCompletionResponseHandler extends OpenAiResponseHandler { + public OpenAiChatCompletionResponseHandler(String requestType, ResponseParser parseFunction) { + super(requestType, parseFunction); + } + + @Override + RetryException buildExceptionHandling429(Request request, HttpResult result) { + // We don't retry, if the chat completion input is too large + return new RetryException(false, buildError(RATE_LIMIT, request, result)); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiResponseHandler.java index 10083d3fd4667..db7ca8d6bdc63 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiResponseHandler.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiResponseHandler.java @@ -35,6 +35,7 @@ public class OpenAiResponseHandler extends BaseResponseHandler { static final String REMAINING_TOKENS = "x-ratelimit-remaining-tokens"; static final String CONTENT_TOO_LARGE_MESSAGE = "Please reduce your prompt; or completion length."; + static final String OPENAI_SERVER_BUSY = "Received a server busy error status code"; public OpenAiResponseHandler(String requestType, ResponseParser parseFunction) { @@ -70,7 +71,7 @@ void checkForFailureStatusCode(Request request, HttpResult result) throws RetryE } else if (statusCode > 500) { throw new RetryException(false, buildError(SERVER_ERROR, request, result)); } else if (statusCode == 429) { - throw new RetryException(true, buildError(buildRateLimitErrorMessage(result), request, result)); + throw buildExceptionHandling429(request, result); } else if (isContentTooLarge(result)) { throw new ContentTooLargeException(buildError(CONTENT_TOO_LARGE, request, result)); } else if (statusCode == 401) { @@ -82,6 +83,10 @@ void checkForFailureStatusCode(Request request, HttpResult result) throws RetryE } } + RetryException buildExceptionHandling429(Request request, HttpResult result) { + return new RetryException(true, buildError(buildRateLimitErrorMessage(result), request, result)); + } + private static boolean isContentTooLarge(HttpResult result) { int statusCode = result.response().getStatusLine().getStatusCode(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequest.java new file mode 100644 index 0000000000000..e53d4e7362735 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequest.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.apache.http.HttpHeaders; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.utils.URIBuilder; +import org.apache.http.entity.ByteArrayEntity; +import org.elasticsearch.common.Strings; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.openai.OpenAiAccount; +import org.elasticsearch.xpack.inference.external.request.HttpRequest; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModel; + +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.RequestUtils.buildUri; +import static org.elasticsearch.xpack.inference.external.request.RequestUtils.createAuthBearerHeader; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.createOrgHeader; + +public class OpenAiChatCompletionRequest implements OpenAiRequest { + + private final OpenAiAccount account; + private final List input; + private final URI uri; + private final OpenAiChatCompletionModel model; + + public OpenAiChatCompletionRequest(OpenAiAccount account, List input, OpenAiChatCompletionModel model) { + this.account = Objects.requireNonNull(account); + this.input = Objects.requireNonNull(input); + this.uri = buildUri(this.account.url(), "OpenAI", OpenAiChatCompletionRequest::buildDefaultUri); + this.model = Objects.requireNonNull(model); + } + + @Override + public HttpRequest createHttpRequest() { + HttpPost httpPost = new HttpPost(uri); + + ByteArrayEntity byteEntity = new ByteArrayEntity( + Strings.toString( + new OpenAiChatCompletionRequestEntity(input, model.getServiceSettings().modelId(), model.getTaskSettings().user()) + ).getBytes(StandardCharsets.UTF_8) + ); + httpPost.setEntity(byteEntity); + + httpPost.setHeader(HttpHeaders.CONTENT_TYPE, XContentType.JSON.mediaType()); + httpPost.setHeader(createAuthBearerHeader(account.apiKey())); + + var org = account.organizationId(); + if (org != null) { + httpPost.setHeader(createOrgHeader(org)); + } + + return new HttpRequest(httpPost, getInferenceEntityId()); + } + + @Override + public URI getURI() { + return uri; + } + + @Override + public Request truncate() { + // No truncation for OpenAI chat completions + return this; + } + + @Override + public boolean[] getTruncationInfo() { + // No truncation for OpenAI chat completions + return null; + } + + @Override + public String getInferenceEntityId() { + return model.getInferenceEntityId(); + } + + // default for testing + static URI buildDefaultUri() throws URISyntaxException { + return new URIBuilder().setScheme("https") + .setHost(OpenAiUtils.HOST) + .setPathSegments(OpenAiUtils.VERSION_1, OpenAiUtils.CHAT_PATH, OpenAiUtils.COMPLETIONS_PATH) + .build(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestEntity.java new file mode 100644 index 0000000000000..c9aa225c77941 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestEntity.java @@ -0,0 +1,72 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public class OpenAiChatCompletionRequestEntity implements ToXContentObject { + + private static final String MESSAGES_FIELD = "messages"; + private static final String MODEL_FIELD = "model"; + + private static final String NUMBER_OF_RETURNED_CHOICES_FIELD = "n"; + + private static final String ROLE_FIELD = "role"; + private static final String USER_FIELD = "user"; + private static final String CONTENT_FIELD = "content"; + + private final List messages; + private final String model; + + private final String user; + + public OpenAiChatCompletionRequestEntity(List messages, String model, String user) { + Objects.requireNonNull(messages); + Objects.requireNonNull(model); + + this.messages = messages; + this.model = model; + this.user = user; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray(MESSAGES_FIELD); + { + for (String message : messages) { + builder.startObject(); + + { + builder.field(ROLE_FIELD, USER_FIELD); + builder.field(CONTENT_FIELD, message); + } + + builder.endObject(); + } + } + builder.endArray(); + + builder.field(MODEL_FIELD, model); + builder.field(NUMBER_OF_RETURNED_CHOICES_FIELD, 1); + + if (Strings.isNullOrEmpty(user) == false) { + builder.field(USER_FIELD, user); + } + + builder.endObject(); + + return builder; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequest.java index 9893b556e1a47..df5d3024fd483 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequest.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequest.java @@ -28,7 +28,7 @@ import static org.elasticsearch.xpack.inference.external.request.RequestUtils.createAuthBearerHeader; import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.createOrgHeader; -public class OpenAiEmbeddingsRequest implements Request { +public class OpenAiEmbeddingsRequest implements OpenAiRequest { private final Truncator truncator; private final OpenAiAccount account; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiRequest.java new file mode 100644 index 0000000000000..7a630108cfcdf --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiRequest.java @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.elasticsearch.xpack.inference.external.request.Request; + +public interface OpenAiRequest extends Request {} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUtils.java index a6479b3ecde25..1199f8a4f0230 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUtils.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUtils.java @@ -14,6 +14,10 @@ public class OpenAiUtils { public static final String HOST = "api.openai.com"; public static final String VERSION_1 = "v1"; public static final String EMBEDDINGS_PATH = "embeddings"; + + public static final String CHAT_PATH = "chat"; + + public static final String COMPLETIONS_PATH = "completions"; public static final String ORGANIZATION_HEADER = "OpenAI-Organization"; public static Header createOrgHeader(String org) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiChatCompletionResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiChatCompletionResponseEntity.java new file mode 100644 index 0000000000000..daf4e6578240e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiChatCompletionResponseEntity.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.response.openai; + +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.request.Request; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.xpack.inference.external.response.XContentUtils.moveToFirstToken; +import static org.elasticsearch.xpack.inference.external.response.XContentUtils.positionParserAtTokenAfterField; + +public class OpenAiChatCompletionResponseEntity { + + private static final String FAILED_TO_FIND_FIELD_TEMPLATE = "Failed to find required field [%s] in OpenAI chat completions response"; + + /** + * Parses the OpenAI chat completion response. + * For a request like: + * + *
+     * <pre>
+     *     <code>
    +     *         {
    +     *             "inputs": ["Please summarize this text: some text", "Answer the following question: Question"]
    +     *         }
+     *     </code>
+     * </pre>
+     *
+     * The response would look like:
+     *
+     * <pre>
+     *     <code>
    +     *         {
    +     *              "id": "chatcmpl-123",
    +     *              "object": "chat.completion",
    +     *              "created": 1677652288,
    +     *              "model": "gpt-3.5-turbo-0613",
    +     *              "system_fingerprint": "fp_44709d6fcb",
    +     *              "choices": [
    +     *                  {
    +     *                      "index": 0,
    +     *                      "message": {
    +     *                          "role": "assistant",
    +     *                          "content": "\n\nHello there, how may I assist you today?",
    +    *                          },
    +     *                      "logprobs": null,
    +     *                      "finish_reason": "stop"
    +     *                  }
    +     *              ],
    +     *              "usage": {
    +     *                "prompt_tokens": 9,
    +     *                "completion_tokens": 12,
    +     *                "total_tokens": 21
    +     *              }
    +     *          }
    +     *
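    +     * Only the first entry of the "choices" array is parsed: the parser is positioned after the
    +     * "choices" field, then after the nested "message" object, and the value of "content" is
    +     * returned as a single {@link ChatCompletionResults.Result}.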
    + */ + + public static ChatCompletionResults fromResponse(Request request, HttpResult response) throws IOException { + var parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); + try (XContentParser jsonParser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, response.body())) { + moveToFirstToken(jsonParser); + + XContentParser.Token token = jsonParser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, jsonParser); + + positionParserAtTokenAfterField(jsonParser, "choices", FAILED_TO_FIND_FIELD_TEMPLATE); + + jsonParser.nextToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, jsonParser.currentToken(), jsonParser); + + positionParserAtTokenAfterField(jsonParser, "message", FAILED_TO_FIND_FIELD_TEMPLATE); + + token = jsonParser.currentToken(); + + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, jsonParser); + + positionParserAtTokenAfterField(jsonParser, "content", FAILED_TO_FIND_FIELD_TEMPLATE); + + XContentParser.Token contentToken = jsonParser.currentToken(); + ensureExpectedToken(XContentParser.Token.VALUE_STRING, contentToken, jsonParser); + String content = jsonParser.text(); + + return new ChatCompletionResults(List.of(new ChatCompletionResults.Result(content))); + } + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java index 130928b17ff8d..83e5eef45fda4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java @@ -29,6 +29,7 @@ import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.ServiceUtils; +import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModel; import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsServiceSettings; @@ -127,6 +128,14 @@ private static OpenAiModel createModel( secretSettings, context ); + case COMPLETION -> new OpenAiChatCompletionModel( + inferenceEntityId, + taskType, + NAME, + serviceSettings, + taskSettings, + secretSettings + ); default -> throw new ElasticsearchStatusException(failureMessage, RestStatus.BAD_REQUEST); }; } @@ -254,7 +263,7 @@ private OpenAiEmbeddingsModel updateModelWithEmbeddingDetails(OpenAiEmbeddingsMo @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_8_12_0; + return TransportVersions.ML_COMPLETION_INFERENCE_SERVICE_ADDED; } /** diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java new file mode 100644 index 0000000000000..1e2353f901705 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai; + +public class OpenAiServiceFields { + + public static final String USER = "user"; + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModel.java new file mode 100644 index 0000000000000..467c4f44f34fe --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModel.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai.completion; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionVisitor; +import org.elasticsearch.xpack.inference.services.openai.OpenAiModel; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; + +import java.util.Map; + +public class OpenAiChatCompletionModel extends OpenAiModel { + + public static OpenAiChatCompletionModel of(OpenAiChatCompletionModel model, Map taskSettings) { + if (taskSettings == null || taskSettings.isEmpty()) { + return model; + } + + var requestTaskSettings = OpenAiChatCompletionRequestTaskSettings.fromMap(taskSettings); + return new OpenAiChatCompletionModel(model, OpenAiChatCompletionTaskSettings.of(model.getTaskSettings(), requestTaskSettings)); + } + + public OpenAiChatCompletionModel( + String inferenceEntityId, + TaskType taskType, + String service, + Map serviceSettings, + Map taskSettings, + @Nullable Map secrets + ) { + this( + inferenceEntityId, + taskType, + service, + OpenAiChatCompletionServiceSettings.fromMap(serviceSettings), + OpenAiChatCompletionTaskSettings.fromMap(taskSettings), + DefaultSecretSettings.fromMap(secrets) + ); + } + + OpenAiChatCompletionModel( + String modelId, + TaskType taskType, + String service, + OpenAiChatCompletionServiceSettings serviceSettings, + OpenAiChatCompletionTaskSettings taskSettings, + @Nullable DefaultSecretSettings secrets + ) { + super(new ModelConfigurations(modelId, taskType, service, serviceSettings, taskSettings), new ModelSecrets(secrets)); + } + + private OpenAiChatCompletionModel(OpenAiChatCompletionModel originalModel, OpenAiChatCompletionTaskSettings taskSettings) { + super(originalModel, taskSettings); + } + + @Override + public OpenAiChatCompletionServiceSettings getServiceSettings() { + return (OpenAiChatCompletionServiceSettings) super.getServiceSettings(); + } + + @Override + public OpenAiChatCompletionTaskSettings getTaskSettings() { + return (OpenAiChatCompletionTaskSettings) super.getTaskSettings(); + } + + @Override + public DefaultSecretSettings getSecretSettings() { + return (DefaultSecretSettings) super.getSecretSettings(); + } + + @Override + public ExecutableAction 
accept(OpenAiActionVisitor creator, Map taskSettings) { + return creator.create(this, taskSettings); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettings.java new file mode 100644 index 0000000000000..8029d8579baba --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettings.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai.completion; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; + +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.USER; + +/** + * This class handles extracting OpenAI task settings from a request. The difference between this class and + * {@link OpenAiChatCompletionTaskSettings} is that this class considers all fields as optional. It will not throw an error if a field + * is missing. This allows overriding persistent task settings. + * @param user a unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse + */ +public record OpenAiChatCompletionRequestTaskSettings(@Nullable String user) { + + public static final OpenAiChatCompletionRequestTaskSettings EMPTY_SETTINGS = new OpenAiChatCompletionRequestTaskSettings(null); + + /** + * Extracts the task settings from a map. All settings are considered optional and the absence of a setting + * does not throw an error. + * + * @param map the settings received from a request + * @return a {@link OpenAiChatCompletionRequestTaskSettings} + */ + public static OpenAiChatCompletionRequestTaskSettings fromMap(Map map) { + if (map.isEmpty()) { + return OpenAiChatCompletionRequestTaskSettings.EMPTY_SETTINGS; + } + + ValidationException validationException = new ValidationException(); + + String user = extractOptionalString(map, USER, ModelConfigurations.TASK_SETTINGS, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new OpenAiChatCompletionRequestTaskSettings(user); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java new file mode 100644 index 0000000000000..0150d75b7037e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai.completion; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.net.URI; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.ServiceFields.MAX_INPUT_TOKENS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MODEL_ID; +import static org.elasticsearch.xpack.inference.services.ServiceFields.URL; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.convertToUri; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.createOptionalUri; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeAsType; + +/** + * Defines the service settings for interacting with OpenAI's chat completion models. + */ +public class OpenAiChatCompletionServiceSettings implements ServiceSettings { + + public static final String NAME = "openai_completion_service_settings"; + + static final String ORGANIZATION = "organization_id"; + + public static OpenAiChatCompletionServiceSettings fromMap(Map map) { + ValidationException validationException = new ValidationException(); + + String modelId = extractRequiredString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); + String organizationId = extractOptionalString(map, ORGANIZATION, ModelConfigurations.SERVICE_SETTINGS, validationException); + + String url = extractOptionalString(map, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); + URI uri = convertToUri(url, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); + + Integer maxInputTokens = removeAsType(map, MAX_INPUT_TOKENS, Integer.class); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new OpenAiChatCompletionServiceSettings(modelId, uri, organizationId, maxInputTokens); + } + + private final String modelId; + + private final URI uri; + + private final String organizationId; + + private final Integer maxInputTokens; + + public OpenAiChatCompletionServiceSettings( + String modelId, + @Nullable URI uri, + @Nullable String organizationId, + @Nullable Integer maxInputTokens + ) { + this.modelId = modelId; + this.uri = uri; + this.organizationId = organizationId; + this.maxInputTokens = maxInputTokens; + } + + OpenAiChatCompletionServiceSettings( + String modelId, + @Nullable String uri, + @Nullable String organizationId, + @Nullable Integer maxInputTokens + ) { + this(modelId, createOptionalUri(uri), organizationId, maxInputTokens); + } + + public OpenAiChatCompletionServiceSettings(StreamInput in) throws IOException { + this.modelId = in.readString(); + this.uri = createOptionalUri(in.readOptionalString()); 
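    +        // organizationId and maxInputTokens are optional on the wire, read in the same order they are written in writeTo().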
+ this.organizationId = in.readOptionalString(); + this.maxInputTokens = in.readOptionalVInt(); + } + + public String modelId() { + return modelId; + } + + public URI uri() { + return uri; + } + + public String organizationId() { + return organizationId; + } + + public Integer maxInputTokens() { + return maxInputTokens; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + { + builder.field(MODEL_ID, modelId); + + if (uri != null) { + builder.field(URL, uri.toString()); + } + + if (organizationId != null) { + builder.field(ORGANIZATION, organizationId); + } + + if (maxInputTokens != null) { + builder.field(MAX_INPUT_TOKENS, maxInputTokens); + } + } + + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_COMPLETION_INFERENCE_SERVICE_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(modelId); + out.writeOptionalString(uri != null ? uri.toString() : null); + out.writeOptionalString(organizationId); + out.writeOptionalVInt(maxInputTokens); + } + + @Override + public ToXContentObject getFilteredXContentObject() { + return this; + } + + @Override + public boolean equals(Object object) { + if (this == object) return true; + if (object == null || getClass() != object.getClass()) return false; + OpenAiChatCompletionServiceSettings that = (OpenAiChatCompletionServiceSettings) object; + return Objects.equals(modelId, that.modelId) + && Objects.equals(uri, that.uri) + && Objects.equals(organizationId, that.organizationId) + && Objects.equals(maxInputTokens, that.maxInputTokens); + } + + @Override + public int hashCode() { + return Objects.hash(modelId, uri, organizationId, maxInputTokens); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettings.java new file mode 100644 index 0000000000000..fb10d959087de --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettings.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.openai.completion; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; + +public class OpenAiChatCompletionTaskSettings implements TaskSettings { + + public static final String NAME = "openai_completion_task_settings"; + + public static final String USER = "user"; + + public static OpenAiChatCompletionTaskSettings fromMap(Map map) { + ValidationException validationException = new ValidationException(); + + String user = extractOptionalString(map, USER, ModelConfigurations.TASK_SETTINGS, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new OpenAiChatCompletionTaskSettings(user); + } + + private final String user; + + public OpenAiChatCompletionTaskSettings(@Nullable String user) { + this.user = user; + } + + public OpenAiChatCompletionTaskSettings(StreamInput in) throws IOException { + this.user = in.readOptionalString(); + } + + public static OpenAiChatCompletionTaskSettings of( + OpenAiChatCompletionTaskSettings originalSettings, + OpenAiChatCompletionRequestTaskSettings requestSettings + ) { + var userToUse = requestSettings.user() == null ? 
originalSettings.user : requestSettings.user(); + return new OpenAiChatCompletionTaskSettings(userToUse); + } + + public String user() { + return user; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + if (user != null) { + builder.field(USER, user); + } + + builder.endObject(); + + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_COMPLETION_INFERENCE_SERVICE_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(user); + } + + @Override + public boolean equals(Object object) { + if (this == object) return true; + if (object == null || getClass() != object.getClass()) return false; + OpenAiChatCompletionTaskSettings that = (OpenAiChatCompletionTaskSettings) object; + return Objects.equals(user, that.user); + } + + @Override + public int hashCode() { + return Objects.hash(user); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java index a844061fa48e1..9b14cf259522c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java @@ -27,17 +27,21 @@ import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.concurrent.TimeUnit; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.action.openai.OpenAiChatCompletionActionTests.buildExpectedChatCompletionResultMap; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; import static org.elasticsearch.xpack.inference.external.http.retry.RetrySettingsTests.buildSettingsWithRetryFields; import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectation; import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModelTests.createChatCompletionModel; +import static org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionRequestTaskSettingsTests.getChatCompletionRequestTaskSettingsMap; import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModelTests.createModel; import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsRequestTaskSettingsTests.getRequestTaskSettingsMap; import static org.hamcrest.Matchers.equalTo; @@ -283,6 +287,266 @@ public void testCreate_OpenAiEmbeddingsModel_FailsFromInvalidResponseFormat() th } } + public void testCreate_OpenAiChatCompletionModel() throws IOException { + var senderFactory = 
HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": "fp_44709d6fcb", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Hello there, how may I assist you today?" + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21 + } + } + """; + + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = createChatCompletionModel(getUrl(webServer), "org", "secret", "model", "user"); + var actionCreator = new OpenAiActionCreator(sender, createWithEmptySettings(threadPool)); + var overriddenTaskSettings = getChatCompletionRequestTaskSettingsMap("overridden_user"); + var action = actionCreator.create(model, overriddenTaskSettings); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectedChatCompletionResultMap(List.of("Hello there, how may I assist you today?")))); + assertThat(webServer.requests(), hasSize(1)); + + var request = webServer.requests().get(0); + + assertNull(request.getUri().getQuery()); + assertThat(request.getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(request.getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(request.getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(4)); + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("overridden_user")); + assertThat(requestMap.get("n"), is(1)); + } + } + + public void testCreate_OpenAiChatCompletionModel_WithoutUser() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": "fp_44709d6fcb", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Hello there, how may I assist you today?" 
+ }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21 + } + } + """; + + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = createChatCompletionModel(getUrl(webServer), "org", "secret", "model", null); + var actionCreator = new OpenAiActionCreator(sender, createWithEmptySettings(threadPool)); + var overriddenTaskSettings = getChatCompletionRequestTaskSettingsMap(null); + var action = actionCreator.create(model, overriddenTaskSettings); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectedChatCompletionResultMap(List.of("Hello there, how may I assist you today?")))); + assertThat(webServer.requests(), hasSize(1)); + + var request = webServer.requests().get(0); + + assertNull(request.getUri().getQuery()); + assertThat(request.getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(request.getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(request.getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("n"), is(1)); + } + } + + public void testCreate_OpenAiChatCompletionModel_WithoutOrganization() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": "fp_44709d6fcb", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Hello there, how may I assist you today?" 
+ }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21 + } + } + """; + + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = createChatCompletionModel(getUrl(webServer), null, "secret", "model", null); + var actionCreator = new OpenAiActionCreator(sender, createWithEmptySettings(threadPool)); + var overriddenTaskSettings = getChatCompletionRequestTaskSettingsMap("overridden_user"); + var action = actionCreator.create(model, overriddenTaskSettings); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectedChatCompletionResultMap(List.of("Hello there, how may I assist you today?")))); + assertThat(webServer.requests(), hasSize(1)); + + var request = webServer.requests().get(0); + + assertNull(request.getUri().getQuery()); + assertThat(request.getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(request.getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertNull(request.getHeader(ORGANIZATION_HEADER)); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(4)); + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("overridden_user")); + assertThat(requestMap.get("n"), is(1)); + } + } + + public void testCreate_OpenAiChatCompletionModel_FailsFromInvalidResponseFormat() throws IOException { + // timeout as zero for no retries + var settings = buildSettingsWithRetryFields( + TimeValue.timeValueMillis(1), + TimeValue.timeValueMinutes(1), + TimeValue.timeValueSeconds(0) + ); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager, settings); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": "fp_44709d6fcb", + "data_does_not_exist": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Hello there, how may I assist you today?" 
+ }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = createChatCompletionModel(getUrl(webServer), null, "secret", "model", null); + var actionCreator = new OpenAiActionCreator(sender, createWithEmptySettings(threadPool)); + var overriddenTaskSettings = getChatCompletionRequestTaskSettingsMap("overridden_user"); + var action = actionCreator.create(model, overriddenTaskSettings); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + assertThat( + thrownException.getMessage(), + is(format("Failed to send OpenAI chat completions request to [%s]", getUrl(webServer))) + ); + assertThat( + thrownException.getCause().getMessage(), + is("Failed to find required field [choices] in OpenAI chat completions response") + ); + + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertNull(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER)); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(4)); + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("overridden_user")); + assertThat(requestMap.get("n"), is(1)); + } + } + public void testExecute_ReturnsSuccessfulResponse_AfterTruncating_From413StatusCode() throws IOException { var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionActionTests.java new file mode 100644 index 0000000000000..15998469d08d0 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionActionTests.java @@ -0,0 +1,297 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action.openai; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockRequest; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.hamcrest.CoreMatchers; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModelTests.createChatCompletionModel; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; + +public class OpenAiChatCompletionActionTests extends ESTestCase { + + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + public void testExecute_ReturnsSuccessfulResponse() throws IOException { + var senderFactory = new HttpRequestSender.Factory(createWithEmptySettings(threadPool), clientManager, mockClusterServiceEmpty()); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String 
responseJson = """ + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_44709d6fcb", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "result content" + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21 + } + } + """; + + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectedChatCompletionResultMap(List.of("result content")))); + assertThat(webServer.requests(), hasSize(1)); + + MockRequest request = webServer.requests().get(0); + + assertNull(request.getUri().getQuery()); + assertThat(request.getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(request.getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(request.getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(request.getBody()); + assertThat(requestMap.size(), is(4)); + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("user")); + assertThat(requestMap.get("n"), is(1)); + } + } + + public void testExecute_ThrowsURISyntaxException_ForInvalidUrl() throws IOException { + try (var sender = mock(Sender.class)) { + var thrownException = expectThrows( + IllegalArgumentException.class, + () -> createAction("^^", "org", "secret", "model", "user", sender) + ); + assertThat(thrownException.getMessage(), is("unable to parse url [^^]")); + } + } + + public void testExecute_ThrowsElasticsearchException() { + var sender = mock(Sender.class); + doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any(), any()); + + var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is("failed")); + } + + public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled() { + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onFailure(new IllegalStateException("failed")); + + return Void.TYPE; + }).when(sender).send(any(), any(), any()); + + var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is(format("Failed to send OpenAI chat completions request to [%s]", getUrl(webServer)))); + } + + public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled_WhenUrlIsNull() { + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + 
ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onFailure(new IllegalStateException("failed")); + + return Void.TYPE; + }).when(sender).send(any(), any(), any()); + + var action = createAction(null, "org", "secret", "model", "user", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is("Failed to send OpenAI chat completions request")); + } + + public void testExecute_ThrowsException() { + var sender = mock(Sender.class); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any()); + + var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is(format("Failed to send OpenAI chat completions request to [%s]", getUrl(webServer)))); + } + + public void testExecute_ThrowsExceptionWithNullUrl() { + var sender = mock(Sender.class); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any()); + + var action = createAction(null, "org", "secret", "model", "user", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is("Failed to send OpenAI chat completions request")); + } + + public void testExecute_ThrowsException_WhenInputIsGreaterThanOne() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": "fp_44709d6fcb", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Hello there, how may I assist you today?" 
+ }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21 + } + } + """; + + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc", "def"), listener); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), CoreMatchers.is("OpenAI completions only accepts 1 input")); + assertThat(thrownException.status(), CoreMatchers.is(RestStatus.BAD_REQUEST)); + } + } + + public static Map buildExpectedChatCompletionResultMap(List results) { + return Map.of( + ChatCompletionResults.COMPLETION, + results.stream().map(result -> Map.of(ChatCompletionResults.Result.RESULT, result)).toList() + ); + } + + private OpenAiChatCompletionAction createAction( + String url, + String org, + String apiKey, + String modelName, + @Nullable String user, + Sender sender + ) { + var model = createChatCompletionModel(url, org, apiKey, modelName, user); + + return new OpenAiChatCompletionAction(sender, model, createWithEmptySettings(threadPool)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiChatCompletionResponseHandlerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiChatCompletionResponseHandlerTests.java new file mode 100644 index 0000000000000..5c3585b630073 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiChatCompletionResponseHandlerTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.openai; + +import org.apache.http.Header; +import org.apache.http.HeaderElement; +import org.apache.http.HttpResponse; +import org.apache.http.StatusLine; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.retry.RetryException; +import org.elasticsearch.xpack.inference.external.request.RequestTests; + +import java.io.ByteArrayInputStream; +import java.nio.charset.StandardCharsets; + +import static org.hamcrest.core.Is.is; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class OpenAiChatCompletionResponseHandlerTests extends ESTestCase { + + public void testHandle429InputAndOutputTokensTooLarge_ThrowWithoutRetrying() { + String responseBody = """ + { + "error": { + "message": "The input or output tokens must be reduced in order to run successfully", + "type": "content_too_large", + "param": null, + "code": null + } + } + """; + ByteArrayInputStream responseBodyStream = new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)); + + var header = mock(Header.class); + when(header.getElements()).thenReturn(new HeaderElement[] {}); + + var statusLine = mock(StatusLine.class); + when(statusLine.getStatusCode()).thenReturn(429); + + var httpResponse = mock(HttpResponse.class); + when(httpResponse.getFirstHeader(anyString())).thenReturn(header); + when(httpResponse.getStatusLine()).thenReturn(statusLine); + + var mockRequest = RequestTests.mockRequest("id"); + var httpResult = new HttpResult(httpResponse, responseBodyStream.readAllBytes()); + var handler = new OpenAiChatCompletionResponseHandler("", (request, result) -> null); + + var retryException = expectThrows(RetryException.class, () -> handler.checkForFailureStatusCode(mockRequest, httpResult)); + + assertFalse(retryException.shouldRetry()); + assertThat( + retryException.getCause().getMessage(), + is( + "Received a rate limit status code for request from inference entity id [id] status [429]. " + + "Error message: [The input or output tokens must be reduced in order to run successfully]" + ) + ); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..0b61bf060fc5f --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestEntityTests.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.CoreMatchers.is; + +public class OpenAiChatCompletionRequestEntityTests extends ESTestCase { + + public void testXContent_WritesUserWhenDefined() throws IOException { + var entity = new OpenAiChatCompletionRequestEntity(List.of("abc"), "model", "user"); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"messages":[{"role":"user","content":"abc"}],"model":"model","n":1,"user":"user"}""")); + + } + + public void testXContent_DoesNotWriteUserWhenItIsNull() throws IOException { + var entity = new OpenAiChatCompletionRequestEntity(List.of("abc"), "model", null); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"messages":[{"role":"user","content":"abc"}],"model":"model","n":1}""")); + } + + public void testXContent_ThrowsIfModelIsNull() { + assertThrows(NullPointerException.class, () -> new OpenAiChatCompletionRequestEntity(List.of("abc"), null, "user")); + } + + public void testXContent_ThrowsIfMessagesAreNull() { + assertThrows(NullPointerException.class, () -> new OpenAiChatCompletionRequestEntity(null, "model", "user")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestTests.java new file mode 100644 index 0000000000000..7858bdf4d1259 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestTests.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.apache.http.HttpHeaders; +import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.openai.OpenAiAccount; +import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModelTests; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiChatCompletionRequest.buildDefaultUri; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class OpenAiChatCompletionRequestTests extends ESTestCase { + + public void testCreateRequest_WithUrlOrganizationUserDefined() throws IOException { + var request = createRequest("www.google.com", "org", "secret", "abc", "model", "user"); + var httpRequest = request.createHttpRequest(); + + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + + assertThat(httpPost.getURI().toString(), is("www.google.com")); + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is("Bearer secret")); + assertThat(httpPost.getLastHeader(ORGANIZATION_HEADER).getValue(), is("org")); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(4)); + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("user")); + assertThat(requestMap.get("n"), is(1)); + } + + public void testCreateRequest_WithDefaultUrl() throws URISyntaxException, IOException { + var request = createRequest(null, "org", "secret", "abc", "model", "user"); + var httpRequest = request.createHttpRequest(); + + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + + assertThat(httpPost.getURI().toString(), is(buildDefaultUri().toString())); + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is("Bearer secret")); + assertThat(httpPost.getLastHeader(ORGANIZATION_HEADER).getValue(), is("org")); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(4)); + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("user")); + assertThat(requestMap.get("n"), is(1)); + } + + public void testCreateRequest_WithDefaultUrlAndWithoutUserOrganization() throws URISyntaxException, IOException { + var request = createRequest(null, null, "secret", "abc", "model", null); + var httpRequest = request.createHttpRequest(); + + assertThat(httpRequest.httpRequestBase(), 
instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + + assertThat(httpPost.getURI().toString(), is(OpenAiChatCompletionRequest.buildDefaultUri().toString())); + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is("Bearer secret")); + assertNull(httpPost.getLastHeader(ORGANIZATION_HEADER)); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(3)); + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("n"), is(1)); + } + + public void testTruncate_DoesNotReduceInputTextSize() throws URISyntaxException, IOException { + var request = createRequest(null, null, "secret", "abcd", "model", null); + var truncatedRequest = request.truncate(); + assertThat(request.getURI().toString(), is(OpenAiChatCompletionRequest.buildDefaultUri().toString())); + + var httpRequest = truncatedRequest.createHttpRequest(); + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(3)); + + // We do not truncate for OpenAi chat completions + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abcd")))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("n"), is(1)); + } + + public void testTruncationInfo_ReturnsNull() { + var request = createRequest(null, null, "secret", "abcd", "model", null); + assertNull(request.getTruncationInfo()); + } + + public static OpenAiChatCompletionRequest createRequest( + @Nullable String url, + @Nullable String org, + String apiKey, + String input, + String model, + @Nullable String user + ) { + var chatCompletionModel = OpenAiChatCompletionModelTests.createChatCompletionModel(url, org, apiKey, model, user); + + var account = new OpenAiAccount( + chatCompletionModel.getServiceSettings().uri(), + org, + chatCompletionModel.getSecretSettings().apiKey() + ); + return new OpenAiChatCompletionRequest(account, List.of(input), chatCompletionModel); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiChatCompletionResponseEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiChatCompletionResponseEntityTests.java new file mode 100644 index 0000000000000..18f702014e2d8 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiChatCompletionResponseEntityTests.java @@ -0,0 +1,215 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response.openai; + +import org.apache.http.HttpResponse; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.request.Request; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class OpenAiChatCompletionResponseEntityTests extends ESTestCase { + + public void testFromResponse_CreatesResultsForASingleItem() throws IOException { + String responseJson = """ + { + "id": "some-id", + "object": "chat.completion", + "created": 1705397787, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "some content" + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 46, + "completion_tokens": 39, + "total_tokens": 85 + }, + "system_fingerprint": null + } + """; + + ChatCompletionResults chatCompletionResults = OpenAiChatCompletionResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat(chatCompletionResults.getResults().size(), equalTo(1)); + } + + public void testFromResponse_FailsWhenChoicesFieldIsNotPresent() { + String responseJson = """ + { + "id": "some-id", + "object": "chat.completion", + "created": 1705397787, + "model": "gpt-3.5-turbo-0613", + "not_choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "some content" + }, + "logprobs": null, + "finish_reason": "stop" + }, + ], + "usage": { + "prompt_tokens": 46, + "completion_tokens": 39, + "total_tokens": 85 + }, + "system_fingerprint": null + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> OpenAiChatCompletionResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat(thrownException.getMessage(), is("Failed to find required field [choices] in OpenAI chat completions response")); + } + + public void testFromResponse_FailsWhenChoicesFieldNotAnArray() { + String responseJson = """ + { + "id": "some-id", + "object": "chat.completion", + "created": 1705397787, + "model": "gpt-3.5-turbo-0613", + "choices": { + "test": { + "index": 0, + "message": { + "role": "assistant", + "content": "some content" + }, + "logprobs": null, + "finish_reason": "stop" + }, + }, + "usage": { + "prompt_tokens": 46, + "completion_tokens": 39, + "total_tokens": 85 + }, + "system_fingerprint": null + } + """; + + var thrownException = expectThrows( + ParsingException.class, + () -> OpenAiChatCompletionResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse object: expecting token of type [START_OBJECT] but found [FIELD_NAME]") + ); + } + + public void testFromResponse_FailsWhenMessageDoesNotExist() { + String responseJson = """ + { + "id": "some-id", + "object": "chat.completion", + "created": 1705397787, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "not_message": 
{ + "role": "assistant", + "content": "some content" + }, + "logprobs": null, + "finish_reason": "stop" + }, + ], + "usage": { + "prompt_tokens": 46, + "completion_tokens": 39, + "total_tokens": 85 + }, + "system_fingerprint": null + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> OpenAiChatCompletionResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat(thrownException.getMessage(), is("Failed to find required field [message] in OpenAI chat completions response")); + } + + public void testFromResponse_FailsWhenMessageValueIsAString() { + String responseJson = """ + { + "id": "some-id", + "object": "chat.completion", + "created": 1705397787, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": "some content", + "logprobs": null, + "finish_reason": "stop" + }, + ], + "usage": { + "prompt_tokens": 46, + "completion_tokens": 39, + "total_tokens": 85 + }, + "system_fingerprint": null + } + """; + + var thrownException = expectThrows( + ParsingException.class, + () -> OpenAiChatCompletionResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse object: expecting token of type [START_OBJECT] but found [VALUE_STRING]") + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/ChatCompletionResultsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/ChatCompletionResultsTests.java new file mode 100644 index 0000000000000..444f6792abe63 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/ChatCompletionResultsTests.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.results; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class ChatCompletionResultsTests extends AbstractWireSerializingTestCase { + + public void testToXContent_CreateTheRightFormatForASingleChatCompletionResult() { + String resultContent = "content"; + var result = new ChatCompletionResults(List.of(new ChatCompletionResults.Result(resultContent))); + + assertThat( + result.asMap(), + is(Map.of(ChatCompletionResults.COMPLETION, List.of(Map.of(ChatCompletionResults.Result.RESULT, resultContent)))) + ); + + String xContentResult = Strings.toString(result, true, true); + assertThat(xContentResult, is(""" + { + "completion" : [ + { + "result" : "content" + } + ] + }""")); + } + + public void testToXContent_CreatesTheRightFormatForMultipleCompletionResults() { + String resultOneContent = "content 1"; + String resultTwoContent = "content 2"; + + var entity = new ChatCompletionResults( + List.of(new ChatCompletionResults.Result(resultOneContent), new ChatCompletionResults.Result(resultTwoContent)) + ); + + assertThat( + entity.asMap(), + is( + Map.of( + ChatCompletionResults.COMPLETION, + List.of( + Map.of(ChatCompletionResults.Result.RESULT, resultOneContent), + Map.of(ChatCompletionResults.Result.RESULT, resultTwoContent) + ) + ) + ) + ); + + String xContentResult = Strings.toString(entity, true, true); + assertThat(xContentResult, is(""" + { + "completion" : [ + { + "result" : "content 1" + }, + { + "result" : "content 2" + } + ] + }""")); + } + + @Override + protected Writeable.Reader instanceReader() { + return ChatCompletionResults::new; + } + + @Override + protected ChatCompletionResults createTestInstance() { + return createRandomResults(); + } + + @Override + protected ChatCompletionResults mutateInstance(ChatCompletionResults instance) throws IOException { + // if true we reduce the chat results list by a random amount, if false we add a chat result to the list + if (randomBoolean()) { + // -1 to remove at least one item from the list + int end = randomInt(instance.results().size() - 1); + return new ChatCompletionResults(instance.results().subList(0, end)); + } else { + List completionResults = new ArrayList<>(instance.results()); + completionResults.add(createRandomChatCompletionResult()); + return new ChatCompletionResults(completionResults); + } + } + + public static ChatCompletionResults createRandomResults() { + int numOfCompletionResults = randomIntBetween(1, 10); + List chatCompletionResults = new ArrayList<>(numOfCompletionResults); + + for (int i = 0; i < numOfCompletionResults; i++) { + chatCompletionResults.add(createRandomChatCompletionResult()); + } + + return new ChatCompletionResults(chatCompletionResults); + } + + private static ChatCompletionResults.Result createRandomChatCompletionResult() { + return new ChatCompletionResults.Result(randomAlphaOfLengthBetween(10, 300)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index d819b2b243872..96a5b2d48e4e4 100644 --- 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.services.ServiceFields; +import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModel; import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModelTests; import org.hamcrest.MatcherAssert; @@ -119,6 +120,41 @@ public void testParseRequestConfig_CreatesAnOpenAiEmbeddingsModel() throws IOExc } } + public void testParseRequestConfig_CreatesAnOpenAiChatCompletionsModel() throws IOException { + var url = "url"; + var organization = "org"; + var model = "model"; + var user = "user"; + var secret = "secret"; + + try (var service = createOpenAiService()) { + ActionListener modelVerificationListener = ActionListener.wrap(m -> { + assertThat(m, instanceOf(OpenAiChatCompletionModel.class)); + + var completionsModel = (OpenAiChatCompletionModel) m; + + assertThat(completionsModel.getServiceSettings().uri().toString(), is(url)); + assertThat(completionsModel.getServiceSettings().organizationId(), is(organization)); + assertThat(completionsModel.getServiceSettings().modelId(), is(model)); + assertThat(completionsModel.getTaskSettings().user(), is(user)); + assertThat(completionsModel.getSecretSettings().apiKey().toString(), is(secret)); + + }, exception -> fail("Unexpected exception: " + exception)); + + service.parseRequestConfig( + "id", + TaskType.COMPLETION, + getRequestConfigMap( + getServiceSettingsMap(model, url, organization), + getTaskSettingsMap(user), + getSecretSettingsMap(secret) + ), + Set.of(), + modelVerificationListener + ); + } + } + public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOException { try (var service = createOpenAiService()) { ActionListener modelVerificationListener = ActionListener.wrap( @@ -244,6 +280,33 @@ public void testParseRequestConfig_CreatesAnOpenAiEmbeddingsModelWithoutUserUrlO } } + public void testParseRequestConfig_CreatesAnOpenAiChatCompletionsModelWithoutUserWithoutUserUrlOrganization() throws IOException { + var model = "model"; + var secret = "secret"; + + try (var service = createOpenAiService()) { + ActionListener modelVerificationListener = ActionListener.wrap(m -> { + assertThat(m, instanceOf(OpenAiChatCompletionModel.class)); + + var completionsModel = (OpenAiChatCompletionModel) m; + assertNull(completionsModel.getServiceSettings().uri()); + assertNull(completionsModel.getServiceSettings().organizationId()); + assertThat(completionsModel.getServiceSettings().modelId(), is(model)); + assertNull(completionsModel.getTaskSettings().user()); + assertThat(completionsModel.getSecretSettings().apiKey().toString(), is(secret)); + + }, exception -> fail("Unexpected exception: " + exception)); + + service.parseRequestConfig( + "id", + TaskType.COMPLETION, + getRequestConfigMap(getServiceSettingsMap(model, null, null), getTaskSettingsMap(null), getSecretSettingsMap(secret)), + Set.of(), + modelVerificationListener + ); + } + } + public void testParseRequestConfig_MovesModel() throws IOException { try (var service = createOpenAiService()) { ActionListener 
modelVerificationListener = ActionListener.wrap(model -> { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModelTests.java new file mode 100644 index 0000000000000..efc1fcc921ef3 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModelTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai.completion; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; + +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionRequestTaskSettingsTests.getChatCompletionRequestTaskSettingsMap; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; + +public class OpenAiChatCompletionModelTests extends ESTestCase { + + public void testOverrideWith_OverridesUser() { + var model = createChatCompletionModel("url", "org", "api_key", "model_name", null); + var requestTaskSettingsMap = getChatCompletionRequestTaskSettingsMap("user_override"); + + var overriddenModel = OpenAiChatCompletionModel.of(model, requestTaskSettingsMap); + + assertThat(overriddenModel, is(createChatCompletionModel("url", "org", "api_key", "model_name", "user_override"))); + } + + public void testOverrideWith_EmptyMap() { + var model = createChatCompletionModel("url", "org", "api_key", "model_name", null); + + var requestTaskSettingsMap = Map.of(); + + var overriddenModel = OpenAiChatCompletionModel.of(model, requestTaskSettingsMap); + assertThat(overriddenModel, sameInstance(model)); + } + + public void testOverrideWith_NullMap() { + var model = createChatCompletionModel("url", "org", "api_key", "model_name", null); + + var overriddenModel = OpenAiChatCompletionModel.of(model, null); + assertThat(overriddenModel, sameInstance(model)); + } + + public static OpenAiChatCompletionModel createChatCompletionModel( + String url, + @Nullable String org, + String apiKey, + String modelName, + @Nullable String user + ) { + return new OpenAiChatCompletionModel( + "id", + TaskType.COMPLETION, + "service", + new OpenAiChatCompletionServiceSettings(modelName, url, org, null), + new OpenAiChatCompletionTaskSettings(user), + new DefaultSecretSettings(new SecureString(apiKey.toCharArray())) + ); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettingsTests.java new file mode 100644 index 0000000000000..24632e120f94b --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettingsTests.java @@ -0,0 +1,47 @@ +/* + 
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai.completion; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.ESTestCase; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class OpenAiChatCompletionRequestTaskSettingsTests extends ESTestCase { + + public void testFromMap_ReturnsEmptySettings_WhenTheMapIsEmpty() { + var settings = OpenAiChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of())); + assertNull(settings.user()); + } + + public void testFromMap_ReturnsEmptySettings_WhenTheMapDoesNotContainTheFields() { + var settings = OpenAiChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of("key", "value"))); + assertNull(settings.user()); + } + + public void testFromMap_ReturnsUser() { + var settings = OpenAiChatCompletionRequestTaskSettings.fromMap( + new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user")) + ); + assertThat(settings.user(), is("user")); + } + + public static Map getChatCompletionRequestTaskSettingsMap(@Nullable String user) { + var map = new HashMap(); + + if (user != null) { + map.put(OpenAiChatCompletionTaskSettings.USER, user); + } + + return map; + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettingsTests.java new file mode 100644 index 0000000000000..8778b2f13e746 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettingsTests.java @@ -0,0 +1,193 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.openai.completion; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.services.ServiceFields; +import org.elasticsearch.xpack.inference.services.ServiceUtils; + +import java.io.IOException; +import java.net.URI; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class OpenAiChatCompletionServiceSettingsTests extends AbstractWireSerializingTestCase { + + public void testFromMap_Request_CreatesSettingsCorrectly() { + var modelId = "some model"; + var url = "https://www.elastic.co"; + var org = "organization"; + var maxInputTokens = 8192; + + var serviceSettings = OpenAiChatCompletionServiceSettings.fromMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + modelId, + ServiceFields.URL, + url, + OpenAiChatCompletionServiceSettings.ORGANIZATION, + org, + ServiceFields.MAX_INPUT_TOKENS, + maxInputTokens + ) + ) + ); + + assertThat(serviceSettings, is(new OpenAiChatCompletionServiceSettings(modelId, ServiceUtils.createUri(url), org, maxInputTokens))); + } + + public void testFromMap_MissingUrl_DoesNotThrowException() { + var modelId = "some model"; + var organization = "org"; + var maxInputTokens = 8192; + + var serviceSettings = OpenAiChatCompletionServiceSettings.fromMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + modelId, + OpenAiChatCompletionServiceSettings.ORGANIZATION, + organization, + ServiceFields.MAX_INPUT_TOKENS, + maxInputTokens + ) + ) + ); + + assertNull(serviceSettings.uri()); + assertThat(serviceSettings.modelId(), is(modelId)); + assertThat(serviceSettings.organizationId(), is(organization)); + assertThat(serviceSettings.maxInputTokens(), is(maxInputTokens)); + } + + public void testFromMap_EmptyUrl_ThrowsError() { + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiChatCompletionServiceSettings.fromMap(new HashMap<>(Map.of(ServiceFields.URL, "", ServiceFields.MODEL_ID, "model"))) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value empty string. 
[%s] must be a non-empty string;", + ServiceFields.URL + ) + ) + ); + } + + public void testFromMap_MissingOrganization_DoesNotThrowException() { + var modelId = "some model"; + var maxInputTokens = 8192; + + var serviceSettings = OpenAiChatCompletionServiceSettings.fromMap( + new HashMap<>(Map.of(ServiceFields.MODEL_ID, modelId, ServiceFields.MAX_INPUT_TOKENS, maxInputTokens)) + ); + + assertNull(serviceSettings.uri()); + assertThat(serviceSettings.modelId(), is(modelId)); + assertThat(serviceSettings.maxInputTokens(), is(maxInputTokens)); + } + + public void testFromMap_EmptyOrganization_ThrowsError() { + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiChatCompletionServiceSettings.fromMap( + new HashMap<>(Map.of(OpenAiChatCompletionServiceSettings.ORGANIZATION, "", ServiceFields.MODEL_ID, "model")) + ) + ); + + assertThat( + thrownException.getMessage(), + containsString( + org.elasticsearch.common.Strings.format( + "Validation Failed: 1: [service_settings] Invalid value empty string. [%s] must be a non-empty string;", + OpenAiChatCompletionServiceSettings.ORGANIZATION + ) + ) + ); + } + + public void testFromMap_InvalidUrl_ThrowsError() { + var url = "https://www.abc^.com"; + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiChatCompletionServiceSettings.fromMap( + new HashMap<>(Map.of(ServiceFields.URL, url, ServiceFields.MODEL_ID, "model")) + ) + ); + + assertThat( + thrownException.getMessage(), + is(Strings.format("Validation Failed: 1: [service_settings] Invalid url [%s] received for field [%s];", url, ServiceFields.URL)) + ); + } + + public void testToXContent_WritesAllValues() throws IOException { + var serviceSettings = new OpenAiChatCompletionServiceSettings("model", "url", "org", 1024); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + serviceSettings.toXContent(builder, null); + String xContentResult = org.elasticsearch.common.Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"model_id":"model","url":"url","organization_id":"org","max_input_tokens":1024}""")); + } + + public void testToXContent_DoesNotWriteOptionalValues() throws IOException { + URI uri = null; + + var serviceSettings = new OpenAiChatCompletionServiceSettings("model", uri, null, null); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + serviceSettings.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"model_id":"model"}""")); + } + + @Override + protected Writeable.Reader instanceReader() { + return OpenAiChatCompletionServiceSettings::new; + } + + @Override + protected OpenAiChatCompletionServiceSettings createTestInstance() { + return createRandomWithNonNullUrl(); + } + + @Override + protected OpenAiChatCompletionServiceSettings mutateInstance(OpenAiChatCompletionServiceSettings instance) throws IOException { + return createRandomWithNonNullUrl(); + } + + private static OpenAiChatCompletionServiceSettings createRandomWithNonNullUrl() { + return createRandom(randomAlphaOfLength(15)); + } + + private static OpenAiChatCompletionServiceSettings createRandom(String url) { + var modelId = randomAlphaOfLength(8); + var organizationId = randomFrom(randomAlphaOfLength(15), null); + var maxInputTokens = randomFrom(randomIntBetween(128, 4096), null); + + return new OpenAiChatCompletionServiceSettings(modelId, ServiceUtils.createUri(url), organizationId, maxInputTokens); + } + +} diff --git 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettingsTests.java new file mode 100644 index 0000000000000..66a9ec371eb93 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettingsTests.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai.completion; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class OpenAiChatCompletionTaskSettingsTests extends AbstractWireSerializingTestCase { + + public static OpenAiChatCompletionTaskSettings createRandomWithUser() { + return new OpenAiChatCompletionTaskSettings(randomAlphaOfLength(15)); + } + + public void testFromMap_WithUser() { + assertEquals( + new OpenAiChatCompletionTaskSettings("user"), + OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user"))) + ); + } + + public void testFromMap_UserIsEmptyString() { + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, ""))) + ); + + assertThat( + thrownException.getMessage(), + is(Strings.format("Validation Failed: 1: [task_settings] Invalid value empty string. 
[user] must be a non-empty string;")) + ); + } + + public void testFromMap_MissingUser_DoesNotThrowException() { + var taskSettings = OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of())); + assertNull(taskSettings.user()); + } + + public void testOverrideWith_KeepsOriginalValuesWithOverridesAreNull() { + var taskSettings = OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user"))); + + var overriddenTaskSettings = OpenAiChatCompletionTaskSettings.of( + taskSettings, + OpenAiChatCompletionRequestTaskSettings.EMPTY_SETTINGS + ); + assertThat(overriddenTaskSettings, is(taskSettings)); + } + + public void testOverrideWith_UsesOverriddenSettings() { + var taskSettings = OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user"))); + + var requestTaskSettings = OpenAiChatCompletionRequestTaskSettings.fromMap( + new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user2")) + ); + + var overriddenTaskSettings = OpenAiChatCompletionTaskSettings.of(taskSettings, requestTaskSettings); + assertThat(overriddenTaskSettings, is(new OpenAiChatCompletionTaskSettings("user2"))); + } + + @Override + protected Writeable.Reader instanceReader() { + return OpenAiChatCompletionTaskSettings::new; + } + + @Override + protected OpenAiChatCompletionTaskSettings createTestInstance() { + return createRandomWithUser(); + } + + @Override + protected OpenAiChatCompletionTaskSettings mutateInstance(OpenAiChatCompletionTaskSettings instance) throws IOException { + return createRandomWithUser(); + } +} From 47dbd611b72235515f2e107d9832159143965ee6 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 26 Mar 2024 08:02:11 +0100 Subject: [PATCH 52/79] Refactor MultiBucketAggregatorsReducer and DelayedMultiBucketAggregatorsReducer (#106725) renamed to BucketReducer DelayedBucketReducer and they have a new property containing the prototype bucket --- .../adjacency/InternalAdjacencyMatrix.java | 39 ++++++++++++------- .../histogram/InternalAutoDateHistogram.java | 22 +++++------ ...egatorsReducer.java => BucketReducer.java} | 24 ++++++++---- ...Reducer.java => DelayedBucketReducer.java} | 33 ++++++++++------ .../FixedMultiBucketAggregatorsReducer.java | 27 ++++++------- .../bucket/composite/InternalComposite.java | 14 +++---- .../bucket/geogrid/InternalGeoGrid.java | 14 +++---- .../InternalVariableWidthHistogram.java | 16 ++++---- ...ongKeyedMultiBucketsAggregatorReducer.java | 16 ++++---- .../bucket/prefix/InternalIpPrefix.java | 26 ++++++------- .../terms/InternalSignificantTerms.java | 39 +++++++++++-------- 11 files changed, 151 insertions(+), 119 deletions(-) rename server/src/main/java/org/elasticsearch/search/aggregations/bucket/{MultiBucketAggregatorsReducer.java => BucketReducer.java} (69%) rename server/src/main/java/org/elasticsearch/search/aggregations/bucket/{DelayedMultiBucketAggregatorsReducer.java => DelayedBucketReducer.java} (67%) diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java index 6e70e9263df47..6bb16415adfc2 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java @@ -11,20 +11,20 @@ import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.util.ObjectObjectPagedHashMap; import org.elasticsearch.core.Releasables; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.AggregatorReducer; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; -import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregatorsReducer; +import org.elasticsearch.search.aggregations.bucket.BucketReducer; import org.elasticsearch.search.aggregations.support.SamplingContext; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -180,28 +180,40 @@ public InternalBucket getBucketByKey(String key) { @Override protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) { return new AggregatorReducer() { - final Map bucketsReducer = new HashMap<>(getBuckets().size()); + final ObjectObjectPagedHashMap> bucketsReducer = new ObjectObjectPagedHashMap<>( + getBuckets().size(), + reduceContext.bigArrays() + ); @Override public void accept(InternalAggregation aggregation) { final InternalAdjacencyMatrix filters = (InternalAdjacencyMatrix) aggregation; for (InternalBucket bucket : filters.buckets) { - MultiBucketAggregatorsReducer reducer = bucketsReducer.computeIfAbsent( - bucket.key, - k -> new MultiBucketAggregatorsReducer(reduceContext, size) - ); + BucketReducer reducer = bucketsReducer.get(bucket.key); + if (reducer == null) { + reducer = new BucketReducer<>(bucket, reduceContext, size); + boolean success = false; + try { + bucketsReducer.put(bucket.key, reducer); + success = true; + } finally { + if (success == false) { + Releasables.close(reducer); + } + } + } reducer.accept(bucket); } } @Override public InternalAggregation get() { - List reducedBuckets = new ArrayList<>(bucketsReducer.size()); - for (Map.Entry entry : bucketsReducer.entrySet()) { - if (entry.getValue().getDocCount() >= 1) { - reducedBuckets.add(new InternalBucket(entry.getKey(), entry.getValue().getDocCount(), entry.getValue().get())); + List reducedBuckets = new ArrayList<>((int) bucketsReducer.size()); + bucketsReducer.forEach(entry -> { + if (entry.value.getDocCount() >= 1) { + reducedBuckets.add(new InternalBucket(entry.key, entry.value.getDocCount(), entry.value.getAggregations())); } - } + }); reduceContext.consumeBucketsAndMaybeBreak(reducedBuckets.size()); reducedBuckets.sort(Comparator.comparing(InternalBucket::getKey)); return new InternalAdjacencyMatrix(name, reducedBuckets, getMetadata()); @@ -209,7 +221,8 @@ public InternalAggregation get() { @Override public void close() { - Releasables.close(bucketsReducer.values()); + bucketsReducer.forEach(entry -> Releasables.close(entry.value)); + Releasables.close(bucketsReducer); } }; } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java index f0f7984079d97..ab531b69be947 100644 --- 
a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -21,7 +21,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.KeyComparable; -import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregatorsReducer; +import org.elasticsearch.search.aggregations.bucket.BucketReducer; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -444,7 +444,7 @@ static int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, Rou @Override protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) { return new AggregatorReducer() { - private final LongObjectPagedHashMap bucketsReducer = new LongObjectPagedHashMap<>( + private final LongObjectPagedHashMap> bucketsReducer = new LongObjectPagedHashMap<>( getBuckets().size(), reduceContext.bigArrays() ); @@ -460,9 +460,9 @@ public void accept(InternalAggregation aggregation) { min = Math.min(min, histogram.buckets.get(0).key); max = Math.max(max, histogram.buckets.get(histogram.buckets.size() - 1).key); for (Bucket bucket : histogram.buckets) { - MultiBucketAggregatorsReducer reducer = bucketsReducer.get(bucket.key); + BucketReducer reducer = bucketsReducer.get(bucket.key); if (reducer == null) { - reducer = new MultiBucketAggregatorsReducer(reduceContext, size); + reducer = new BucketReducer<>(bucket, reduceContext, size); bucketsReducer.put(bucket.key, reducer); } reducer.accept(bucket); @@ -480,34 +480,34 @@ public InternalAggregation get() { { // fill the array and sort it final int[] index = new int[] { 0 }; - bucketsReducer.iterator().forEachRemaining(c -> keys[index[0]++] = c.key); + bucketsReducer.forEach(c -> keys[index[0]++] = c.key); Arrays.sort(keys); } final List reducedBuckets = new ArrayList<>(); if (keys.length > 0) { // list of buckets coming from different shards that have the same key - MultiBucketAggregatorsReducer currentReducer = null; + BucketReducer currentReducer = null; long key = reduceRounding.round(keys[0]); for (long top : keys) { if (reduceRounding.round(top) != key) { assert currentReducer != null; // the key changes, reduce what we already buffered and reset the buffer for current buckets - reducedBuckets.add(createBucket(key, currentReducer.getDocCount(), currentReducer.get())); + reducedBuckets.add(createBucket(key, currentReducer.getDocCount(), currentReducer.getAggregations())); currentReducer = null; key = reduceRounding.round(top); } - final MultiBucketAggregatorsReducer nextReducer = bucketsReducer.get(top); + final BucketReducer nextReducer = bucketsReducer.get(top); if (currentReducer == null) { currentReducer = nextReducer; } else { - currentReducer.accept(createBucket(key, nextReducer.getDocCount(), nextReducer.get())); + currentReducer.accept(createBucket(key, nextReducer.getDocCount(), nextReducer.getAggregations())); } } if (currentReducer != null) { - reducedBuckets.add(createBucket(key, currentReducer.getDocCount(), currentReducer.get())); + reducedBuckets.add(createBucket(key, currentReducer.getDocCount(), currentReducer.getAggregations())); } } @@ -546,7 
+546,7 @@ public InternalAggregation get() { @Override public void close() { - bucketsReducer.iterator().forEachRemaining(c -> Releasables.close(c.value)); + bucketsReducer.forEach(c -> Releasables.close(c.value)); Releasables.close(bucketsReducer); } }; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketAggregatorsReducer.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketReducer.java similarity index 69% rename from server/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketAggregatorsReducer.java rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketReducer.java index e7d0e6a17e4c6..a9aa3efd536d4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketAggregatorsReducer.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketReducer.java @@ -15,32 +15,40 @@ import org.elasticsearch.search.aggregations.InternalAggregations; /** - * Class for reducing a list of {@link MultiBucketsAggregation.Bucket} to a single - * {@link InternalAggregations} and the number of documents. + * Class for reducing a list of {@link B} to a single {@link InternalAggregations} + * and the number of documents. */ -public final class MultiBucketAggregatorsReducer implements Releasable { +public final class BucketReducer implements Releasable { private final AggregatorsReducer aggregatorsReducer; + private final B proto; private long count = 0; - public MultiBucketAggregatorsReducer(AggregationReduceContext context, int size) { + public BucketReducer(B proto, AggregationReduceContext context, int size) { this.aggregatorsReducer = new AggregatorsReducer(context, size); + this.proto = proto; } /** - * Adds a {@link MultiBucketsAggregation.Bucket} for reduction. + * Adds a {@link B} for reduction. */ - public void accept(MultiBucketsAggregation.Bucket bucket) { + public void accept(B bucket) { count += bucket.getDocCount(); aggregatorsReducer.accept(bucket.getAggregations()); } + /** + * returns the bucket prototype. + */ + public B getProto() { + return proto; + } + /** * returns the reduced {@link InternalAggregations}. */ - public InternalAggregations get() { + public InternalAggregations getAggregations() { return aggregatorsReducer.get(); - } /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DelayedMultiBucketAggregatorsReducer.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DelayedBucketReducer.java similarity index 67% rename from server/src/main/java/org/elasticsearch/search/aggregations/bucket/DelayedMultiBucketAggregatorsReducer.java rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/DelayedBucketReducer.java index 7fc7c96badaaa..b29159c66ac40 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DelayedMultiBucketAggregatorsReducer.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DelayedBucketReducer.java @@ -16,38 +16,49 @@ import java.util.List; /** - * Class for reducing a list of {@link MultiBucketsAggregation.Bucket} to a single - * {@link InternalAggregations} and the number of documents in a delayable fashion. + * Class for reducing a list of {@link B} to a single {@link InternalAggregations} + * and the number of documents in a delayable fashion. * - * This class can be reused by calling {@link #reset()}. + * This class can be reused by calling {@link #reset(B)}. 
* - * @see MultiBucketAggregatorsReducer + * @see BucketReducer */ -public final class DelayedMultiBucketAggregatorsReducer { +public final class DelayedBucketReducer { private final AggregationReduceContext context; + // changes at reset time + private B proto; // the maximum size of this array is the number of shards to be reduced. We currently do it in a batches of 256 - // if we expect bigger batches, we might consider to use ObjectArray. + // by default. if we expect bigger batches, we might consider to use ObjectArray. private final List internalAggregations; private long count = 0; - public DelayedMultiBucketAggregatorsReducer(AggregationReduceContext context) { + public DelayedBucketReducer(B proto, AggregationReduceContext context) { + this.proto = proto; this.context = context; this.internalAggregations = new ArrayList<>(); } /** - * Adds a {@link MultiBucketsAggregation.Bucket} for reduction. + * Adds a {@link B} for reduction. */ - public void accept(MultiBucketsAggregation.Bucket bucket) { + public void accept(B bucket) { count += bucket.getDocCount(); internalAggregations.add(bucket.getAggregations()); } + /** + * returns the bucket prototype. + */ + public B getProto() { + return proto; + } + /** * Reset the content of this reducer. */ - public void reset() { + public void reset(B proto) { + this.proto = proto; count = 0L; internalAggregations.clear(); } @@ -55,7 +66,7 @@ public void reset() { /** * returns the reduced {@link InternalAggregations}. */ - public InternalAggregations get() { + public InternalAggregations getAggregations() { try (AggregatorsReducer aggregatorsReducer = new AggregatorsReducer(context, internalAggregations.size())) { for (InternalAggregations agg : internalAggregations) { aggregatorsReducer.accept(agg); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/FixedMultiBucketAggregatorsReducer.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/FixedMultiBucketAggregatorsReducer.java index 899d9dad7229c..a7261c9fd73f8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/FixedMultiBucketAggregatorsReducer.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/FixedMultiBucketAggregatorsReducer.java @@ -23,15 +23,13 @@ public abstract class FixedMultiBucketAggregatorsReducer implements Releasable { // we could use an ObjectArray here but these arrays are in normally small, so it is not worthy - private final MultiBucketAggregatorsReducer[] bucketsReducer; - private final List protoList; + private final List> bucketReducer; public FixedMultiBucketAggregatorsReducer(AggregationReduceContext reduceContext, int size, List protoList) { reduceContext.consumeBucketsAndMaybeBreak(protoList.size()); - this.protoList = protoList; - this.bucketsReducer = new MultiBucketAggregatorsReducer[protoList.size()]; + this.bucketReducer = new ArrayList<>(protoList.size()); for (int i = 0; i < protoList.size(); ++i) { - bucketsReducer[i] = new MultiBucketAggregatorsReducer(reduceContext, size); + bucketReducer.add(new BucketReducer<>(protoList.get(i), reduceContext, size)); } } @@ -40,10 +38,9 @@ public FixedMultiBucketAggregatorsReducer(AggregationReduceContext reduceContext * of the list passed on the constructor */ public final void accept(List buckets) { - assert buckets.size() == protoList.size(); - int i = 0; - for (B bucket : buckets) { - bucketsReducer[i++].accept(bucket); + assert buckets.size() == bucketReducer.size(); + for (int i = 0; i < buckets.size(); i++) { + 
bucketReducer.get(i).accept(buckets.get(i)); } } @@ -51,19 +48,17 @@ public final void accept(List buckets) { * returns the reduced buckets. */ public final List get() { - final List reduceBuckets = new ArrayList<>(protoList.size()); - for (int i = 0; i < protoList.size(); i++) { - final B proto = protoList.get(i); - final MultiBucketAggregatorsReducer reducer = bucketsReducer[i]; - reduceBuckets.add(createBucket(proto, reducer.getDocCount(), reducer.get())); + final List reduceBuckets = new ArrayList<>(bucketReducer.size()); + for (final BucketReducer reducer : bucketReducer) { + reduceBuckets.add(createBucket(reducer.getProto(), reducer.getDocCount(), reducer.getAggregations())); } return reduceBuckets; } - protected abstract B createBucket(B proto, long focCount, InternalAggregations aggregations); + protected abstract B createBucket(B proto, long docCount, InternalAggregations aggregations); @Override public final void close() { - Releasables.close(bucketsReducer); + Releasables.close(bucketReducer); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index fc13dcb6a22ee..22c967bb2ea14 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -22,7 +22,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.KeyComparable; -import org.elasticsearch.search.aggregations.bucket.DelayedMultiBucketAggregatorsReducer; +import org.elasticsearch.search.aggregations.bucket.DelayedBucketReducer; import org.elasticsearch.search.aggregations.support.SamplingContext; import org.elasticsearch.xcontent.XContentBuilder; @@ -257,7 +257,7 @@ public void close() { } private class BucketsQueue implements Releasable { - private final ObjectObjectPagedHashMap bucketReducers; + private final ObjectObjectPagedHashMap> bucketReducers; private final ObjectArrayPriorityQueue queue; private final AggregationReduceContext reduceContext; @@ -274,12 +274,12 @@ protected boolean lessThan(InternalBucket a, InternalBucket b) { /** adds a bucket to the queue. Return false if the bucket is not competitive, otherwise true.*/ boolean add(InternalBucket bucket) { - DelayedMultiBucketAggregatorsReducer delayed = bucketReducers.get(bucket.key); + DelayedBucketReducer delayed = bucketReducers.get(bucket.key); if (delayed == null) { final InternalBucket out = queue.insertWithOverflow(bucket); if (out == null) { // bucket is added - delayed = new DelayedMultiBucketAggregatorsReducer(reduceContext); + delayed = new DelayedBucketReducer<>(bucket, reduceContext); } else if (out == bucket) { // bucket is not competitive return false; @@ -287,7 +287,7 @@ boolean add(InternalBucket bucket) { // bucket replaces existing bucket delayed = bucketReducers.remove(out.key); assert delayed != null; - delayed.reset(); + delayed.reset(bucket); } bucketReducers.put(bucket.key, delayed); } @@ -307,7 +307,7 @@ List get() { * just whatever formats make sense for *its* index. This can be real * trouble when the index doing the reducing is unmapped. 
*/ final var reducedFormats = bucket.formats; - final DelayedMultiBucketAggregatorsReducer reducer = Objects.requireNonNull(bucketReducers.get(bucket.key)); + final DelayedBucketReducer reducer = Objects.requireNonNull(bucketReducers.get(bucket.key)); result[i] = new InternalBucket( sourceNames, reducedFormats, @@ -315,7 +315,7 @@ List get() { reverseMuls, missingOrders, reducer.getDocCount(), - reducer.get() + reducer.getAggregations() ); } return List.of(result); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java index 4918a57b29ed1..027551288be5f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java @@ -17,7 +17,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; -import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregatorsReducer; +import org.elasticsearch.search.aggregations.bucket.BucketReducer; import org.elasticsearch.search.aggregations.support.SamplingContext; import org.elasticsearch.xcontent.XContentBuilder; @@ -81,7 +81,7 @@ public List getBuckets() { protected AggregatorReducer getLeaderReducer(AggregationReduceContext context, int size) { return new AggregatorReducer() { - final LongObjectPagedHashMap bucketsReducer = new LongObjectPagedHashMap<>( + final LongObjectPagedHashMap> bucketsReducer = new LongObjectPagedHashMap<>( size, context.bigArrays() ); @@ -91,9 +91,9 @@ public void accept(InternalAggregation aggregation) { @SuppressWarnings("unchecked") final InternalGeoGrid grid = (InternalGeoGrid) aggregation; for (InternalGeoGridBucket bucket : grid.getBuckets()) { - MultiBucketAggregatorsReducer reducer = bucketsReducer.get(bucket.hashAsLong()); + BucketReducer reducer = bucketsReducer.get(bucket.hashAsLong()); if (reducer == null) { - reducer = new MultiBucketAggregatorsReducer(context, size); + reducer = new BucketReducer<>(bucket, context, size); bucketsReducer.put(bucket.hashAsLong(), reducer); } reducer.accept(bucket); @@ -106,8 +106,8 @@ public InternalAggregation get() { context.isFinalReduce() == false ? 
bucketsReducer.size() : Math.min(requiredSize, bucketsReducer.size()) ); try (BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, context.bigArrays())) { - bucketsReducer.iterator().forEachRemaining(entry -> { - InternalGeoGridBucket bucket = createBucket(entry.key, entry.value.getDocCount(), entry.value.get()); + bucketsReducer.forEach(entry -> { + InternalGeoGridBucket bucket = createBucket(entry.key, entry.value.getDocCount(), entry.value.getAggregations()); ordered.insertWithOverflow(bucket); }); final InternalGeoGridBucket[] list = new InternalGeoGridBucket[(int) ordered.size()]; @@ -121,7 +121,7 @@ public InternalAggregation get() { @Override public void close() { - bucketsReducer.iterator().forEachRemaining(r -> Releasables.close(r.value)); + bucketsReducer.forEach(r -> Releasables.close(r.value)); Releasables.close(bucketsReducer); } }; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java index 3478773464feb..27a79095eb49d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java @@ -20,7 +20,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.KeyComparable; -import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregatorsReducer; +import org.elasticsearch.search.aggregations.bucket.BucketReducer; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.support.SamplingContext; import org.elasticsearch.xcontent.XContentBuilder; @@ -492,7 +492,7 @@ public void accept(InternalAggregation aggregation) { long key = NumericUtils.doubleToSortableLong(bucket.centroid()); ReducerAndExtraInfo reducer = bucketsReducer.get(key); if (reducer == null) { - reducer = new ReducerAndExtraInfo(new MultiBucketAggregatorsReducer(reduceContext, size)); + reducer = new ReducerAndExtraInfo(new BucketReducer<>(bucket, reduceContext, size)); bucketsReducer.put(key, reducer); reduceContext.consumeBucketsAndMaybeBreak(1); } @@ -506,10 +506,12 @@ public void accept(InternalAggregation aggregation) { @Override public InternalAggregation get() { final List reducedBuckets = new ArrayList<>((int) bucketsReducer.size()); - bucketsReducer.iterator().forEachRemaining(entry -> { + bucketsReducer.forEach(entry -> { final double centroid = entry.value.sum[0] / entry.value.reducer.getDocCount(); final Bucket.BucketBounds bounds = new Bucket.BucketBounds(entry.value.min[0], entry.value.max[0]); - reducedBuckets.add(new Bucket(centroid, bounds, entry.value.reducer.getDocCount(), format, entry.value.reducer.get())); + reducedBuckets.add( + new Bucket(centroid, bounds, entry.value.reducer.getDocCount(), format, entry.value.reducer.getAggregations()) + ); }); reducedBuckets.sort(Comparator.comparing(Bucket::centroid)); mergeBucketsIfNeeded(reducedBuckets, targetNumBuckets, reduceContext); @@ -523,14 +525,14 @@ public InternalAggregation get() { @Override public void close() { - bucketsReducer.iterator().forEachRemaining(entry -> Releasables.close(entry.value.reducer)); + bucketsReducer.forEach(entry -> 
Releasables.close(entry.value.reducer)); Releasables.close(bucketsReducer); } }; } - private record ReducerAndExtraInfo(MultiBucketAggregatorsReducer reducer, double[] min, double[] max, double[] sum) { - private ReducerAndExtraInfo(MultiBucketAggregatorsReducer reducer) { + private record ReducerAndExtraInfo(BucketReducer reducer, double[] min, double[] max, double[] sum) { + private ReducerAndExtraInfo(BucketReducer reducer) { this(reducer, new double[] { Double.POSITIVE_INFINITY }, new double[] { Double.NEGATIVE_INFINITY }, new double[] { 0 }); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/LongKeyedMultiBucketsAggregatorReducer.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/LongKeyedMultiBucketsAggregatorReducer.java index 71374421481eb..07208ab2096a0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/LongKeyedMultiBucketsAggregatorReducer.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/LongKeyedMultiBucketsAggregatorReducer.java @@ -13,7 +13,7 @@ import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; -import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregatorsReducer; +import org.elasticsearch.search.aggregations.bucket.BucketReducer; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import java.util.ArrayList; @@ -28,7 +28,7 @@ abstract class LongKeyedMultiBucketsAggregatorReducer bucketsReducer; + private final LongObjectPagedHashMap> bucketsReducer; int consumeBucketCount = 0; LongKeyedMultiBucketsAggregatorReducer(AggregationReduceContext reduceContext, int size, long minDocCount) { @@ -42,16 +42,16 @@ abstract class LongKeyedMultiBucketsAggregatorReducer reducer = bucketsReducer.get(key); if (reducer == null) { - reducer = new MultiBucketAggregatorsReducer(reduceContext, size); + reducer = new BucketReducer<>(bucket, reduceContext, size); bucketsReducer.put(key, reducer); } consumeBucketsAndMaybeBreak(reducer, bucket); reducer.accept(bucket); } - private void consumeBucketsAndMaybeBreak(MultiBucketAggregatorsReducer reducer, B bucket) { + private void consumeBucketsAndMaybeBreak(BucketReducer reducer, B bucket) { if (reduceContext.isFinalReduce() == false || minDocCount == 0) { if (reducer.getDocCount() == 0 && bucket.getDocCount() > 0) { consumeBucketsAndMaybeBreak(); @@ -76,9 +76,9 @@ private void consumeBucketsAndMaybeBreak() { public final List get() { reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); final List reducedBuckets = new ArrayList<>((int) bucketsReducer.size()); - bucketsReducer.iterator().forEachRemaining(entry -> { + bucketsReducer.forEach(entry -> { if (reduceContext.isFinalReduce() == false || entry.value.getDocCount() >= minDocCount) { - reducedBuckets.add(createBucket(entry.key, entry.value.getDocCount(), entry.value.get())); + reducedBuckets.add(createBucket(entry.key, entry.value.getDocCount(), entry.value.getAggregations())); } }); return reducedBuckets; @@ -91,7 +91,7 @@ public final List get() { @Override public final void close() { - bucketsReducer.iterator().forEachRemaining(r -> Releasables.close(r.value)); + bucketsReducer.forEach(r -> Releasables.close(r.value)); Releasables.close(bucketsReducer); } } diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java index 50b92a2e77841..3557947bb9ea7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java @@ -20,7 +20,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.KeyComparable; -import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregatorsReducer; +import org.elasticsearch.search.aggregations.bucket.BucketReducer; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -225,7 +225,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { @Override protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) { return new AggregatorReducer() { - final ObjectObjectPagedHashMap buckets = new ObjectObjectPagedHashMap<>( + final ObjectObjectPagedHashMap> buckets = new ObjectObjectPagedHashMap<>( getBuckets().size(), reduceContext.bigArrays() ); @@ -234,29 +234,29 @@ protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceCont public void accept(InternalAggregation aggregation) { final InternalIpPrefix ipPrefix = (InternalIpPrefix) aggregation; for (Bucket bucket : ipPrefix.getBuckets()) { - ReducerAndProto reducerAndProto = buckets.get(bucket.key); - if (reducerAndProto == null) { - reducerAndProto = new ReducerAndProto(new MultiBucketAggregatorsReducer(reduceContext, size), bucket); + BucketReducer bucketReducer = buckets.get(bucket.key); + if (bucketReducer == null) { + bucketReducer = new BucketReducer<>(bucket, reduceContext, size); boolean success = false; try { - buckets.put(bucket.key, reducerAndProto); + buckets.put(bucket.key, bucketReducer); success = true; } finally { if (success == false) { - Releasables.close(reducerAndProto.reducer); + Releasables.close(bucketReducer); } } } - reducerAndProto.reducer.accept(bucket); + bucketReducer.accept(bucket); } } @Override public InternalAggregation get() { final List reducedBuckets = new ArrayList<>(Math.toIntExact(buckets.size())); - buckets.iterator().forEachRemaining(entry -> { - if (false == reduceContext.isFinalReduce() || entry.value.reducer.getDocCount() >= minDocCount) { - reducedBuckets.add(createBucket(entry.value.proto, entry.value.reducer.get(), entry.value.reducer.getDocCount())); + buckets.forEach(entry -> { + if (false == reduceContext.isFinalReduce() || entry.value.getDocCount() >= minDocCount) { + reducedBuckets.add(createBucket(entry.value.getProto(), entry.value.getAggregations(), entry.value.getDocCount())); } }); reduceContext.consumeBucketsAndMaybeBreak(reducedBuckets.size()); @@ -266,14 +266,12 @@ public InternalAggregation get() { @Override public void close() { - buckets.iterator().forEachRemaining(entry -> Releasables.close(entry.value.reducer)); + buckets.forEach(entry -> Releasables.close(entry.value)); Releasables.close(buckets); } }; } - private record ReducerAndProto(MultiBucketAggregatorsReducer reducer, Bucket proto) {} - @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { if (keyed) { diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java index 440e42f845ce2..f8e7f3cf3a69c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java @@ -18,7 +18,8 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; -import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregatorsReducer; +import org.elasticsearch.search.aggregations.bucket.BucketReducer; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; import org.elasticsearch.search.aggregations.support.SamplingContext; import org.elasticsearch.xcontent.XContentBuilder; @@ -202,7 +203,7 @@ protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceCont return new AggregatorReducer() { long globalSubsetSize = 0; long globalSupersetSize = 0; - final ObjectObjectPagedHashMap> buckets = new ObjectObjectPagedHashMap<>( + final ObjectObjectPagedHashMap> buckets = new ObjectObjectPagedHashMap<>( getBuckets().size(), reduceContext.bigArrays() ); @@ -216,22 +217,22 @@ public void accept(InternalAggregation aggregation) { globalSubsetSize += terms.getSubsetSize(); globalSupersetSize += terms.getSupersetSize(); for (B bucket : terms.getBuckets()) { - ReducerAndProto reducerAndProto = buckets.get(bucket.getKeyAsString()); - if (reducerAndProto == null) { - reducerAndProto = new ReducerAndProto<>(new MultiBucketAggregatorsReducer(reduceContext, size), bucket); + ReducerAndExtraInfo reducerAndExtraInfo = buckets.get(bucket.getKeyAsString()); + if (reducerAndExtraInfo == null) { + reducerAndExtraInfo = new ReducerAndExtraInfo<>(new BucketReducer<>(bucket, reduceContext, size)); boolean success = false; try { - buckets.put(bucket.getKeyAsString(), reducerAndProto); + buckets.put(bucket.getKeyAsString(), reducerAndExtraInfo); success = true; } finally { if (success == false) { - Releasables.close(reducerAndProto.reducer); + Releasables.close(reducerAndExtraInfo.reducer); } } } - reducerAndProto.reducer.accept(bucket); - reducerAndProto.subsetDf[0] += bucket.subsetDf; - reducerAndProto.supersetDf[0] += bucket.supersetDf; + reducerAndExtraInfo.reducer.accept(bucket); + reducerAndExtraInfo.subsetDf[0] += bucket.subsetDf; + reducerAndExtraInfo.supersetDf[0] += bucket.supersetDf; } } @@ -240,14 +241,14 @@ public InternalAggregation get() { final SignificanceHeuristic heuristic = getSignificanceHeuristic().rewrite(reduceContext); final int size = (int) (reduceContext.isFinalReduce() == false ? 
buckets.size() : Math.min(requiredSize, buckets.size())); try (BucketSignificancePriorityQueue ordered = new BucketSignificancePriorityQueue<>(size, reduceContext.bigArrays())) { - buckets.iterator().forEachRemaining(entry -> { + buckets.forEach(entry -> { final B b = createBucket( entry.value.subsetDf[0], globalSubsetSize, entry.value.supersetDf[0], globalSupersetSize, - entry.value.reducer.get(), - entry.value.proto + entry.value.reducer.getAggregations(), + entry.value.reducer.getProto() ); b.updateScore(heuristic); if (((b.score > 0) && (b.subsetDf >= minDocCount)) || reduceContext.isFinalReduce() == false) { @@ -271,15 +272,19 @@ public InternalAggregation get() { @Override public void close() { - buckets.iterator().forEachRemaining(entry -> Releasables.close(entry.value.reducer)); + buckets.forEach(entry -> Releasables.close(entry.value.reducer)); Releasables.close(buckets); } }; } - private record ReducerAndProto(MultiBucketAggregatorsReducer reducer, B proto, long[] subsetDf, long[] supersetDf) { - private ReducerAndProto(MultiBucketAggregatorsReducer reducer, B proto) { - this(reducer, proto, new long[] { 0 }, new long[] { 0 }); + private record ReducerAndExtraInfo( + BucketReducer reducer, + long[] subsetDf, + long[] supersetDf + ) { + private ReducerAndExtraInfo(BucketReducer reducer) { + this(reducer, new long[] { 0 }, new long[] { 0 }); } } From a3d96b93337d4af17ca85157f587d3cf37029d9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Tue, 26 Mar 2024 08:20:34 +0100 Subject: [PATCH 53/79] [DOCS] Changes model_id path param to inference_id (#106719) --- .../inference/delete-inference.asciidoc | 8 ++++---- .../inference/get-inference.asciidoc | 10 +++++----- .../inference/post-inference.asciidoc | 12 +++++------ .../inference/put-inference.asciidoc | 20 +++++++++---------- .../semantic-search-inference.asciidoc | 4 ++-- .../infer-api-ingest-pipeline.asciidoc | 8 ++++---- .../inference-api/infer-api-search.asciidoc | 4 ++-- .../inference-api/infer-api-task.asciidoc | 6 ++++-- 8 files changed, 37 insertions(+), 35 deletions(-) diff --git a/docs/reference/inference/delete-inference.asciidoc b/docs/reference/inference/delete-inference.asciidoc index 4b661236aa928..5b693f51d65da 100644 --- a/docs/reference/inference/delete-inference.asciidoc +++ b/docs/reference/inference/delete-inference.asciidoc @@ -16,9 +16,9 @@ own model, use the <>. [[delete-inference-api-request]] ==== {api-request-title} -`DELETE /_inference/` +`DELETE /_inference/` -`DELETE /_inference//` +`DELETE /_inference//` [discrete] [[delete-inference-api-prereqs]] @@ -32,9 +32,9 @@ own model, use the <>. [[delete-inference-api-path-params]] ==== {api-path-parms-title} -:: +:: (Required, string) -The unique identifier of the {infer} model to delete. +The unique identifier of the {infer} endpoint to delete. :: (Optional, string) diff --git a/docs/reference/inference/get-inference.asciidoc b/docs/reference/inference/get-inference.asciidoc index 705bc4e7a8c61..1a11904a169ca 100644 --- a/docs/reference/inference/get-inference.asciidoc +++ b/docs/reference/inference/get-inference.asciidoc @@ -18,11 +18,11 @@ own model, use the <>. `GET /_inference/_all` -`GET /_inference/` +`GET /_inference/` `GET /_inference//_all` -`GET /_inference//` +`GET /_inference//` [discrete] [[get-inference-api-prereqs]] @@ -47,9 +47,9 @@ and a wildcard expression, [[get-inference-api-path-params]] ==== {api-path-parms-title} -``:: +``:: (Optional, string) -The unique identifier of the {infer} model. 
+The unique identifier of the {infer} endpoint. ``:: @@ -77,7 +77,7 @@ The API returns the following response: [source,console-result] ------------------------------------------------------------ { - "model_id": "my-elser-model", + "inference_id": "my-elser-model", "task_type": "sparse_embedding", "service": "elser", "service_settings": { diff --git a/docs/reference/inference/post-inference.asciidoc b/docs/reference/inference/post-inference.asciidoc index 970cec7f4a452..e4cbd26904271 100644 --- a/docs/reference/inference/post-inference.asciidoc +++ b/docs/reference/inference/post-inference.asciidoc @@ -16,9 +16,9 @@ own model, use the <>. [[post-inference-api-request]] ==== {api-request-title} -`POST /_inference/` +`POST /_inference/` -`POST /_inference//` +`POST /_inference//` [discrete] @@ -32,8 +32,8 @@ own model, use the <>. [[post-inference-api-desc]] ==== {api-description-title} -The perform {infer} API enables you to use {infer} models to perform specific -tasks on data that you provide as an input. The API returns a response with the +The perform {infer} API enables you to use {ml} models to perform specific tasks +on data that you provide as an input. The API returns a response with the resutls of the tasks. The {infer} model you use can perform one specific task that has been defined when the model was created with the <>. @@ -42,9 +42,9 @@ that has been defined when the model was created with the <>. [[post-inference-api-path-params]] ==== {api-path-parms-title} -``:: +``:: (Required, string) -The unique identifier of the {infer} model. +The unique identifier of the {infer} endpoint. ``:: diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index 9d5e187f5994a..c0b9d508e13c3 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -33,7 +33,7 @@ or if you want to use non-NLP models, use the <>. [[put-inference-api-desc]] ==== {api-description-title} -The create {infer} API enables you to create and configure an {infer} model to +The create {infer} API enables you to create and configure a {ml} model to perform a specific {infer} task. The following services are available through the {infer} API: @@ -50,9 +50,9 @@ The following services are available through the {infer} API: ==== {api-path-parms-title} -``:: +``:: (Required, string) -The unique identifier of the model. +The unique identifier of the {infer} endpoint. ``:: (Required, string) @@ -246,7 +246,7 @@ This section contains example API calls for every service type. [[inference-example-cohere]] ===== Cohere service -The following example shows how to create an {infer} model called +The following example shows how to create an {infer} endpoint called `cohere_embeddings` to perform a `text_embedding` task type. [source,console] @@ -268,7 +268,7 @@ PUT _inference/text_embedding/cohere-embeddings [[inference-example-e5]] ===== E5 via the elasticsearch service -The following example shows how to create an {infer} model called +The following example shows how to create an {infer} endpoint called `my-e5-model` to perform a `text_embedding` task type. [source,console] @@ -293,7 +293,7 @@ further details, refer to the {ml-docs}/ml-nlp-e5.html[E5 model documentation]. [[inference-example-elser]] ===== ELSER service -The following example shows how to create an {infer} model called +The following example shows how to create an {infer} endpoint called `my-elser-model` to perform a `sparse_embedding` task type. 
[source,console] @@ -315,7 +315,7 @@ Example response: [source,console-result] ------------------------------------------------------------ { - "model_id": "my-elser-model", + "inference_id": "my-elser-model", "task_type": "sparse_embedding", "service": "elser", "service_settings": { @@ -332,7 +332,7 @@ Example response: [[inference-example-hugging-face]] ===== Hugging Face service -The following example shows how to create an {infer} model called +The following example shows how to create an {infer} endpoint called `hugging-face_embeddings` to perform a `text_embedding` task type. [source,console] @@ -362,7 +362,7 @@ after the endpoint initialization has been finished. [[inference-example-eland]] ===== Models uploaded by Eland via the elasticsearch service -The following example shows how to create an {infer} model called +The following example shows how to create an {infer} endpoint called `my-msmarco-minilm-model` to perform a `text_embedding` task type. [source,console] @@ -387,7 +387,7 @@ been [[inference-example-openai]] ===== OpenAI service -The following example shows how to create an {infer} model called +The following example shows how to create an {infer} endpoint called `openai_embeddings` to perform a `text_embedding` task type. [source,console] diff --git a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc index 97a37e34eb116..b5619f8dda7b9 100644 --- a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc @@ -23,9 +23,9 @@ include::{es-repo-dir}/tab-widgets/inference-api/infer-api-requirements-widget.a [discrete] [[infer-text-embedding-task]] -==== Create the inference task +==== Create an inference endpoint -Create the {infer} task by using the <>: +Create an {infer} endpoint by using the <>: include::{es-repo-dir}/tab-widgets/inference-api/infer-api-task-widget.asciidoc[] diff --git a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc index a5a1910e8f8ef..39f37f407926e 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc @@ -28,8 +28,8 @@ PUT _ingest/pipeline/cohere_embeddings ] } -------------------------------------------------- -<1> The name of the inference configuration you created by using the -<>. +<1> The name of the inference endpoint you created by using the +<>, it's referred to as `inference_id` in that step. <2> Configuration object that defines the `input_field` for the {infer} process and the `output_field` that will contain the {infer} results. @@ -55,8 +55,8 @@ PUT _ingest/pipeline/openai_embeddings ] } -------------------------------------------------- -<1> The name of the inference configuration you created by using the -<>. +<1> The name of the inference endpoint you created by using the +<>, it's referred to as `inference_id` in that step. <2> Configuration object that defines the `input_field` for the {infer} process and the `output_field` that will contain the {infer} results. 
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc index 1e8470471491f..843c351648c63 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc @@ -8,7 +8,7 @@ GET cohere-embeddings/_search "field": "content_embedding", "query_vector_builder": { "text_embedding": { - "model_id": "cohere_embeddings", + "inference_id": "cohere_embeddings", "model_text": "Muscles in human body" } }, @@ -83,7 +83,7 @@ GET openai-embeddings/_search "field": "content_embedding", "query_vector_builder": { "text_embedding": { - "model_id": "openai_embeddings", + "inference_id": "openai_embeddings", "model_text": "Calculate fuel cost" } }, diff --git a/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc index 7c6e750138c1e..dea7511f74566 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc @@ -13,7 +13,8 @@ PUT _inference/text_embedding/cohere_embeddings <1> } ------------------------------------------------------------ // TEST[skip:TBD] -<1> The task type is `text_embedding` in the path. +<1> The task type is `text_embedding` in the path and the `inference_id` which +is the unique identifier of the {infer} endpoint is `cohere_embeddings`. <2> The API key of your Cohere account. You can find your API keys in your Cohere dashboard under the https://dashboard.cohere.com/api-keys[API keys section]. You need to provide @@ -46,7 +47,8 @@ PUT _inference/text_embedding/openai_embeddings <1> } ------------------------------------------------------------ // TEST[skip:TBD] -<1> The task type is `text_embedding` in the path. +<1> The task type is `text_embedding` in the path and the `inference_id` which +is the unique identifier of the {infer} endpoint is `openai_embeddings`. <2> The API key of your OpenAI account. You can find your OpenAI API keys in your OpenAI account under the https://platform.openai.com/api-keys[API keys section]. You need to provide From b39b3731a7923f38eddbe815e4f2c777261cd7d1 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Tue, 26 Mar 2024 08:39:39 +0100 Subject: [PATCH 54/79] Port krb5kdc to test container and rework hdfs handling (#106228) This ports our krb5kdc test fixture to test container and reworks hdfs handling to also be based on test containers. The yaml rest tests that are using hdfs required introducing variable substitution in yamlresttestparser handling. 
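For orientation, here is a condensed sketch of the wiring this commit moves to: a test-container-backed HdfsFixture chained in front of an ElasticsearchCluster via a JUnit RuleChain, as used by the reworked HaHdfsFailoverTestSuiteIT further down in this patch. The class name and the getTestRestCluster() override below are illustrative rather than copied verbatim from the diff; the fixture, cluster builder, and rule-chain calls are the ones the patch itself introduces.

import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;

import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.test.fixtures.hdfs.HdfsClientThreadLeakFilter;
import org.elasticsearch.test.fixtures.hdfs.HdfsFixture;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.junit.ClassRule;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;

@ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class })
public class ExampleHdfsRepositoryIT extends ESRestTestCase {

    // HDFS now runs as a test-container-backed fixture instead of an AntFixture subprocess.
    public static HdfsFixture hdfsFixture = new HdfsFixture().withHAService("ha-hdfs");

    // The cluster under test is declared in code rather than through legacy testclusters Gradle config.
    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
        .distribution(DistributionType.DEFAULT)
        .plugin("repository-hdfs")
        .setting("xpack.security.enabled", "false")
        .build();

    // Start the HDFS fixture first, then the Elasticsearch cluster that talks to it.
    @ClassRule
    public static TestRule ruleChain = RuleChain.outerRule(hdfsFixture).around(cluster);

    @Override
    protected String getTestRestCluster() {
        // Assumed standard wiring: point the REST test framework at the declared cluster.
        return cluster.getHttpAddresses();
    }

    public void testRepositoryAgainstFixture() throws Exception {
        // Namenode ports are assigned dynamically by the container, so tests read them
        // from the fixture (e.g. hdfsFixture.getPort(0)) instead of using fixed ports.
    }
}
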
--- .../shadow/XmlClassRelocationTransformer.java | 142 ++++++ .../test/rest/InternalJavaRestTestPlugin.java | 10 +- .../test/rest/InternalYamlRestTestPlugin.java | 9 +- plugins/repository-hdfs/build.gradle | 479 ++++++------------ .../AbstractHaHdfsFailoverTestSuiteIT.java | 75 +++ .../hdfs/HaHdfsFailoverTestSuiteIT.java | 280 +--------- .../hdfs/SecureHaHdfsFailoverTestSuiteIT.java | 55 ++ .../hdfs/HdfsBlobStoreContainerTests.java | 1 + .../hdfs/HdfsBlobStoreRepositoryTests.java | 1 + .../hdfs/HdfsRepositoryTests.java | 1 + .../repositories/hdfs/HdfsTests.java | 1 + .../RepositoryHdfsClientYamlTestSuiteIT.java | 32 +- ...reRepositoryHdfsClientYamlTestSuiteIT.java | 62 +++ settings.gradle | 3 +- test/fixtures/hdfs-fixture/build.gradle | 90 ++++ .../hdfs/HdfsClientThreadLeakFilter.java | 9 +- .../test/fixtures/hdfs/HdfsFixture.java | 438 ++++++++++++++++ .../main/resources/readonly-repository.tar.gz | Bin test/fixtures/hdfs2-fixture/build.gradle | 13 - .../src/main/java/hdfs/MiniHDFS.java | 175 ------- test/fixtures/hdfs3-fixture/build.gradle | 13 - .../src/main/java/hdfs/MiniHDFS.java | 176 ------- .../main/resources/readonly-repository.tar.gz | Bin 1314 -> 0 bytes test/fixtures/krb5kdc-fixture/build.gradle | 64 ++- .../krb5kdc-fixture/docker-compose.yml | 32 -- .../fixtures/krb5kdc/Krb5kDcContainer.java | 172 +++++++ .../src/main/resources/provision/hdfs.sh | 2 +- .../DockerEnvironmentAwareTestContainer.java | 8 +- .../cluster/util/resource/FileResource.java | 7 +- .../test/cluster/util/resource/Resource.java | 4 + .../rest/yaml/ESClientYamlSuiteTestCase.java | 28 +- .../ParameterizableYamlXContentParser.java | 295 +++++++++++ .../yaml/section/ClientYamlTestSuite.java | 25 +- .../searchable-snapshots/qa/hdfs/build.gradle | 172 +------ .../hdfs/HdfsSearchableSnapshotsIT.java | 46 +- .../hdfs/SecureHdfsSearchableSnapshotsIT.java | 67 +++ .../qa/hdfs/build.gradle | 168 +----- .../AbstractHdfsSnapshotRepoTestKitIT.java | 38 ++ .../testkit/HdfsSnapshotRepoTestKitIT.java | 50 +- .../SecureHdfsSnapshotRepoTestKitIT.java | 63 +++ x-pack/qa/kerberos-tests/build.gradle | 44 +- .../kerberos/KerberosAuthenticationIT.java | 41 +- 42 files changed, 1912 insertions(+), 1479 deletions(-) create mode 100644 build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/shadow/XmlClassRelocationTransformer.java create mode 100644 plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/AbstractHaHdfsFailoverTestSuiteIT.java create mode 100644 plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/SecureHaHdfsFailoverTestSuiteIT.java create mode 100644 plugins/repository-hdfs/src/yamlRestTest/java/org/elasticsearch/repositories/hdfs/SecureRepositoryHdfsClientYamlTestSuiteIT.java create mode 100644 test/fixtures/hdfs-fixture/build.gradle rename {plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories => test/fixtures/hdfs-fixture/src/main/java/org/elasticsearch/test/fixtures}/hdfs/HdfsClientThreadLeakFilter.java (77%) create mode 100644 test/fixtures/hdfs-fixture/src/main/java/org/elasticsearch/test/fixtures/hdfs/HdfsFixture.java rename test/fixtures/{hdfs2-fixture => hdfs-fixture}/src/main/resources/readonly-repository.tar.gz (100%) delete mode 100644 test/fixtures/hdfs2-fixture/build.gradle delete mode 100644 test/fixtures/hdfs2-fixture/src/main/java/hdfs/MiniHDFS.java delete mode 100644 test/fixtures/hdfs3-fixture/build.gradle delete mode 100644 test/fixtures/hdfs3-fixture/src/main/java/hdfs/MiniHDFS.java delete mode 100644 
test/fixtures/hdfs3-fixture/src/main/resources/readonly-repository.tar.gz delete mode 100644 test/fixtures/krb5kdc-fixture/docker-compose.yml create mode 100644 test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java create mode 100644 test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ParameterizableYamlXContentParser.java create mode 100644 x-pack/plugin/searchable-snapshots/qa/hdfs/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/hdfs/SecureHdfsSearchableSnapshotsIT.java create mode 100644 x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AbstractHdfsSnapshotRepoTestKitIT.java create mode 100644 x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/SecureHdfsSnapshotRepoTestKitIT.java diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/shadow/XmlClassRelocationTransformer.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/shadow/XmlClassRelocationTransformer.java new file mode 100644 index 0000000000000..b365142282785 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/shadow/XmlClassRelocationTransformer.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.shadow; + +import com.github.jengelman.gradle.plugins.shadow.ShadowStats; +import com.github.jengelman.gradle.plugins.shadow.relocation.RelocateClassContext; +import com.github.jengelman.gradle.plugins.shadow.relocation.Relocator; +import com.github.jengelman.gradle.plugins.shadow.transformers.Transformer; +import com.github.jengelman.gradle.plugins.shadow.transformers.TransformerContext; + +import org.apache.commons.io.IOUtils; +import org.apache.tools.zip.ZipEntry; +import org.apache.tools.zip.ZipOutputStream; +import org.gradle.api.file.FileTreeElement; +import org.w3c.dom.Document; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; + +import java.io.BufferedInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.List; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.transform.TransformerException; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.stream.StreamResult; + +public class XmlClassRelocationTransformer implements Transformer { + + boolean hasTransformedResource = false; + + private Document doc; + + private String resource; + + @Override + public boolean canTransformResource(FileTreeElement element) { + String path = element.getRelativePath().getPathString(); + if (resource != null && resource.equals(path)) { + return true; + } + return false; + } + + @Override + public void transform(TransformerContext context) { + try { + BufferedInputStream bis = new BufferedInputStream(context.getIs()); + DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance(); + DocumentBuilder dBuilder = dbFactory.newDocumentBuilder(); + doc = dBuilder.parse(bis); + 
doc.getDocumentElement().normalize(); + Node root = doc.getDocumentElement(); + walkThroughNodes(root, context); + if (hasTransformedResource == false) { + this.doc = null; + } + } catch (Exception e) { + throw new RuntimeException("Error parsing xml file in " + context.getIs(), e); + } + } + + private static String getRelocatedClass(String className, TransformerContext context) { + List relocators = context.getRelocators(); + ShadowStats stats = context.getStats(); + if (className != null && className.length() > 0 && relocators != null) { + for (Relocator relocator : relocators) { + if (relocator.canRelocateClass(className)) { + RelocateClassContext relocateClassContext = new RelocateClassContext(className, stats); + return relocator.relocateClass(relocateClassContext); + } + } + } + + return className; + } + + private void walkThroughNodes(Node node, TransformerContext context) { + if (node.getNodeType() == Node.TEXT_NODE) { + String nodeValue = node.getNodeValue(); + if (nodeValue.isBlank() == false) { + String relocatedClass = getRelocatedClass(nodeValue, context); + if (relocatedClass.equals(nodeValue) == false) { + node.setNodeValue(relocatedClass); + hasTransformedResource = true; + } + } + } + NodeList nodeList = node.getChildNodes(); + for (int i = 0; i < nodeList.getLength(); i++) { + Node currentNode = nodeList.item(i); + walkThroughNodes(currentNode, context); + } + } + + @Override + public boolean hasTransformedResource() { + return hasTransformedResource; + } + + @Override + public void modifyOutputStream(ZipOutputStream os, boolean preserveFileTimestamps) { + ZipEntry entry = new ZipEntry(resource); + entry.setTime(TransformerContext.getEntryTimestamp(preserveFileTimestamps, entry.getTime())); + + try { + // Write the content back to the XML file + TransformerFactory transformerFactory = TransformerFactory.newInstance(); + DOMSource source = new DOMSource(doc); + + // Result stream will be a ByteArrayOutputStream + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + StreamResult result = new StreamResult(baos); + // Do the transformation and serialization + transformerFactory.newTransformer().transform(source, result); + os.putNextEntry(entry); + IOUtils.write(baos.toByteArray(), os); + os.closeEntry(); + } catch (IOException e) { + throw new RuntimeException(e); + } catch (TransformerException e) { + throw new RuntimeException(e); + } finally { + hasTransformedResource = false; + doc = null; + } + } + + @Override + public String getName() { + return getClass().getSimpleName(); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalJavaRestTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalJavaRestTestPlugin.java index f3950c8646292..1787ebcccf3a9 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalJavaRestTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalJavaRestTestPlugin.java @@ -8,7 +8,7 @@ package org.elasticsearch.gradle.internal.test.rest; -import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask; +import org.elasticsearch.gradle.internal.test.RestIntegTestTask; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -40,13 +40,7 @@ public void apply(Project project) { } // setup the javaRestTest task - // we use a StandloneRestIntegTestTask here so that the conventions of 
RestTestBasePlugin don't create a test cluster - TaskProvider testTask = registerTestTask( - project, - javaTestSourceSet, - SOURCE_SET_NAME, - StandaloneRestIntegTestTask.class - ); + TaskProvider testTask = registerTestTask(project, javaTestSourceSet, SOURCE_SET_NAME, RestIntegTestTask.class); project.getTasks().named(JavaBasePlugin.CHECK_TASK_NAME).configure(check -> check.dependsOn(testTask)); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPlugin.java index 66d3507f7f9b3..ba40998e2b02a 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPlugin.java @@ -8,7 +8,7 @@ package org.elasticsearch.gradle.internal.test.rest; -import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask; +import org.elasticsearch.gradle.internal.test.RestIntegTestTask; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -36,12 +36,7 @@ public void apply(Project project) { SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); SourceSet yamlTestSourceSet = sourceSets.create(SOURCE_SET_NAME); - TaskProvider testTask = registerTestTask( - project, - yamlTestSourceSet, - SOURCE_SET_NAME, - StandaloneRestIntegTestTask.class - ); + TaskProvider testTask = registerTestTask(project, yamlTestSourceSet, SOURCE_SET_NAME, RestIntegTestTask.class); project.getTasks().named(JavaBasePlugin.CHECK_TASK_NAME).configure(check -> check.dependsOn(testTask)); diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 57f4fc9a04ecd..beaf8723df4d5 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -6,376 +6,179 @@ * Side Public License, v 1. */ -import org.apache.tools.ant.filters.ReplaceTokens -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.internal.util.HdfsUtils -import java.nio.file.Path -import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE - -apply plugin: 'elasticsearch.test.fixtures' -apply plugin: 'elasticsearch.legacy-java-rest-test' -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' esplugin { - description 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.' - classname 'org.elasticsearch.repositories.hdfs.HdfsPlugin' + description 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.' + classname 'org.elasticsearch.repositories.hdfs.HdfsPlugin' } versions << [ - 'hadoop': '3.3.3' + 'hadoop': '3.3.3' ] -final int minTestedHadoopVersion = 2; -final int maxTestedHadoopVersion = 3; - -testFixtures.useFixture ":test:fixtures:krb5kdc-fixture", "hdfs" - configurations { - krb5Config - krb5Keytabs + hdfsFixture2 + hdfsFixture3 } -dependencies { - api project(path: 'hadoop-client-api', configuration: 'shadow') - if (isEclipse) { - /* - * Eclipse can't pick up the shadow dependency so we point it at *something* - * so it can compile things. 
- */ - api project(path: 'hadoop-client-api') - } - runtimeOnly "org.apache.hadoop:hadoop-client-runtime:${versions.hadoop}" - implementation "org.apache.hadoop:hadoop-hdfs:${versions.hadoop}" - api "com.google.protobuf:protobuf-java:${versions.protobuf}" - api "commons-logging:commons-logging:${versions.commonslogging}" - api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" - api 'commons-cli:commons-cli:1.2' - api "commons-codec:commons-codec:${versions.commonscodec}" - api 'commons-io:commons-io:2.8.0' - api 'org.apache.commons:commons-lang3:3.11' - api 'javax.servlet:javax.servlet-api:3.1.0' - api "org.slf4j:slf4j-api:${versions.slf4j}" - runtimeOnly "org.slf4j:slf4j-nop:${versions.slf4j}" - // runtimeOnly("org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}") https://github.com/elastic/elasticsearch/issues/93714 - krb5Keytabs project(path: ':test:fixtures:krb5kdc-fixture', configuration: 'krb5KeytabsHdfsDir') - krb5Config project(path: ':test:fixtures:krb5kdc-fixture', configuration: 'krb5ConfHdfsFile') +dependencies { + api project(path: 'hadoop-client-api', configuration: 'shadow') + if (isEclipse) { + /* + * Eclipse can't pick up the shadow dependency so we point it at *something* + * so it can compile things. + */ + api project(path: 'hadoop-client-api') + } + runtimeOnly "org.apache.hadoop:hadoop-client-runtime:${versions.hadoop}" + implementation "org.apache.hadoop:hadoop-hdfs:${versions.hadoop}" + api "com.google.protobuf:protobuf-java:${versions.protobuf}" + api "commons-logging:commons-logging:${versions.commonslogging}" + api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" + api 'commons-cli:commons-cli:1.2' + api "commons-codec:commons-codec:${versions.commonscodec}" + api 'commons-io:commons-io:2.8.0' + api 'org.apache.commons:commons-lang3:3.11' + api 'javax.servlet:javax.servlet-api:3.1.0' + api "org.slf4j:slf4j-api:${versions.slf4j}" + runtimeOnly "org.slf4j:slf4j-nop:${versions.slf4j}" + // https://github.com/elastic/elasticsearch/issues/93714 + // runtimeOnly("org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}") + + testImplementation(project(':test:fixtures:hdfs-fixture')) + javaRestTestCompileOnly(project(':test:fixtures:hdfs-fixture')) + + javaRestTestImplementation project(':test:fixtures:krb5kdc-fixture') + javaRestTestImplementation "org.slf4j:slf4j-api:${versions.slf4j}" + javaRestTestRuntimeOnly "com.google.guava:guava:16.0.1" + javaRestTestRuntimeOnly "commons-cli:commons-cli:1.2" + javaRestTestRuntimeOnly "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" + + yamlRestTestCompileOnly(project(':test:fixtures:hdfs-fixture')) + yamlRestTestImplementation project(':test:fixtures:krb5kdc-fixture') + yamlRestTestImplementation "org.slf4j:slf4j-api:${versions.slf4j}" + yamlRestTestRuntimeOnly "com.google.guava:guava:16.0.1" + yamlRestTestRuntimeOnly "commons-cli:commons-cli:1.2" + yamlRestTestRuntimeOnly "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" + + hdfsFixture2 project(path: ':test:fixtures:hdfs-fixture', configuration: 'shadowedHdfs2') + hdfsFixture3 project(path: ':test:fixtures:hdfs-fixture', configuration: 'shadow') } restResources { - restApi { - include '_common', 'cluster', 'nodes', 'indices', 'index', 'snapshot' - } -} - -normalization { - runtimeClasspath { - // ignore generated keytab files for the purposes of build avoidance - ignore '*.keytab' - // ignore fixture ports file which is on the classpath primarily to pacify the security manager - ignore 'ports' - } + restApi { + include 
'_common', 'cluster', 'nodes', 'indices', 'index', 'snapshot' + } } tasks.named("dependencyLicenses").configure { - mapping from: /hadoop-.*/, to: 'hadoop' + mapping from: /hadoop-.*/, to: 'hadoop' } -// TODO work that into the java rest test plugin when combined with java plugin -sourceSets { - javaRestTest { - compileClasspath = compileClasspath + main.compileClasspath - runtimeClasspath = runtimeClasspath + main.runtimeClasspath + files("src/main/plugin-metadata") - } +tasks.withType(RestIntegTestTask).configureEach { + usesDefaultDistribution() + jvmArgs '--add-exports', 'java.security.jgss/sun.security.krb5=ALL-UNNAMED' } tasks.named('javaRestTest').configure { - enabled = false + classpath = sourceSets.javaRestTest.runtimeClasspath + configurations.hdfsFixture3 } -tasks.named('yamlRestTest').configure { - enabled = false +tasks.register("javaRestTestHdfs2", RestIntegTestTask) { + description = "Runs rest tests against an elasticsearch cluster with HDFS version 2" + testClassesDirs = sourceSets.javaRestTest.output.classesDirs + classpath = sourceSets.javaRestTest.runtimeClasspath + configurations.hdfsFixture2 } -String realm = "BUILD.ELASTIC.CO" -String krb5conf = project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs") - -// Determine HDFS Fixture compatibility for the current build environment. -ext.fixtureSupported = project.provider(() -> HdfsUtils.isHdfsFixtureSupported(project)) - -for (int hadoopVersion = minTestedHadoopVersion; hadoopVersion <= maxTestedHadoopVersion; hadoopVersion++) { - final int hadoopVer = hadoopVersion - - configurations.create("hdfs" + hadoopVersion + "Fixture") - dependencies.add("hdfs" + hadoopVersion + "Fixture", project(':test:fixtures:hdfs' + hadoopVersion + '-fixture')) - - for (String fixtureName : ['hdfs' + hadoopVersion + 'Fixture', 'haHdfs' + hadoopVersion + 'Fixture', 'secureHdfs' + hadoopVersion + 'Fixture', 'secureHaHdfs' + hadoopVersion + 'Fixture']) { - project.tasks.register(fixtureName, org.elasticsearch.gradle.internal.test.AntFixture) { - executable = "${BuildParams.runtimeJavaHome}/bin/java" - dependsOn project.configurations.getByName("hdfs" + hadoopVer + "Fixture"), project.configurations.krb5Config, project.configurations.krb5Keytabs - env 'CLASSPATH', "${-> project.configurations.getByName("hdfs" + hadoopVer + "Fixture").asPath}" - - maxWaitInSeconds 60 - BuildParams.withFipsEnabledOnly(it) - waitCondition = { fixture, ant -> - // the hdfs.MiniHDFS fixture writes the ports file when - // it's ready, so we can just wait for the file to exist - return fixture.portsFile.exists() - } - final List miniHDFSArgs = [] - - // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options - if (name.startsWith('secure')) { - miniHDFSArgs.addAll(["--add-exports", "java.security.jgss/sun.security.krb5=ALL-UNNAMED"]) - miniHDFSArgs.add("-Djava.security.krb5.conf=${project.configurations.krb5Config.getSingleFile().getPath()}") - miniHDFSArgs.add("-Dhdfs.config.port=" + getSecureNamenodePortForVersion(hadoopVer)) - } else { - miniHDFSArgs.add("-Dhdfs.config.port=" + getNonSecureNamenodePortForVersion(hadoopVer)) - } - // If it's an HA fixture, set a nameservice to use in the JVM options - if (name.startsWith('haHdfs') || name.startsWith('secureHaHdfs')) { - miniHDFSArgs.add("-Dha-nameservice=ha-hdfs") - } - - // Common options - miniHDFSArgs.add('hdfs.MiniHDFS') - miniHDFSArgs.add(baseDir) - - // If it's a secure fixture, then set the principal name and keytab locations to use for auth. 
- if (name.startsWith('secure')) { - miniHDFSArgs.add("hdfs/hdfs.build.elastic.co@${realm}") - miniHDFSArgs.add(new File(project.configurations.krb5Keytabs.singleFile, "hdfs_hdfs.build.elastic.co.keytab").getPath()) - } - - args miniHDFSArgs.toArray() - } - } - - for (String integTestTaskName : ['javaRestTest' + hadoopVersion, 'javaRestTestSecure' + hadoopVersion]) { - tasks.register(integTestTaskName, RestIntegTestTask) { - description = "Runs rest tests against an elasticsearch cluster with HDFS" + hadoopVer + "-HA" - - if (name.contains("Secure")) { - dependsOn "secureHaHdfs" + hadoopVer + "Fixture" - } - - File portsFileDir = file("${workingDir}/hdfs" + hadoopVer + "Fixture") - Path portsFile = name.contains("Secure") ? - buildDir.toPath() - .resolve("fixtures") - .resolve("secureHaHdfs" + hadoopVer + "Fixture") - .resolve("ports") : - buildDir.toPath() - .resolve("fixtures") - .resolve("haHdfs" + hadoopVer + "Fixture") - .resolve("ports") - nonInputProperties.systemProperty "test.hdfs-fixture.ports", file("$portsFileDir/ports") - - // Copy ports file to separate location which is placed on the test classpath - doFirst { - mkdir(portsFileDir) - copy { - from portsFile - into portsFileDir - } - } - testClassesDirs = sourceSets.javaRestTest.output.classesDirs - // Set the keytab files in the classpath so that we can access them from test code without the security manager - // freaking out. - classpath = sourceSets.javaRestTest.runtimeClasspath + - configurations.krb5Keytabs + - files(portsFileDir) - } - } - - for (String integTestTaskName : ['yamlRestTest' + hadoopVersion, 'yamlRestTestSecure' + hadoopVersion]) { - tasks.register(integTestTaskName, RestIntegTestTask) { - description = "Runs rest tests against an elasticsearch cluster with HDFS" + hadoopVer - - if (name.contains("Secure")) { - dependsOn "secureHdfs" + hadoopVer + "Fixture" - } - - testClassesDirs = sourceSets.yamlRestTest.output.classesDirs - classpath = sourceSets.yamlRestTest.runtimeClasspath - } - } - - def processHadoopTestResources = tasks.register("processHadoop" + hadoopVer + "TestResources", Copy) - processHadoopTestResources.configure { - Map expansions = [ - 'hdfs_port' : getNonSecureNamenodePortForVersion(hadoopVer), - 'secure_hdfs_port': getSecureNamenodePortForVersion(hadoopVer), - ] - inputs.properties(expansions) - filter("tokens": expansions.collectEntries { k, v -> [k, v.toString()]}, ReplaceTokens.class) - it.into("build/resources/yamlRestTest/rest-api-spec/test") - it.into("hdfs_repository_" + hadoopVer) { - from "src/yamlRestTest/resources/rest-api-spec/test/hdfs_repository" - } - it.into("secure_hdfs_repository_" + hadoopVer) { - from "src/yamlRestTest/resources/rest-api-spec/test/secure_hdfs_repository" - } - } - tasks.named("processYamlRestTestResources").configure { - dependsOn(processHadoopTestResources) - } - - if (fixtureSupported.get()) { - // Check depends on the HA test. Already depends on the standard test. 
- tasks.named("check").configure { - dependsOn("javaRestTest" + hadoopVer) - } - - // Both standard and HA tests depend on their respective HDFS fixtures - tasks.named("yamlRestTest" + hadoopVer).configure { - dependsOn "hdfs" + hadoopVer + "Fixture" - // The normal test runner only runs the standard hdfs rest tests - systemProperty 'tests.rest.suite', 'hdfs_repository_' + hadoopVer - } - tasks.named("javaRestTest" + hadoopVer).configure { - dependsOn "haHdfs" + hadoopVer + "Fixture" - } - } else { - // The normal integration test runner will just test that the plugin loads - tasks.named("yamlRestTest" + hadoopVer).configure { - systemProperty 'tests.rest.suite', 'hdfs_repository_' + hadoopVer + '/10_basic' - } - // HA fixture is unsupported. Don't run them. - tasks.named("javaRestTestSecure" + hadoopVer).configure { - enabled = false - } - } - - tasks.named("check").configure { - dependsOn("yamlRestTest" + hadoopVer, "yamlRestTestSecure" + hadoopVer, "javaRestTestSecure" + hadoopVer) - } - - // Run just the secure hdfs rest test suite. - tasks.named("yamlRestTestSecure" + hadoopVer).configure { - systemProperty 'tests.rest.suite', 'secure_hdfs_repository_' + hadoopVer - } -} - - -def getSecureNamenodePortForVersion(hadoopVersion) { - return 10002 - (2 * hadoopVersion) +tasks.named('yamlRestTest').configure { + classpath = sourceSets.yamlRestTest.runtimeClasspath + configurations.hdfsFixture2 } -def getNonSecureNamenodePortForVersion(hadoopVersion) { - return 10003 - (2 * hadoopVersion) +tasks.register("yamlRestTestHdfs2", RestIntegTestTask) { + description = "Runs yaml rest tests against an elasticsearch cluster with HDFS version 2" + testClassesDirs = sourceSets.yamlRestTest.output.classesDirs + classpath = sourceSets.yamlRestTest.runtimeClasspath + configurations.hdfsFixture2 } -Set disabledIntegTestTaskNames = [] - -tasks.withType(RestIntegTestTask).configureEach { testTask -> - if (disabledIntegTestTaskNames.contains(name)) { - enabled = false; - } - BuildParams.withFipsEnabledOnly(testTask) - - if (name.contains("Secure")) { - if (disabledIntegTestTaskNames.contains(name) == false) { - nonInputProperties.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}" - nonInputProperties.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}" - nonInputProperties.systemProperty "java.security.krb5.conf", "${project.configurations.krb5Config.getSingleFile().getPath()}" - nonInputProperties.systemProperty( - "test.krb5.keytab.hdfs", - new File(project.configurations.krb5Keytabs.singleFile, "hdfs_hdfs.build.elastic.co.keytab").getPath() - ) - } - } - - testClusters.matching { it.name == testTask.name }.configureEach { - if (testTask.name.contains("Secure")) { - systemProperty "java.security.krb5.conf", { configurations.krb5Config.singleFile.getPath() }, IGNORE_VALUE - extraConfigFile( - "repository-hdfs/krb5.keytab", - new File(project.configurations.krb5Keytabs.singleFile, "elasticsearch.keytab"), - IGNORE_VALUE - ) - } - } +tasks.named("check").configure { + dependsOn(tasks.withType(RestIntegTestTask)) } - tasks.named("thirdPartyAudit").configure { - ignoreMissingClasses() - ignoreViolations( - // internal java api: sun.misc.Unsafe - 'com.google.protobuf.MessageSchema', - 'com.google.protobuf.UnsafeUtil', - 'com.google.protobuf.UnsafeUtil$1', - 'com.google.protobuf.UnsafeUtil$Android32MemoryAccessor', - 'com.google.protobuf.UnsafeUtil$Android64MemoryAccessor', - 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', - 
'com.google.protobuf.UnsafeUtil$MemoryAccessor', - 'org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture$UnsafeAtomicHelper', - 'org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture$UnsafeAtomicHelper$1', - 'org.apache.hadoop.shaded.com.google.common.cache.Striped64', - 'org.apache.hadoop.shaded.com.google.common.cache.Striped64$1', - 'org.apache.hadoop.shaded.com.google.common.cache.Striped64$Cell', - 'org.apache.hadoop.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', - 'org.apache.hadoop.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', - 'org.apache.hadoop.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', - 'org.apache.hadoop.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', - 'org.apache.hadoop.shaded.com.google.common.hash.Striped64', - 'org.apache.hadoop.shaded.com.google.common.hash.Striped64$1', - 'org.apache.hadoop.shaded.com.google.common.hash.Striped64$Cell', - 'org.apache.hadoop.shaded.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', - 'org.apache.hadoop.shaded.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', - 'org.apache.hadoop.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', - 'org.apache.hadoop.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', - 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe', - 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeBooleanField', - 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeByteField', - 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCachedField', - 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCharField', - 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCustomEncodedField', - 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeDoubleField', - 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeFloatField', - 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeIntField', - 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeLongField', - 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeObjectField', - 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeShortField', - 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.cache.Striped64', - 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.cache.Striped64$1', - 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.cache.Striped64$Cell', - 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', - 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', - 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', - 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', - 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.Striped64', - 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.Striped64$1', - 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.Striped64$Cell', - 
'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', - 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', - 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', - 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', - 'org.apache.hadoop.shaded.org.xbill.DNS.spi.DNSJavaNameServiceDescriptor', - 'org.apache.hadoop.thirdparty.com.google.common.cache.Striped64', - 'org.apache.hadoop.thirdparty.com.google.common.cache.Striped64$1', - 'org.apache.hadoop.thirdparty.com.google.common.cache.Striped64$Cell', - 'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', - 'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', - 'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', - 'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', - 'org.apache.hadoop.thirdparty.com.google.common.hash.Striped64', - 'org.apache.hadoop.thirdparty.com.google.common.hash.Striped64$1', - 'org.apache.hadoop.thirdparty.com.google.common.hash.Striped64$Cell', - 'org.apache.hadoop.thirdparty.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', - 'org.apache.hadoop.thirdparty.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', - 'org.apache.hadoop.thirdparty.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', - 'org.apache.hadoop.thirdparty.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', - 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil', - 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$1', - 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$JvmMemoryAccessor', - 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$MemoryAccessor' - ) -} - -tasks.named('resolveAllDependencies') { - // This avoids spinning up the test fixture when downloading all dependencies - configs = project.configurations - [project.configurations.krb5Config] + ignoreMissingClasses() + ignoreViolations( + // internal java api: sun.misc.Unsafe + 'com.google.protobuf.MessageSchema', + 'com.google.protobuf.UnsafeUtil', + 'com.google.protobuf.UnsafeUtil$1', + 'com.google.protobuf.UnsafeUtil$Android32MemoryAccessor', + 'com.google.protobuf.UnsafeUtil$Android64MemoryAccessor', + 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', + 'com.google.protobuf.UnsafeUtil$MemoryAccessor', + 'org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture$UnsafeAtomicHelper', + 'org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture$UnsafeAtomicHelper$1', + 'org.apache.hadoop.shaded.com.google.common.cache.Striped64', + 'org.apache.hadoop.shaded.com.google.common.cache.Striped64$1', + 'org.apache.hadoop.shaded.com.google.common.cache.Striped64$Cell', + 'org.apache.hadoop.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'org.apache.hadoop.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'org.apache.hadoop.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 'org.apache.hadoop.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', + 
'org.apache.hadoop.shaded.com.google.common.hash.Striped64', + 'org.apache.hadoop.shaded.com.google.common.hash.Striped64$1', + 'org.apache.hadoop.shaded.com.google.common.hash.Striped64$Cell', + 'org.apache.hadoop.shaded.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'org.apache.hadoop.shaded.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + 'org.apache.hadoop.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'org.apache.hadoop.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe', + 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeBooleanField', + 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeByteField', + 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCachedField', + 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCharField', + 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCustomEncodedField', + 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeDoubleField', + 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeFloatField', + 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeIntField', + 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeLongField', + 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeObjectField', + 'org.apache.hadoop.shaded.org.apache.avro.reflect.FieldAccessUnsafe$UnsafeShortField', + 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.cache.Striped64', + 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.cache.Striped64$1', + 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.cache.Striped64$Cell', + 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', + 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.Striped64', + 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.Striped64$1', + 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.hash.Striped64$Cell', + 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + 'org.apache.hadoop.shaded.org.xbill.DNS.spi.DNSJavaNameServiceDescriptor', + 'org.apache.hadoop.thirdparty.com.google.common.cache.Striped64', + 'org.apache.hadoop.thirdparty.com.google.common.cache.Striped64$1', + 
'org.apache.hadoop.thirdparty.com.google.common.cache.Striped64$Cell', + 'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', + 'org.apache.hadoop.thirdparty.com.google.common.hash.Striped64', + 'org.apache.hadoop.thirdparty.com.google.common.hash.Striped64$1', + 'org.apache.hadoop.thirdparty.com.google.common.hash.Striped64$Cell', + 'org.apache.hadoop.thirdparty.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'org.apache.hadoop.thirdparty.com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + 'org.apache.hadoop.thirdparty.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'org.apache.hadoop.thirdparty.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil', + 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$1', + 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$JvmMemoryAccessor', + 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$MemoryAccessor' + ) } diff --git a/plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/AbstractHaHdfsFailoverTestSuiteIT.java b/plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/AbstractHaHdfsFailoverTestSuiteIT.java new file mode 100644 index 0000000000000..d14cff30caef3 --- /dev/null +++ b/plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/AbstractHaHdfsFailoverTestSuiteIT.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.repositories.hdfs; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.core.Strings; +import org.elasticsearch.test.fixtures.hdfs.HdfsClientThreadLeakFilter; +import org.elasticsearch.test.fixtures.hdfs.HdfsFixture; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.Assert; + +import java.io.IOException; + +@ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class, TestContainersThreadFilter.class }) +abstract class AbstractHaHdfsFailoverTestSuiteIT extends ESRestTestCase { + + abstract HdfsFixture getHdfsFixture(); + + String securityCredentials() { + return ""; + } + + public void testHAFailoverWithRepository() throws Exception { + getHdfsFixture().setupHA(); + + RestClient client = client(); + + createRepository(client); + + // Get repository + Response response = client.performRequest(new Request("GET", "/_snapshot/hdfs_ha_repo_read/_all")); + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + + // Failover the namenode to the second. 
+ getHdfsFixture().failoverHDFS("nn1", "nn2"); + safeSleep(2000); + // Get repository again + response = client.performRequest(new Request("GET", "/_snapshot/hdfs_ha_repo_read/_all")); + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + } + + private void createRepository(RestClient client) throws IOException { + Request request = new Request("PUT", "/_snapshot/hdfs_ha_repo_read"); + request.setJsonEntity(Strings.format(""" + { + "type": "hdfs", + "settings": { + "uri": "hdfs://ha-hdfs/", + "path": "/user/elasticsearch/existing/readonly-repository", + "readonly": "true", + %s + "conf.dfs.nameservices": "ha-hdfs", + "conf.dfs.ha.namenodes.ha-hdfs": "nn1,nn2", + "conf.dfs.namenode.rpc-address.ha-hdfs.nn1": "localhost:%s", + "conf.dfs.namenode.rpc-address.ha-hdfs.nn2": "localhost:%s", + "conf.dfs.client.failover.proxy.provider.ha-hdfs":\ + "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" + } + }""", securityCredentials(), getHdfsFixture().getPort(0), getHdfsFixture().getPort(1))); + Response response = client.performRequest(request); + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + } + +} diff --git a/plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java b/plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java index cb8c4d65d88d6..7bd15ad64582f 100644 --- a/plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java +++ b/plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java @@ -8,271 +8,41 @@ package org.elasticsearch.repositories.hdfs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ha.BadFencingConfigurationException; -import org.apache.hadoop.ha.HAServiceProtocol; -import org.apache.hadoop.ha.HAServiceTarget; -import org.apache.hadoop.ha.NodeFencer; -import org.apache.hadoop.ha.ZKFCProtocol; -import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdfs.tools.DFSHAAdmin; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.RestClient; -import org.elasticsearch.core.PathUtils; -import org.elasticsearch.core.Strings; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.junit.Assert; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.AccessController; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; -import java.util.List; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.fixtures.hdfs.HdfsClientThreadLeakFilter; +import org.elasticsearch.test.fixtures.hdfs.HdfsFixture; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; /** * Integration test that runs against an HA-Enabled HDFS instance */ -public class HaHdfsFailoverTestSuiteIT extends ESRestTestCase { +@ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class }) +public class 
HaHdfsFailoverTestSuiteIT extends AbstractHaHdfsFailoverTestSuiteIT { - public void testHAFailoverWithRepository() throws Exception { - RestClient client = client(); + public static HdfsFixture hdfsFixture = new HdfsFixture().withHAService("ha-hdfs"); - String esKerberosPrincipal = System.getProperty("test.krb5.principal.es"); - String hdfsKerberosPrincipal = System.getProperty("test.krb5.principal.hdfs"); - String kerberosKeytabLocation = System.getProperty("test.krb5.keytab.hdfs"); - String ports = System.getProperty("test.hdfs-fixture.ports"); - String nn1Port = "10001"; - String nn2Port = "10002"; - if (ports.length() > 0) { - final Path path = PathUtils.get(ports); - final List lines = AccessController.doPrivileged((PrivilegedExceptionAction>) () -> { - return Files.readAllLines(path); - }); - nn1Port = lines.get(0); - nn2Port = lines.get(1); - } - boolean securityEnabled = hdfsKerberosPrincipal != null; + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .plugin("repository-hdfs") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "false") + .build(); - Configuration hdfsConfiguration = new Configuration(); - hdfsConfiguration.set("dfs.nameservices", "ha-hdfs"); - hdfsConfiguration.set("dfs.ha.namenodes.ha-hdfs", "nn1,nn2"); - hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:" + nn1Port); - hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:" + nn2Port); - hdfsConfiguration.set( - "dfs.client.failover.proxy.provider.ha-hdfs", - "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" - ); + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(hdfsFixture).around(cluster); - AccessController.doPrivileged((PrivilegedExceptionAction) () -> { - if (securityEnabled) { - // ensure that keytab exists - Path kt = PathUtils.get(kerberosKeytabLocation); - if (Files.exists(kt) == false) { - throw new IllegalStateException("Could not locate keytab at " + kerberosKeytabLocation); - } - if (Files.isReadable(kt) != true) { - throw new IllegalStateException("Could not read keytab at " + kerberosKeytabLocation); - } - logger.info("Keytab Length: " + Files.readAllBytes(kt).length); - - // set principal names - hdfsConfiguration.set("dfs.namenode.kerberos.principal", hdfsKerberosPrincipal); - hdfsConfiguration.set("dfs.datanode.kerberos.principal", hdfsKerberosPrincipal); - hdfsConfiguration.set("dfs.data.transfer.protection", "authentication"); - - SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, hdfsConfiguration); - UserGroupInformation.setConfiguration(hdfsConfiguration); - UserGroupInformation.loginUserFromKeytab(hdfsKerberosPrincipal, kerberosKeytabLocation); - } else { - SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.SIMPLE, hdfsConfiguration); - UserGroupInformation.setConfiguration(hdfsConfiguration); - UserGroupInformation.getCurrentUser(); - } - return null; - }); - - // Create repository - { - Request request = new Request("PUT", "/_snapshot/hdfs_ha_repo_read"); - request.setJsonEntity(Strings.format(""" - { - "type": "hdfs", - "settings": { - "uri": "hdfs://ha-hdfs/", - "path": "/user/elasticsearch/existing/readonly-repository", - "readonly": "true", - %s - "conf.dfs.nameservices": "ha-hdfs", - "conf.dfs.ha.namenodes.ha-hdfs": "nn1,nn2", - "conf.dfs.namenode.rpc-address.ha-hdfs.nn1": "localhost:%s", - 
"conf.dfs.namenode.rpc-address.ha-hdfs.nn2": "localhost:%s", - "conf.dfs.client.failover.proxy.provider.ha-hdfs": \ - "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" - } - }""", securityCredentials(securityEnabled, esKerberosPrincipal), nn1Port, nn2Port)); - Response response = client.performRequest(request); - - Assert.assertEquals(200, response.getStatusLine().getStatusCode()); - } - - // Get repository - { - Response response = client.performRequest(new Request("GET", "/_snapshot/hdfs_ha_repo_read/_all")); - Assert.assertEquals(200, response.getStatusLine().getStatusCode()); - } - - // Failover the namenode to the second. - failoverHDFS("nn1", "nn2", hdfsConfiguration); - - // Get repository again - { - Response response = client.performRequest(new Request("GET", "/_snapshot/hdfs_ha_repo_read/_all")); - Assert.assertEquals(200, response.getStatusLine().getStatusCode()); - } - } - - private String securityCredentials(boolean securityEnabled, String kerberosPrincipal) { - if (securityEnabled) { - return String.format(java.util.Locale.ROOT, """ - "security.principal": "%s","conf.dfs.data.transfer.protection": "authentication",""", kerberosPrincipal); - } else { - return ""; - } - } - - /** - * Wraps an HAServiceTarget, keeping track of any HAServiceProtocol proxies it generates in order - * to close them at the end of the test lifecycle. - */ - private static class CloseableHAServiceTarget extends HAServiceTarget { - private final HAServiceTarget delegate; - private final List protocolsToClose = new ArrayList<>(); - - CloseableHAServiceTarget(HAServiceTarget delegate) { - this.delegate = delegate; - } - - @Override - public InetSocketAddress getAddress() { - return delegate.getAddress(); - } - - @Override - public InetSocketAddress getHealthMonitorAddress() { - return delegate.getHealthMonitorAddress(); - } - - @Override - public InetSocketAddress getZKFCAddress() { - return delegate.getZKFCAddress(); - } - - @Override - public NodeFencer getFencer() { - return delegate.getFencer(); - } - - @Override - public void checkFencingConfigured() throws BadFencingConfigurationException { - delegate.checkFencingConfigured(); - } - - @Override - public HAServiceProtocol getProxy(Configuration conf, int timeoutMs) throws IOException { - HAServiceProtocol proxy = delegate.getProxy(conf, timeoutMs); - protocolsToClose.add(proxy); - return proxy; - } - - @Override - public HAServiceProtocol getHealthMonitorProxy(Configuration conf, int timeoutMs) throws IOException { - return delegate.getHealthMonitorProxy(conf, timeoutMs); - } - - @Override - public ZKFCProtocol getZKFCProxy(Configuration conf, int timeoutMs) throws IOException { - return delegate.getZKFCProxy(conf, timeoutMs); - } - - @Override - public boolean isAutoFailoverEnabled() { - return delegate.isAutoFailoverEnabled(); - } - - private void close() { - for (HAServiceProtocol protocol : protocolsToClose) { - if (protocol instanceof HAServiceProtocolClientSideTranslatorPB haServiceProtocolClientSideTranslatorPB) { - haServiceProtocolClientSideTranslatorPB.close(); - } - } - } - } - - /** - * The default HAAdmin tool does not throw exceptions on failures, and does not close any client connection - * resources when it concludes. This subclass overrides the tool to allow for exception throwing, and to - * keep track of and clean up connection resources. 
- */ - private static class CloseableHAAdmin extends DFSHAAdmin { - private final List serviceTargets = new ArrayList<>(); - - @Override - protected HAServiceTarget resolveTarget(String nnId) { - CloseableHAServiceTarget target = new CloseableHAServiceTarget(super.resolveTarget(nnId)); - serviceTargets.add(target); - return target; - } - - @Override - public int run(String[] argv) throws Exception { - return runCmd(argv); - } - - public int transitionToStandby(String namenodeID) throws Exception { - return run(new String[] { "-transitionToStandby", namenodeID }); - } - - public int transitionToActive(String namenodeID) throws Exception { - return run(new String[] { "-transitionToActive", namenodeID }); - } - - public void close() { - for (CloseableHAServiceTarget serviceTarget : serviceTargets) { - serviceTarget.close(); - } - } + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); } - /** - * Performs a two-phase leading namenode transition. - * @param from Namenode ID to transition to standby - * @param to Namenode ID to transition to active - * @param configuration Client configuration for HAAdmin tool - * @throws IOException In the event of a raised exception during namenode failover. - */ - private void failoverHDFS(String from, String to, Configuration configuration) throws IOException { - logger.info("Swapping active namenodes: [{}] to standby and [{}] to active", from, to); - try { - AccessController.doPrivileged((PrivilegedExceptionAction) () -> { - CloseableHAAdmin haAdmin = new CloseableHAAdmin(); - haAdmin.setConf(configuration); - try { - haAdmin.transitionToStandby(from); - haAdmin.transitionToActive(to); - } finally { - haAdmin.close(); - } - return null; - }); - } catch (PrivilegedActionException pae) { - throw new IOException("Unable to perform namenode failover", pae); - } + @Override + HdfsFixture getHdfsFixture() { + return hdfsFixture; } } diff --git a/plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/SecureHaHdfsFailoverTestSuiteIT.java b/plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/SecureHaHdfsFailoverTestSuiteIT.java new file mode 100644 index 0000000000000..8ba27f703c419 --- /dev/null +++ b/plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/SecureHaHdfsFailoverTestSuiteIT.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories.hdfs; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.fixtures.hdfs.HdfsFixture; +import org.elasticsearch.test.fixtures.krb5kdc.Krb5kDcContainer; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +public class SecureHaHdfsFailoverTestSuiteIT extends AbstractHaHdfsFailoverTestSuiteIT { + + public static Krb5kDcContainer krb5Fixture = new Krb5kDcContainer(); + + public static HdfsFixture hdfsFixture = new HdfsFixture().withHAService("ha-hdfs") + .withKerberos(() -> krb5Fixture.getPrincipal(), () -> krb5Fixture.getKeytab()); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .plugin("repository-hdfs") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "false") + .systemProperty("java.security.krb5.conf", () -> krb5Fixture.getConfPath().toString()) + .configFile("repository-hdfs/krb5.conf", Resource.fromString(() -> krb5Fixture.getConf())) + .configFile("repository-hdfs/krb5.keytab", Resource.fromFile(() -> krb5Fixture.getEsKeytab())) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(krb5Fixture).around(hdfsFixture).around(cluster); + + @Override + HdfsFixture getHdfsFixture() { + return hdfsFixture; + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + protected String securityCredentials() { + return String.format(java.util.Locale.ROOT, """ + "security.principal": "%s","conf.dfs.data.transfer.protection": "authentication",""", krb5Fixture.getEsPrincipal()); + } + +} diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java index 592192f29c262..ee1e54e8a3356 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.core.Streams; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.fixtures.hdfs.HdfsClientThreadLeakFilter; import org.hamcrest.CoreMatchers; import org.mockito.AdditionalMatchers; import org.mockito.Mockito; diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java index fed4411f68768..a52724496289a 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.fixtures.hdfs.HdfsClientThreadLeakFilter; import java.util.Collection; import java.util.Collections; diff --git 
a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java index cd38cc04e6b31..a6d2bdcf8a1d4 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; +import org.elasticsearch.test.fixtures.hdfs.HdfsClientThreadLeakFilter; import java.util.Collection; diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java index 313dcdd6623c4..0e2ec25b6cfaa 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.fixtures.hdfs.HdfsClientThreadLeakFilter; import java.util.Collection; diff --git a/plugins/repository-hdfs/src/yamlRestTest/java/org/elasticsearch/repositories/hdfs/RepositoryHdfsClientYamlTestSuiteIT.java b/plugins/repository-hdfs/src/yamlRestTest/java/org/elasticsearch/repositories/hdfs/RepositoryHdfsClientYamlTestSuiteIT.java index bdc6368bb5719..a0a4d9379bc78 100644 --- a/plugins/repository-hdfs/src/yamlRestTest/java/org/elasticsearch/repositories/hdfs/RepositoryHdfsClientYamlTestSuiteIT.java +++ b/plugins/repository-hdfs/src/yamlRestTest/java/org/elasticsearch/repositories/hdfs/RepositoryHdfsClientYamlTestSuiteIT.java @@ -5,22 +5,52 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ + package org.elasticsearch.repositories.hdfs; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.fixtures.hdfs.HdfsClientThreadLeakFilter; +import org.elasticsearch.test.fixtures.hdfs.HdfsFixture; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.util.Map; +@ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class, TestContainersThreadFilter.class }) public class RepositoryHdfsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + public static HdfsFixture hdfsFixture = new HdfsFixture(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .plugin("repository-hdfs") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "false") + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(hdfsFixture).around(cluster); + public RepositoryHdfsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { super(testCandidate); } + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @ParametersFactory public static Iterable parameters() throws Exception { - return ESClientYamlSuiteTestCase.createParameters(); + return createParameters(new String[] { "hdfs_repository" }, Map.of("hdfs_port", hdfsFixture.getPort())); } } diff --git a/plugins/repository-hdfs/src/yamlRestTest/java/org/elasticsearch/repositories/hdfs/SecureRepositoryHdfsClientYamlTestSuiteIT.java b/plugins/repository-hdfs/src/yamlRestTest/java/org/elasticsearch/repositories/hdfs/SecureRepositoryHdfsClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..45b992a35d731 --- /dev/null +++ b/plugins/repository-hdfs/src/yamlRestTest/java/org/elasticsearch/repositories/hdfs/SecureRepositoryHdfsClientYamlTestSuiteIT.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories.hdfs; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.fixtures.hdfs.HdfsClientThreadLeakFilter; +import org.elasticsearch.test.fixtures.hdfs.HdfsFixture; +import org.elasticsearch.test.fixtures.krb5kdc.Krb5kDcContainer; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.util.Map; + +@ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class, TestContainersThreadFilter.class }) +public class SecureRepositoryHdfsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + public static Krb5kDcContainer krb5Fixture = new Krb5kDcContainer(); + + public static HdfsFixture hdfsFixture = new HdfsFixture().withKerberos(() -> krb5Fixture.getPrincipal(), () -> krb5Fixture.getKeytab()); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .plugin("repository-hdfs") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "false") + .systemProperty("java.security.krb5.conf", () -> krb5Fixture.getConfPath().toString()) + .configFile("repository-hdfs/krb5.conf", Resource.fromString(() -> krb5Fixture.getConf())) + .configFile("repository-hdfs/krb5.keytab", Resource.fromFile(() -> krb5Fixture.getEsKeytab())) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(krb5Fixture).around(hdfsFixture).around(cluster); + + public SecureRepositoryHdfsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return createParameters(new String[] { "secure_hdfs_repository" }, Map.of("secure_hdfs_port", hdfsFixture.getPort())); + } +} diff --git a/settings.gradle b/settings.gradle index 97cce0a476d99..48e3794c9005d 100644 --- a/settings.gradle +++ b/settings.gradle @@ -90,8 +90,7 @@ List projects = [ 'test:framework', 'test:fixtures:azure-fixture', 'test:fixtures:gcs-fixture', - 'test:fixtures:hdfs2-fixture', - 'test:fixtures:hdfs3-fixture', + 'test:fixtures:hdfs-fixture', 'test:fixtures:krb5kdc-fixture', 'test:fixtures:minio-fixture', 'test:fixtures:old-elasticsearch', diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle new file mode 100644 index 0000000000000..bd4acf4e51505 --- /dev/null +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +apply plugin: 'elasticsearch.java' +apply plugin: 'com.github.johnrengelman.shadow' + +import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar + +configurations { +// all { +// transitive = true +// } + hdfs2 + hdfs3 + consumable("shadowedHdfs2") +} + +dependencies { + compileOnly("org.apache.hadoop:hadoop-minicluster:2.8.5") + api("com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}") { + transitive false + } + compileOnly "junit:junit:${versions.junit}" + hdfs2 "org.apache.hadoop:hadoop-minicluster:2.8.5" + hdfs3 "org.apache.hadoop:hadoop-minicluster:3.3.1" + +} + +tasks.named("shadowJar").configure { + archiveClassifier.set("hdfs3") + // fix issues with signed jars + + relocate("org.apache.hadoop", "fixture.hdfs3.org.apache.hadoop") { + exclude "org.apache.hadoop.hdfs.protocol.ClientProtocol" + exclude "org.apache.hadoop.ipc.StandbyException" + } + configurations << project.configurations.hdfs3 +} + +def hdfs2Jar = tasks.register("hdfs2jar", ShadowJar) { + relocate("org.apache.hadoop", "fixture.hdfs2.org.apache.hadoop") { + exclude "org.apache.hadoop.hdfs.protocol.ClientProtocol" + exclude "org.apache.hadoop.ipc.StandbyException" + } + archiveClassifier.set("hdfs2") + from sourceSets.main.output + configurations << project.configurations.hdfs2 +} + +tasks.withType(ShadowJar) { + dependencies { +// exclude(dependency('commons-io:commons-io:2.8.0')) + exclude(dependency("com.carrotsearch.randomizedtesting:randomizedtesting-runner:.*")) + exclude(dependency("junit:junit:.*")) + exclude(dependency("org.slf4j:slf4j-api:.*")) + exclude(dependency("com.google.guava:guava:.*")) + exclude(dependency("org.apache.commons:commons-compress:.*")) + exclude(dependency("commons-logging:commons-logging:.*")) + exclude(dependency("commons-codec:commons-codec:.*")) + exclude(dependency("org.apache.httpcomponents:httpclient:.*")) + exclude(dependency("org.apache.httpcomponents:httpcore:.*")) + exclude(dependency("org.apache.logging.log4j:log4j-1.2-api:.*")) + exclude(dependency("log4j:log4j:.*")) + exclude(dependency("io.netty:.*:.*")) + exclude(dependency("com.nimbusds:nimbus-jose-jwt:.*")) + exclude(dependency("commons-cli:commons-cli:1.2")) + exclude(dependency("net.java.dev.jna:jna:.*")) + exclude(dependency("org.objenesis:objenesis:.*")) + exclude(dependency('com.fasterxml.jackson.core:.*:.*')) + } + + transform(org.elasticsearch.gradle.internal.shadow.XmlClassRelocationTransformer.class) { + resource = "core-default.xml" + enabled = true + } + + transform(org.elasticsearch.gradle.internal.shadow.XmlClassRelocationTransformer.class) { + resource = "hdfs-default.xml" + enabled = true + } +} + +artifacts { + shadowedHdfs2(hdfs2Jar) +} diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsClientThreadLeakFilter.java b/test/fixtures/hdfs-fixture/src/main/java/org/elasticsearch/test/fixtures/hdfs/HdfsClientThreadLeakFilter.java similarity index 77% rename from plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsClientThreadLeakFilter.java rename to test/fixtures/hdfs-fixture/src/main/java/org/elasticsearch/test/fixtures/hdfs/HdfsClientThreadLeakFilter.java index c79418557da20..be63e22742ed5 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsClientThreadLeakFilter.java +++ b/test/fixtures/hdfs-fixture/src/main/java/org/elasticsearch/test/fixtures/hdfs/HdfsClientThreadLeakFilter.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.repositories.hdfs; +package org.elasticsearch.test.fixtures.hdfs; import com.carrotsearch.randomizedtesting.ThreadFilter; @@ -29,6 +29,11 @@ public final class HdfsClientThreadLeakFilter implements ThreadFilter { @Override public boolean reject(Thread t) { - return t.getName().equals(OFFENDING_THREAD_NAME); + return t.getName().contains(OFFENDING_THREAD_NAME) + || t.getName().startsWith("LeaseRenewer") + || t.getName().startsWith("SSL Certificates Store Monitor") // hadoop 3 brings that in + || t.getName().startsWith("GcTimeMonitor") // hadoop 3 + || t.getName().startsWith("Command processor") // hadoop 3 + || t.getName().startsWith("ForkJoinPool-"); // hadoop 3 } } diff --git a/test/fixtures/hdfs-fixture/src/main/java/org/elasticsearch/test/fixtures/hdfs/HdfsFixture.java b/test/fixtures/hdfs-fixture/src/main/java/org/elasticsearch/test/fixtures/hdfs/HdfsFixture.java new file mode 100644 index 0000000000000..18d406e2a97a0 --- /dev/null +++ b/test/fixtures/hdfs-fixture/src/main/java/org/elasticsearch/test/fixtures/hdfs/HdfsFixture.java @@ -0,0 +1,438 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.fixtures.hdfs; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.ha.BadFencingConfigurationException; +import org.apache.hadoop.ha.HAServiceProtocol; +import org.apache.hadoop.ha.HAServiceTarget; +import org.apache.hadoop.ha.NodeFencer; +import org.apache.hadoop.ha.ZKFCProtocol; +import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; +import org.apache.hadoop.hdfs.tools.DFSHAAdmin; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.Assume; +import org.junit.rules.ExternalResource; +import org.junit.rules.TemporaryFolder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +public class HdfsFixture extends ExternalResource { + + private static final Logger LOGGER = LoggerFactory.getLogger(HdfsFixture.class); + + private TemporaryFolder temporaryFolder = new TemporaryFolder(); + private MiniDFSCluster dfs; + private String 
haNameService; + private Supplier<String> principalConfig = null; + private Supplier<Path> keytab = null; + private Configuration cfg; + + private Configuration haConfiguration; + private int explicitPort = findAvailablePort(); + + public HdfsFixture withHAService(String haNameService) { + this.haNameService = haNameService; + return this; + } + + public HdfsFixture withKerberos(Supplier<String> principalConfig, Supplier<Path> keytabFile) { + this.principalConfig = principalConfig; + this.keytab = keytabFile; + return this; + } + + @Override + protected void before() throws Throwable { + temporaryFolder.create(); + assumeHdfsAvailable(); + startMinHdfs(); + } + + private void assumeHdfsAvailable() { + boolean fixtureSupported = false; + if (isWindows()) { + // hdfs fixture will not start without hadoop native libraries on windows + String nativePath = System.getenv("HADOOP_HOME"); + if (nativePath != null) { + java.nio.file.Path path = Paths.get(nativePath); + if (Files.isDirectory(path) + && Files.exists(path.resolve("bin").resolve("winutils.exe")) + && Files.exists(path.resolve("bin").resolve("hadoop.dll")) + && Files.exists(path.resolve("bin").resolve("hdfs.dll"))) { + fixtureSupported = true; + } else { + throw new IllegalStateException( + "HADOOP_HOME: " + path + " is invalid, does not contain hadoop native libraries in " + nativePath + "/bin" + ); + } + } + } else { + fixtureSupported = true; + } + + boolean nonLegalegalPath = temporaryFolder.getRoot().getAbsolutePath().contains(" "); + if (nonLegalegalPath) { + fixtureSupported = false; + } + + Assume.assumeTrue("HDFS Fixture is not supported", fixtureSupported); + } + + private boolean isWindows() { + return System.getProperty("os.name").toLowerCase().startsWith("windows"); + } + + /** + * Performs a two-phase leading namenode transition. + * @param from Namenode ID to transition to standby + * @param to Namenode ID to transition to active + * @throws IOException In the event of a raised exception during namenode failover. 
+ */ + public void failoverHDFS(String from, String to) throws IOException { + assert isHA() && haConfiguration != null : "HA Configuration must be set up before performing failover"; + LOGGER.info("Swapping active namenodes: [{}] to standby and [{}] to active", from, to); + try { + AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + CloseableHAAdmin haAdmin = new CloseableHAAdmin(); + haAdmin.setConf(haConfiguration); + try { + haAdmin.transitionToStandby(from); + haAdmin.transitionToActive(to); + } finally { + haAdmin.close(); + } + return null; + }); + } catch (PrivilegedActionException pae) { + throw new IOException("Unable to perform namenode failover", pae); + } + } + + public void setupHA() throws IOException { + assert isHA() : "HA Name Service must be set up before setting up HA"; + haConfiguration = new Configuration(); + haConfiguration.set("dfs.nameservices", haNameService); + haConfiguration.set("dfs.ha.namenodes.ha-hdfs", "nn1,nn2"); + haConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:" + getPort(0)); + haConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:" + (getPort(1))); + haConfiguration.set( + "dfs.client.failover.proxy.provider.ha-hdfs", + "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" + ); + + if (isSecure()) { + // ensure that keytab exists + Path kt = this.keytab.get(); + if (Files.exists(kt) == false) { + throw new IllegalStateException("Could not locate keytab at " + keytab.get()); + } + if (Files.isReadable(kt) != true) { + throw new IllegalStateException("Could not read keytab at " + keytab.get()); + } + LOGGER.info("Keytab Length: " + Files.readAllBytes(kt).length); + + // set principal names + String hdfsKerberosPrincipal = principalConfig.get(); + haConfiguration.set("dfs.namenode.kerberos.principal", hdfsKerberosPrincipal); + haConfiguration.set("dfs.datanode.kerberos.principal", hdfsKerberosPrincipal); + haConfiguration.set("dfs.data.transfer.protection", "authentication"); + + SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, haConfiguration); + UserGroupInformation.setConfiguration(haConfiguration); + UserGroupInformation.loginUserFromKeytab(hdfsKerberosPrincipal, keytab.get().toString()); + } else { + SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.SIMPLE, haConfiguration); + UserGroupInformation.setConfiguration(haConfiguration); + UserGroupInformation.getCurrentUser(); + } + } + + private void startMinHdfs() throws Exception { + Path baseDir = temporaryFolder.newFolder("baseDir").toPath(); + if (System.getenv("HADOOP_HOME") == null) { + Path hadoopHome = baseDir.resolve("hadoop-home"); + Files.createDirectories(hadoopHome); + System.setProperty("hadoop.home.dir", hadoopHome.toAbsolutePath().toString()); + } + // hdfs-data/, where any data is going + Path hdfsHome = baseDir.resolve("hdfs-data"); + new File(hdfsHome.toFile(), "data").mkdirs(); + // configure cluster + cfg = new Configuration(); + cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString()); + cfg.set("hadoop.security.group.mapping", "org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback"); + + // optionally configure security + if (isSecure()) { + String kerberosPrincipal = principalConfig.get(); + String keytabFilePath = keytab.get().toString(); + cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
"true"); + cfg.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal); + cfg.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal); + cfg.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal); + cfg.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, keytabFilePath); + cfg.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, keytabFilePath); + cfg.set(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, "true"); + cfg.set(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, "true"); + cfg.set(DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY, "true"); + cfg.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, "true"); + } + refreshKrb5Config(); + UserGroupInformation.setConfiguration(cfg); + + MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(cfg); + // if(isSecure()) { + builder.nameNodePort(explicitPort); + // } else { + // builder.nameNodePort(explicitPort); + // } + if (isHA()) { + MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(0); + MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(0); + MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf(haNameService).addNN(nn1).addNN(nn2); + MiniDFSNNTopology namenodeTopology = new MiniDFSNNTopology().addNameservice(nameservice); + builder.nnTopology(namenodeTopology); + } + dfs = builder.build(); + // Configure contents of the filesystem + org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch"); + FileSystem fs; + if (isHA()) { + dfs.transitionToActive(0); + fs = HATestUtil.configureFailoverFs(dfs, cfg); + } else { + fs = dfs.getFileSystem(0); + } + + try { + // Set the elasticsearch user directory up + fs.mkdirs(esUserPath); + if (UserGroupInformation.isSecurityEnabled()) { + List acls = new ArrayList<>(); + acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch").setPermission(FsAction.ALL).build()); + fs.modifyAclEntries(esUserPath, acls); + } + + // Install a pre-existing repository into HDFS + String directoryName = "readonly-repository"; + String archiveName = directoryName + ".tar.gz"; + URL readOnlyRepositoryArchiveURL = getClass().getClassLoader().getResource(archiveName); + if (readOnlyRepositoryArchiveURL != null) { + Path tempDirectory = Files.createTempDirectory(getClass().getName()); + File readOnlyRepositoryArchive = tempDirectory.resolve(archiveName).toFile(); + FileUtils.copyURLToFile(readOnlyRepositoryArchiveURL, readOnlyRepositoryArchive); + FileUtil.unTar(readOnlyRepositoryArchive, tempDirectory.toFile()); + + fs.copyFromLocalFile( + true, + true, + new org.apache.hadoop.fs.Path(tempDirectory.resolve(directoryName).toAbsolutePath().toUri()), + esUserPath.suffix("/existing/" + directoryName) + ); + + FileUtils.deleteDirectory(tempDirectory.toFile()); + } + } finally { + fs.close(); + } + } + + private boolean isSecure() { + return keytab != null && principalConfig != null; + } + + @Override + protected void after() { + if (dfs != null) { + try { + if (isHA()) { + dfs.getFileSystem(0).close(); + dfs.getFileSystem(1).close(); + } else { + dfs.getFileSystem().close(); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + dfs.close(); + } + temporaryFolder.delete(); + } + + private boolean isHA() { + return haNameService != null; + } + + public int getPort() { + return dfs == null ? 
explicitPort : dfs.getNameNodePort(0); + } + + // fix port handling to allow parallel hdfs fixture runs + public int getPort(int i) { + return dfs.getNameNodePort(i); + } + + /** + * Wraps an HAServiceTarget, keeping track of any HAServiceProtocol proxies it generates in order + * to close them at the end of the test lifecycle. + */ + protected static class CloseableHAServiceTarget extends HAServiceTarget { + private final HAServiceTarget delegate; + private final List protocolsToClose = new ArrayList<>(); + + CloseableHAServiceTarget(HAServiceTarget delegate) { + this.delegate = delegate; + } + + @Override + public InetSocketAddress getAddress() { + return delegate.getAddress(); + } + + @Override + public InetSocketAddress getHealthMonitorAddress() { + return delegate.getHealthMonitorAddress(); + } + + @Override + public InetSocketAddress getZKFCAddress() { + return delegate.getZKFCAddress(); + } + + @Override + public NodeFencer getFencer() { + return delegate.getFencer(); + } + + @Override + public void checkFencingConfigured() throws BadFencingConfigurationException { + delegate.checkFencingConfigured(); + } + + @Override + public HAServiceProtocol getProxy(Configuration conf, int timeoutMs) throws IOException { + HAServiceProtocol proxy = delegate.getProxy(conf, timeoutMs); + protocolsToClose.add(proxy); + return proxy; + } + + @Override + public HAServiceProtocol getHealthMonitorProxy(Configuration conf, int timeoutMs) throws IOException { + return delegate.getHealthMonitorProxy(conf, timeoutMs); + } + + @Override + public ZKFCProtocol getZKFCProxy(Configuration conf, int timeoutMs) throws IOException { + return delegate.getZKFCProxy(conf, timeoutMs); + } + + @Override + public boolean isAutoFailoverEnabled() { + return delegate.isAutoFailoverEnabled(); + } + + private void close() { + for (HAServiceProtocol protocol : protocolsToClose) { + if (protocol instanceof HAServiceProtocolClientSideTranslatorPB haServiceProtocolClientSideTranslatorPB) { + haServiceProtocolClientSideTranslatorPB.close(); + } + } + } + } + + /** + * The default HAAdmin tool does not throw exceptions on failures, and does not close any client connection + * resources when it concludes. This subclass overrides the tool to allow for exception throwing, and to + * keep track of and clean up connection resources. 
+ */ + protected static class CloseableHAAdmin extends DFSHAAdmin { + private final List serviceTargets = new ArrayList<>(); + + @Override + protected HAServiceTarget resolveTarget(String nnId) { + CloseableHAServiceTarget target = new CloseableHAServiceTarget(super.resolveTarget(nnId)); + serviceTargets.add(target); + return target; + } + + @Override + public int run(String[] argv) throws Exception { + return runCmd(argv); + } + + public int transitionToStandby(String namenodeID) throws Exception { + return run(new String[] { "-transitionToStandby", namenodeID }); + } + + public int transitionToActive(String namenodeID) throws Exception { + return run(new String[] { "-transitionToActive", namenodeID }); + } + + public void close() { + for (CloseableHAServiceTarget serviceTarget : serviceTargets) { + serviceTarget.close(); + } + } + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + public static void refreshKrb5Config() throws ClassNotFoundException, NoSuchMethodException, IllegalArgumentException, + IllegalAccessException, InvocationTargetException, InvocationTargetException { + Class classRef; + if (System.getProperty("java.vendor").contains("IBM")) { + classRef = Class.forName("com.ibm.security.krb5.internal.Config"); + } else { + classRef = Class.forName("sun.security.krb5.Config"); + } + + Method refreshMethod = classRef.getMethod("refresh"); + refreshMethod.invoke(classRef); + } + + private static int findAvailablePort() { + try (ServerSocket socket = new ServerSocket(0)) { + return socket.getLocalPort(); + } catch (Exception ex) { + LOGGER.error("Failed to find available port", ex); + } + return -1; + } + +} diff --git a/test/fixtures/hdfs2-fixture/src/main/resources/readonly-repository.tar.gz b/test/fixtures/hdfs-fixture/src/main/resources/readonly-repository.tar.gz similarity index 100% rename from test/fixtures/hdfs2-fixture/src/main/resources/readonly-repository.tar.gz rename to test/fixtures/hdfs-fixture/src/main/resources/readonly-repository.tar.gz diff --git a/test/fixtures/hdfs2-fixture/build.gradle b/test/fixtures/hdfs2-fixture/build.gradle deleted file mode 100644 index 43d14a38c5e3e..0000000000000 --- a/test/fixtures/hdfs2-fixture/build.gradle +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -apply plugin: 'elasticsearch.java' - -dependencies { - api "org.apache.hadoop:hadoop-minicluster:2.8.5" -} diff --git a/test/fixtures/hdfs2-fixture/src/main/java/hdfs/MiniHDFS.java b/test/fixtures/hdfs2-fixture/src/main/java/hdfs/MiniHDFS.java deleted file mode 100644 index ee993fec74eb4..0000000000000 --- a/test/fixtures/hdfs2-fixture/src/main/java/hdfs/MiniHDFS.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package hdfs; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.permission.AclEntry; -import org.apache.hadoop.fs.permission.AclEntryType; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.MiniDFSNNTopology; -import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; -import org.apache.hadoop.security.UserGroupInformation; - -import java.io.File; -import java.lang.management.ManagementFactory; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.StandardCopyOption; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * MiniHDFS test fixture. There is a CLI tool, but here we can - * easily properly setup logging, avoid parsing JSON, etc. - */ -public class MiniHDFS { - - private static String PORT_FILE_NAME = "ports"; - private static String PID_FILE_NAME = "pid"; - - public static void main(String[] args) throws Exception { - if (args.length != 1 && args.length != 3) { - throw new IllegalArgumentException( - "Expected: MiniHDFS [ ], got: " + Arrays.toString(args) - ); - } - boolean secure = args.length == 3; - - // configure Paths - Path baseDir = Paths.get(args[0]); - // hadoop-home/, so logs will not complain - if (System.getenv("HADOOP_HOME") == null) { - Path hadoopHome = baseDir.resolve("hadoop-home"); - Files.createDirectories(hadoopHome); - System.setProperty("hadoop.home.dir", hadoopHome.toAbsolutePath().toString()); - } - // hdfs-data/, where any data is going - Path hdfsHome = baseDir.resolve("hdfs-data"); - - // configure cluster - Configuration cfg = new Configuration(); - cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString()); - // lower default permission: TODO: needed? 
- cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766"); - - // optionally configure security - if (secure) { - String kerberosPrincipal = args[1]; - String keytabFile = args[2]; - - cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); - cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true"); - cfg.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal); - cfg.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal); - cfg.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal); - cfg.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, keytabFile); - cfg.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, keytabFile); - cfg.set(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, "true"); - cfg.set(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, "true"); - cfg.set(DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY, "true"); - cfg.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, "true"); - } - - UserGroupInformation.setConfiguration(cfg); - - MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(cfg); - String explicitPort = System.getProperty("hdfs.config.port"); - if (explicitPort != null) { - builder.nameNodePort(Integer.parseInt(explicitPort)); - } else { - if (secure) { - builder.nameNodePort(9998); - } else { - builder.nameNodePort(9999); - } - } - - // Configure HA mode - String haNameService = System.getProperty("ha-nameservice"); - boolean haEnabled = haNameService != null; - if (haEnabled) { - MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(0); - MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(0); - MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf(haNameService).addNN(nn1).addNN(nn2); - MiniDFSNNTopology namenodeTopology = new MiniDFSNNTopology().addNameservice(nameservice); - builder.nnTopology(namenodeTopology); - } - - MiniDFSCluster dfs = builder.build(); - - // Configure contents of the filesystem - org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch"); - - FileSystem fs; - if (haEnabled) { - dfs.transitionToActive(0); - fs = HATestUtil.configureFailoverFs(dfs, cfg); - } else { - fs = dfs.getFileSystem(); - } - - try { - // Set the elasticsearch user directory up - fs.mkdirs(esUserPath); - if (UserGroupInformation.isSecurityEnabled()) { - List acls = new ArrayList<>(); - acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch").setPermission(FsAction.ALL).build()); - fs.modifyAclEntries(esUserPath, acls); - } - - // Install a pre-existing repository into HDFS - String directoryName = "readonly-repository"; - String archiveName = directoryName + ".tar.gz"; - URL readOnlyRepositoryArchiveURL = MiniHDFS.class.getClassLoader().getResource(archiveName); - if (readOnlyRepositoryArchiveURL != null) { - Path tempDirectory = Files.createTempDirectory(MiniHDFS.class.getName()); - File readOnlyRepositoryArchive = tempDirectory.resolve(archiveName).toFile(); - FileUtils.copyURLToFile(readOnlyRepositoryArchiveURL, readOnlyRepositoryArchive); - FileUtil.unTar(readOnlyRepositoryArchive, tempDirectory.toFile()); - - fs.copyFromLocalFile( - true, - true, - new org.apache.hadoop.fs.Path(tempDirectory.resolve(directoryName).toAbsolutePath().toUri()), - esUserPath.suffix("/existing/" + directoryName) - ); - - FileUtils.deleteDirectory(tempDirectory.toFile()); - } - } finally { - fs.close(); - } - - // write our PID file - Path tmp = 
Files.createTempFile(baseDir, null, null); - String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0]; - Files.write(tmp, pid.getBytes(StandardCharsets.UTF_8)); - Files.move(tmp, baseDir.resolve(PID_FILE_NAME), StandardCopyOption.ATOMIC_MOVE); - - // write our port file - String portFileContent = Integer.toString(dfs.getNameNodePort(0)); - if (haEnabled) { - portFileContent = portFileContent + "\n" + Integer.toString(dfs.getNameNodePort(1)); - } - tmp = Files.createTempFile(baseDir, null, null); - Files.write(tmp, portFileContent.getBytes(StandardCharsets.UTF_8)); - Files.move(tmp, baseDir.resolve(PORT_FILE_NAME), StandardCopyOption.ATOMIC_MOVE); - } - -} diff --git a/test/fixtures/hdfs3-fixture/build.gradle b/test/fixtures/hdfs3-fixture/build.gradle deleted file mode 100644 index 872ab2efd42ab..0000000000000 --- a/test/fixtures/hdfs3-fixture/build.gradle +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -apply plugin: 'elasticsearch.java' - -dependencies { - api "org.apache.hadoop:hadoop-minicluster:3.3.1" -} diff --git a/test/fixtures/hdfs3-fixture/src/main/java/hdfs/MiniHDFS.java b/test/fixtures/hdfs3-fixture/src/main/java/hdfs/MiniHDFS.java deleted file mode 100644 index 0a26f5d82ac17..0000000000000 --- a/test/fixtures/hdfs3-fixture/src/main/java/hdfs/MiniHDFS.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package hdfs; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.permission.AclEntry; -import org.apache.hadoop.fs.permission.AclEntryType; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.MiniDFSNNTopology; -import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; -import org.apache.hadoop.security.UserGroupInformation; - -import java.io.File; -import java.lang.management.ManagementFactory; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.StandardCopyOption; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * MiniHDFS test fixture. There is a CLI tool, but here we can - * easily properly setup logging, avoid parsing JSON, etc. 
- */ -public class MiniHDFS { - - private static String PORT_FILE_NAME = "ports"; - private static String PID_FILE_NAME = "pid"; - - public static void main(String[] args) throws Exception { - if (args.length != 1 && args.length != 3) { - throw new IllegalArgumentException( - "Expected: MiniHDFS [ ], got: " + Arrays.toString(args) - ); - } - boolean secure = args.length == 3; - - // configure Paths - Path baseDir = Paths.get(args[0]); - // hadoop-home/, so logs will not complain - if (System.getenv("HADOOP_HOME") == null) { - Path hadoopHome = baseDir.resolve("hadoop-home"); - Files.createDirectories(hadoopHome); - System.setProperty("hadoop.home.dir", hadoopHome.toAbsolutePath().toString()); - } - // hdfs-data/, where any data is going - Path hdfsHome = baseDir.resolve("hdfs-data"); - - // configure cluster - Configuration cfg = new Configuration(); - cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString()); - // lower default permission: TODO: needed? - cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766"); - - // optionally configure security - if (secure) { - String kerberosPrincipal = args[1]; - String keytabFile = args[2]; - - cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); - cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true"); - cfg.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal); - cfg.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal); - cfg.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal); - cfg.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, keytabFile); - cfg.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, keytabFile); - cfg.set(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, "true"); - cfg.set(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, "true"); - cfg.set(DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY, "true"); - cfg.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, "true"); - cfg.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, "AES/CTR/NoPadding"); - } - - UserGroupInformation.setConfiguration(cfg); - - MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(cfg); - String explicitPort = System.getProperty("hdfs.config.port"); - if (explicitPort != null) { - builder.nameNodePort(Integer.parseInt(explicitPort)); - } else { - if (secure) { - builder.nameNodePort(9998); - } else { - builder.nameNodePort(9999); - } - } - - // Configure HA mode - String haNameService = System.getProperty("ha-nameservice"); - boolean haEnabled = haNameService != null; - if (haEnabled) { - MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(0); - MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(0); - MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf(haNameService).addNN(nn1).addNN(nn2); - MiniDFSNNTopology namenodeTopology = new MiniDFSNNTopology().addNameservice(nameservice); - builder.nnTopology(namenodeTopology); - } - - MiniDFSCluster dfs = builder.build(); - - // Configure contents of the filesystem - org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch"); - - FileSystem fs; - if (haEnabled) { - dfs.transitionToActive(0); - fs = HATestUtil.configureFailoverFs(dfs, cfg); - } else { - fs = dfs.getFileSystem(); - } - - try { - // Set the elasticsearch user directory up - fs.mkdirs(esUserPath); - if (UserGroupInformation.isSecurityEnabled()) { - List acls = new ArrayList<>(); - 
acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch").setPermission(FsAction.ALL).build()); - fs.modifyAclEntries(esUserPath, acls); - } - - // Install a pre-existing repository into HDFS - String directoryName = "readonly-repository"; - String archiveName = directoryName + ".tar.gz"; - URL readOnlyRepositoryArchiveURL = MiniHDFS.class.getClassLoader().getResource(archiveName); - if (readOnlyRepositoryArchiveURL != null) { - Path tempDirectory = Files.createTempDirectory(MiniHDFS.class.getName()); - File readOnlyRepositoryArchive = tempDirectory.resolve(archiveName).toFile(); - FileUtils.copyURLToFile(readOnlyRepositoryArchiveURL, readOnlyRepositoryArchive); - FileUtil.unTar(readOnlyRepositoryArchive, tempDirectory.toFile()); - - fs.copyFromLocalFile( - true, - true, - new org.apache.hadoop.fs.Path(tempDirectory.resolve(directoryName).toAbsolutePath().toUri()), - esUserPath.suffix("/existing/" + directoryName) - ); - - FileUtils.deleteDirectory(tempDirectory.toFile()); - } - } finally { - fs.close(); - } - - // write our PID file - Path tmp = Files.createTempFile(baseDir, null, null); - String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0]; - Files.write(tmp, pid.getBytes(StandardCharsets.UTF_8)); - Files.move(tmp, baseDir.resolve(PID_FILE_NAME), StandardCopyOption.ATOMIC_MOVE); - - // write our port file - String portFileContent = Integer.toString(dfs.getNameNodePort(0)); - if (haEnabled) { - portFileContent = portFileContent + "\n" + Integer.toString(dfs.getNameNodePort(1)); - } - tmp = Files.createTempFile(baseDir, null, null); - Files.write(tmp, portFileContent.getBytes(StandardCharsets.UTF_8)); - Files.move(tmp, baseDir.resolve(PORT_FILE_NAME), StandardCopyOption.ATOMIC_MOVE); - } - -} diff --git a/test/fixtures/hdfs3-fixture/src/main/resources/readonly-repository.tar.gz b/test/fixtures/hdfs3-fixture/src/main/resources/readonly-repository.tar.gz deleted file mode 100644 index 2cdb6d77c07d0a06029521d60fdc966e92248d72..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1314 zcmV+-1>O1|iwFQ)qqIjP0S`X&6>Cgu-pu#=zCXvJBxxF2uE(NTlc~08Xdnk?@>NJB3M=Q zuNq0Z+=H672A4-jl4|Q;ka1D}I20B!*MB%1#%+P-J;5nlaQ&|anL)BFW7;e@@cL@d z;9>|4lm`_Y><&7Pn&e|?6cwk4u`qL2pO1@^%W^cX^bhg#Tr@Vn39rp|2l?Jn6O_-j z8cY(KxJU11A_|i_)$|yPjlC8Q%|xOoI-PcJKC*nRC1%&w*3Mf{k0~|nzgOc*Wp*>x zsAvBOqvQ53U@Rc8e*k#c)Ubcohc_(`{O|c+$^MZb2$27^f@=ELsZDyShvHry)Ei!- zp8vZEp*>Lxs&nYBAL`e!Q2^xU8t9 zxanZrk&+7&x9F?WPS5E0Z7t3EQn8ikZJX*@FL5Xw92|W6(ucp7*13QE%6I4Znm`Sz z*}t2Tx6=MaQ9NP)vIO?u3OxHS)nIKU``;>!6xQ8>aN@8WT=%TM`sT; zR+8<$$D9P8jisFO&{m45nq%=L_K&-p$U9SW=P1=jhtu-E>NR`gX<@_tKS2z+{U6Bx z9zJK#KWcObG_HRMi3s%H3eKSahRcI`@n7bf<5>I`F_J`x|E<7{|4;i3{}WZf`mqAw z`}Zx=u~j3j7>-^FzT51Wh6`7v!jq8^nL1I z!6%yI1%;VX`0mrx1Yv{d~0;C*RxOB$`T?TfXdMEmbF0PT?hw z&h~cq`T7dW83~1Gnwjw4j1}HOd-3RKR52$e;xRimsSW4S^NC?`acRGz09ttP&;4`M zb1lx~{}rVn)2wIzf+SSr|B{4du>V%z=l?XVQ^RKbf5*)~^Id=Z@Yz`Z8&|%(+Wpmd zJMQQxUHnA($5XaN45pcBuN&~}(1N+R6pP%N877N}nz9yorSI<2Gv$lY)Zk)uO{N`R z)ycBr7>uN-LLA%V)qD`*?)r#!g}@}s{H;VVgW(_uO(FP|5bMVc07--6cgy(lfAod*4;l3 z-aSm)JqNOzFLv services = ["peppa", "hdfs"] - -tasks.named("preProcessFixture").configure { - doLast { - // We need to create these up-front because if docker creates them they will be owned by root and we won't be - // able to clean them up - services.each { fixturesDir.dir("shared/${it}").get().getAsFile().mkdirs() } - } -} - -tasks.named("postProcessFixture").configure { task -> - inputs.dir(fixturesDir.dir('shared').get().getAsFile()) - services.each { service -> 
- File confTemplate = fixturesDir.file("shared/${service}/krb5.conf.template").get().asFile - File confFile = fixturesDir.file("shared/${service}/krb5.conf").get().asFile - outputs.file(confFile) - doLast { - assert confTemplate.exists() - String confContents = confTemplate.text - .replace("\${MAPPED_PORT}", "${ext."test.fixtures.${service}.udp.88"}") - confFile.text = confContents - } +dockerFixtures { + krb5dc { + dockerContext = projectDir + version = "1.0" + baseImages = ["ubuntu:14.04"] } } -project.ext.krb5Conf = { s -> file("$testFixturesDir/shared/${s}/krb5.conf") } -project.ext.krb5Keytabs = { s, fileName -> file("$testFixturesDir/shared/${s}/keytabs/${fileName}") } - configurations { + all { + transitive = false + } krb5ConfHdfsFile { canBeConsumed = true canBeResolved = false @@ -49,11 +34,24 @@ configurations { } } -artifacts { - krb5ConfHdfsFile(krb5Conf('hdfs')) { - builtBy("postProcessFixture") - } - krb5KeytabsHdfsDir(file("$testFixturesDir/shared/hdfs/keytabs/")) { - builtBy("postProcessFixture") - } +dependencies { + testImplementation project(':test:framework') + + api "junit:junit:${versions.junit}" + api project(':test:fixtures:testcontainer-utils') + api "org.testcontainers:testcontainers:${versions.testcontainer}" + implementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + implementation "org.slf4j:slf4j-api:${versions.slf4j}" + implementation "com.github.docker-java:docker-java-api:${versions.dockerJava}" + implementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + + runtimeOnly "com.github.docker-java:docker-java-transport-zerodep:${versions.dockerJava}" + runtimeOnly "com.github.docker-java:docker-java-transport:${versions.dockerJava}" + runtimeOnly "com.github.docker-java:docker-java-core:${versions.dockerJava}" + runtimeOnly "org.apache.commons:commons-compress:${versions.commonsCompress}" + runtimeOnly "org.rnorth.duct-tape:duct-tape:${versions.ductTape}" + + // ensure we have proper logging during when used in tests + runtimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" + runtimeOnly "org.hamcrest:hamcrest:${versions.hamcrest}" } diff --git a/test/fixtures/krb5kdc-fixture/docker-compose.yml b/test/fixtures/krb5kdc-fixture/docker-compose.yml deleted file mode 100644 index 9e2d67000532e..0000000000000 --- a/test/fixtures/krb5kdc-fixture/docker-compose.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: '3' -services: - peppa: - hostname: kerberos.build.elastic.co - build: - context: . - dockerfile: Dockerfile - extra_hosts: - - "kerberos.build.elastic.co:127.0.0.1" - command: "bash /fixture/src/main/resources/provision/peppa.sh" - volumes: - - ./testfixtures_shared/shared/peppa:/fixture/build - # containers have bad entropy so mount /dev/urandom. Less secure but this is a test fixture. - - /dev/urandom:/dev/random - ports: - - "4444" - - "88/udp" - hdfs: - hostname: kerberos.build.elastic.co - build: - context: . - dockerfile: Dockerfile - extra_hosts: - - "kerberos.build.elastic.co:127.0.0.1" - command: "bash /fixture/src/main/resources/provision/hdfs.sh" - volumes: - - ./testfixtures_shared/shared/hdfs:/fixture/build - # containers have bad entropy so mount /dev/urandom. Less secure but this is a test fixture. 
- - /dev/urandom:/dev/random - ports: - - "4444" - - "88/udp" diff --git a/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java b/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java new file mode 100644 index 0000000000000..fa75b57ea87a6 --- /dev/null +++ b/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.fixtures.krb5kdc; + +import com.github.dockerjava.api.model.ExposedPort; +import com.github.dockerjava.api.model.Ports; + +import org.elasticsearch.test.fixtures.testcontainers.DockerEnvironmentAwareTestContainer; +import org.junit.rules.TemporaryFolder; +import org.testcontainers.containers.Network; +import org.testcontainers.images.RemoteDockerImage; +import org.testcontainers.shaded.org.apache.commons.io.IOUtils; +import org.testcontainers.utility.MountableFile; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public final class Krb5kDcContainer extends DockerEnvironmentAwareTestContainer { + public static final String DOCKER_BASE_IMAGE = "docker.elastic.co/elasticsearch-dev/krb5dc-fixture:1.0"; + private final TemporaryFolder temporaryFolder = new TemporaryFolder(); + private final ProvisioningId provisioningId; + private Path krb5ConfFile; + private Path keytabFile; + private Path esKeytabFile; + + public enum ProvisioningId { + HDFS( + "hdfs", + "/fixture/src/main/resources/provision/hdfs.sh", + "/fixture/build/keytabs/hdfs_hdfs.build.elastic.co.keytab", + "/fixture/build/keytabs/elasticsearch.keytab", + "hdfs/hdfs.build.elastic.co@BUILD.ELASTIC.CO" + ), + PEPPA( + "peppa", + "/fixture/src/main/resources/provision/peppa.sh", + "/fixture/build/keytabs/peppa.keytab", + "/fixture/build/keytabs/HTTP_localhost.keytab", + "peppa@BUILD.ELASTIC.CO" + ); + + private final String id; + private final String scriptPath; + private final String keytabPath; + public final String esKeytab; + private final String keytabPrincipal; + + ProvisioningId(String id, String scriptPath, String keytabPath, String esKeytab, String principal) { + this.id = id; + this.scriptPath = scriptPath; + this.keytabPath = keytabPath; + this.esKeytab = esKeytab; + this.keytabPrincipal = principal; + } + } + + public Krb5kDcContainer() { + this(ProvisioningId.HDFS); + } + + public Krb5kDcContainer(ProvisioningId provisioningId) { + super(new RemoteDockerImage(DOCKER_BASE_IMAGE)); + this.provisioningId = provisioningId; + withNetwork(Network.newNetwork()); + addExposedPorts(88, 4444); + withCreateContainerCmdModifier(cmd -> { + // Add previously exposed ports and UDP port + List exposedPorts = new ArrayList<>(); + for (ExposedPort p : cmd.getExposedPorts()) { + exposedPorts.add(p); + } + exposedPorts.add(ExposedPort.udp(88)); + cmd.withExposedPorts(exposedPorts); + + // Add previous port bindings and UDP port binding + Ports ports = cmd.getPortBindings(); + ports.bind(ExposedPort.udp(88), 
Ports.Binding.empty()); + cmd.withPortBindings(ports); + }); + withNetworkAliases("kerberos.build.elastic.co", "build.elastic.co"); + withCopyFileToContainer(MountableFile.forHostPath("/dev/urandom"), "/dev/random"); + withExtraHost("kerberos.build.elastic.co", "127.0.0.1"); + withCommand("bash", provisioningId.scriptPath); + } + + @Override + public void start() { + try { + temporaryFolder.create(); + } catch (IOException e) { + throw new RuntimeException(e); + } + super.start(); + System.setProperty("java.security.krb5.conf", getConfPath().toString()); + } + + @Override + public void stop() { + super.stop(); + System.clearProperty("java.security.krb5.conf"); + temporaryFolder.delete(); + } + + @SuppressWarnings("all") + public String getConf() { + var bindings = Arrays.asList(getCurrentContainerInfo().getNetworkSettings().getPorts().getBindings().get(ExposedPort.udp(88))) + .stream() + .findFirst(); + String hostPortSpec = bindings.get().getHostPortSpec(); + String s = copyFileFromContainer("/fixture/build/krb5.conf.template", i -> IOUtils.toString(i, StandardCharsets.UTF_8)); + return s.replace("${MAPPED_PORT}", hostPortSpec); + } + + public Path getKeytab() { + if (keytabFile != null) { + return keytabFile; + } + try { + String keytabPath = provisioningId.keytabPath; + keytabFile = temporaryFolder.newFile(provisioningId.id + ".keytab").toPath(); + copyFileFromContainer(keytabPath, keytabFile.toAbsolutePath().toString()); + return keytabFile; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public Path getEsKeytab() { + if (esKeytabFile != null) { + return esKeytabFile; + } + try { + esKeytabFile = temporaryFolder.newFile("elasticsearch.keytab").toPath(); + copyFileFromContainer(provisioningId.esKeytab, esKeytabFile.toAbsolutePath().toString()); + return esKeytabFile; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public Path getConfPath() { + if (krb5ConfFile != null) { + return krb5ConfFile; + } + try { + krb5ConfFile = temporaryFolder.newFile("krb5.conf").toPath(); + Files.write(krb5ConfFile, getConf().getBytes(StandardCharsets.UTF_8)); + return krb5ConfFile; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public String getPrincipal() { + return provisioningId.keytabPrincipal; + } + + public String getEsPrincipal() { + return "elasticsearch@BUILD.ELASTIC.CO"; + } +} diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh index ef5bba076444c..de08a52df3306 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh @@ -8,4 +8,4 @@ addprinc.sh "hdfs/hdfs.build.elastic.co" # Use this as a signal that setup is complete python3 -m http.server 4444 & -sleep infinity \ No newline at end of file +sleep infinity diff --git a/test/fixtures/testcontainer-utils/src/main/java/org/elasticsearch/test/fixtures/testcontainers/DockerEnvironmentAwareTestContainer.java b/test/fixtures/testcontainer-utils/src/main/java/org/elasticsearch/test/fixtures/testcontainers/DockerEnvironmentAwareTestContainer.java index 11063dc4cf10b..1b47039f9ac5c 100644 --- a/test/fixtures/testcontainer-utils/src/main/java/org/elasticsearch/test/fixtures/testcontainers/DockerEnvironmentAwareTestContainer.java +++ b/test/fixtures/testcontainer-utils/src/main/java/org/elasticsearch/test/fixtures/testcontainers/DockerEnvironmentAwareTestContainer.java @@ -64,10 +64,16 @@ 
public DockerEnvironmentAwareTestContainer(Future image) {
     public void start() {
         Assume.assumeFalse("Docker support excluded on OS", EXCLUDED_OS);
         Assume.assumeTrue("Docker probing succesful", DOCKER_PROBING_SUCCESSFUL);
-        withLogConsumer(new Slf4jLogConsumer(logger()));
+        withLogConsumer(new Slf4jLogConsumer(LOGGER));
         super.start();
     }
 
+    @Override
+    public void stop() {
+        LOGGER.info("Stopping container {}", getContainerId());
+        super.stop();
+    }
+
     @Override
     public void cache() {
         try {
diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/resource/FileResource.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/resource/FileResource.java
index 2d4a88c4369b5..de4df7eaaaf49 100644
--- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/resource/FileResource.java
+++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/resource/FileResource.java
@@ -14,18 +14,19 @@
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.StandardOpenOption;
+import java.util.function.Supplier;
 
 public class FileResource implements Resource {
-    private final Path file;
+    private final Supplier file;
 
-    FileResource(Path file) {
+    FileResource(Supplier file) {
         this.file = file;
     }
 
     @Override
     public InputStream asStream() {
         try {
-            return Files.newInputStream(file, StandardOpenOption.READ);
+            return Files.newInputStream(file.get(), StandardOpenOption.READ);
         } catch (IOException e) {
             throw new UncheckedIOException(e);
         }
diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/resource/Resource.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/resource/Resource.java
index 829e34007044d..22dc3e7465426 100644
--- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/resource/Resource.java
+++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/resource/Resource.java
@@ -32,6 +32,10 @@ static Resource fromClasspath(String path) {
     }
 
     static Resource fromFile(Path file) {
+        return fromFile(() -> file);
+    }
+
+    static Resource fromFile(Supplier file) {
         return new FileResource(file);
     }
 
diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java
index 804f4eae4042d..5ac83f94f6248 100644
--- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java
+++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java
@@ -278,11 +278,18 @@ public static Iterable createParameters(NamedXContentRegistry executea
         return createParameters(executeableSectionRegistry, null);
     }
 
+    /**
+     * Create parameters for this parameterized test.
+     */
+    public static Iterable createParameters(String[] testPaths, Map yamlParameters) throws Exception {
+        return createParameters(ExecutableSection.XCONTENT_REGISTRY, testPaths, yamlParameters);
+    }
+
     /**
      * Create parameters for this parameterized test.
      */
     public static Iterable createParameters(String[] testPaths) throws Exception {
-        return createParameters(ExecutableSection.XCONTENT_REGISTRY, testPaths);
+        return createParameters(testPaths, Collections.emptyMap());
     }
 
     /**
@@ -295,6 +302,23 @@ public static Iterable createParameters(String[] testPaths) throws Exc
      */
     public static Iterable createParameters(NamedXContentRegistry executeableSectionRegistry, String[] testPaths)
         throws Exception {
+        return createParameters(executeableSectionRegistry, testPaths, Collections.emptyMap());
+    }
+
+    /**
+     * Create parameters for this parameterized test.
+     *
+     * @param executeableSectionRegistry registry of executable sections
+     * @param testPaths list of paths to explicitly search for tests. If null then include all tests in root path.
+     * @param yamlParameters map of parameters used within the yaml specs to be replaced at parsing time.
+     * @return list of test candidates.
+     * @throws Exception
+     */
+    public static Iterable createParameters(
+        NamedXContentRegistry executeableSectionRegistry,
+        String[] testPaths,
+        Map yamlParameters
+    ) throws Exception {
         if (testPaths != null && System.getProperty(REST_TESTS_SUITE) != null) {
             throw new IllegalArgumentException("The '" + REST_TESTS_SUITE + "' system property is not supported with explicit test paths.");
         }
@@ -308,7 +332,7 @@ public static Iterable createParameters(NamedXContentRegistry executea
         for (String api : yamlSuites.keySet()) {
             List yamlFiles = new ArrayList<>(yamlSuites.get(api));
             for (Path yamlFile : yamlFiles) {
-                ClientYamlTestSuite suite = ClientYamlTestSuite.parse(executeableSectionRegistry, api, yamlFile);
+                ClientYamlTestSuite suite = ClientYamlTestSuite.parse(executeableSectionRegistry, api, yamlFile, yamlParameters);
                 suites.add(suite);
                 try {
                     suite.validate();
diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ParameterizableYamlXContentParser.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ParameterizableYamlXContentParser.java
new file mode 100644
index 0000000000000..43ea4f9d665d0
--- /dev/null
+++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ParameterizableYamlXContentParser.java
@@ -0,0 +1,295 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */ + +package org.elasticsearch.test.rest.yaml; + +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.xcontent.DeprecationHandler; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentLocation; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.nio.CharBuffer; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +/** + * a wrapper around YamlXContentParser that allows for parameter replacement in the yaml file + */ +public class ParameterizableYamlXContentParser implements XContentParser { + private final XContentParser delegate; + private final Map params; + + public ParameterizableYamlXContentParser(XContentParser delegate, Map params) { + this.delegate = delegate; + this.params = params.entrySet().stream().collect(Collectors.toMap(e -> "@" + e.getKey() + "@", Map.Entry::getValue)); + } + + @Override + public XContentType contentType() { + return delegate.contentType(); + } + + @Override + public void allowDuplicateKeys(boolean allowDuplicateKeys) { + delegate.allowDuplicateKeys(allowDuplicateKeys); + } + + @Override + public Token nextToken() throws IOException { + return delegate.nextToken(); + } + + @Override + @Nullable + public String nextFieldName() throws IOException { + return delegate.nextFieldName(); + } + + @Override + public void skipChildren() throws IOException { + delegate.skipChildren(); + } + + @Override + public Token currentToken() { + return delegate.currentToken(); + } + + @Override + public String currentName() throws IOException { + return delegate.currentName(); + } + + @Override + public Map map() throws IOException { + return delegate.map(); + } + + @Override + public Map mapOrdered() throws IOException { + return visitMapForParameterReplacements(delegate.mapOrdered()); + } + + private Map visitMapForParameterReplacements(Map stringObjectMap) { + var updatedMap = stringObjectMap.entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> maybeReplaceParams(e.getValue()))); + return updatedMap; + } + + @SuppressWarnings("unchecked") + private Object maybeReplaceParams(Object inputValue) { + if (inputValue == null) { + return null; + } + if (inputValue instanceof Map) { + return visitMapForParameterReplacements((Map) inputValue); + } + if (inputValue instanceof String) { + if (((String) inputValue).matches(".*@.*@.*")) { + String value = (String) inputValue; + for (String s : params.keySet()) { + if (value.contains(s)) { + value = value.replace(s, params.get(s).toString()); + } + } + return value; + } + } + return inputValue; + } + + @Override + public Map mapStrings() throws IOException { + return delegate.mapStrings(); + } + + @Override + public Map map(Supplier> mapFactory, CheckedFunction mapValueParser) + throws IOException { + return delegate.map(mapFactory, mapValueParser); + } + + @Override + public List list() throws IOException { + return delegate.list(); + } + + @Override + public List listOrderedMap() throws IOException { + return delegate.listOrderedMap(); + } + + @Override + public String text() throws IOException { + return delegate.text(); + } + + @Override + public String textOrNull() throws IOException { + return delegate.textOrNull(); + } + + @Override + public CharBuffer charBufferOrNull() throws IOException { + 
return delegate.charBufferOrNull(); + } + + @Override + public CharBuffer charBuffer() throws IOException { + return delegate.charBuffer(); + } + + @Override + public Object objectText() throws IOException { + return delegate.objectText(); + } + + @Override + public Object objectBytes() throws IOException { + return delegate.objectBytes(); + } + + @Override + public boolean hasTextCharacters() { + return delegate.hasTextCharacters(); + } + + @Override + public char[] textCharacters() throws IOException { + return delegate.textCharacters(); + } + + @Override + public int textLength() throws IOException { + return delegate.textLength(); + } + + @Override + public int textOffset() throws IOException { + return delegate.textOffset(); + } + + @Override + public Number numberValue() throws IOException { + return delegate.numberValue(); + } + + @Override + public NumberType numberType() throws IOException { + return delegate.numberType(); + } + + @Override + public short shortValue(boolean coerce) throws IOException { + return delegate.shortValue(coerce); + } + + @Override + public int intValue(boolean coerce) throws IOException { + return delegate.intValue(coerce); + } + + @Override + public long longValue(boolean coerce) throws IOException { + return delegate.longValue(coerce); + } + + @Override + public float floatValue(boolean coerce) throws IOException { + return delegate.floatValue(coerce); + } + + @Override + public double doubleValue(boolean coerce) throws IOException { + return delegate.doubleValue(coerce); + } + + @Override + public short shortValue() throws IOException { + return delegate.shortValue(); + } + + @Override + public int intValue() throws IOException { + return delegate.intValue(); + } + + @Override + public long longValue() throws IOException { + return delegate.longValue(); + } + + @Override + public float floatValue() throws IOException { + return delegate.floatValue(); + } + + @Override + public double doubleValue() throws IOException { + return delegate.doubleValue(); + } + + @Override + public boolean isBooleanValue() throws IOException { + return delegate.isBooleanValue(); + } + + @Override + public boolean booleanValue() throws IOException { + return delegate.booleanValue(); + } + + @Override + public byte[] binaryValue() throws IOException { + return delegate.binaryValue(); + } + + @Override + public XContentLocation getTokenLocation() { + return delegate.getTokenLocation(); + } + + @Override + public T namedObject(Class categoryClass, String name, Object context) throws IOException { + return getXContentRegistry().parseNamedObject(categoryClass, name, this, context); + } + + @Override + public NamedXContentRegistry getXContentRegistry() { + return delegate.getXContentRegistry(); + } + + @Override + public boolean isClosed() { + return delegate.isClosed(); + } + + @Override + public RestApiVersion getRestApiVersion() { + return delegate.getRestApiVersion(); + } + + @Override + public DeprecationHandler getDeprecationHandler() { + return delegate.getDeprecationHandler(); + } + + @Override + public void close() throws IOException { + delegate.close(); + } + +} diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java index e5f46ff135171..466b64736ddbc 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java +++ 
b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java @@ -10,6 +10,7 @@ import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.Channels; +import org.elasticsearch.test.rest.yaml.ParameterizableYamlXContentParser; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; @@ -26,6 +27,7 @@ import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Set; @@ -38,7 +40,8 @@ * Supports a setup section and multiple test sections. */ public class ClientYamlTestSuite { - public static ClientYamlTestSuite parse(NamedXContentRegistry executeableSectionRegistry, String api, Path file) throws IOException { + public static ClientYamlTestSuite parse(NamedXContentRegistry executeableSectionRegistry, String api, Path file, Map params) + throws IOException { if (Files.isRegularFile(file) == false) { throw new IllegalArgumentException(file.toAbsolutePath() + " is not a file"); } @@ -63,10 +66,18 @@ public static ClientYamlTestSuite parse(NamedXContentRegistry executeableSection } try ( - XContentParser parser = YamlXContent.yamlXContent.createParser( - XContentParserConfiguration.EMPTY.withRegistry(executeableSectionRegistry), - Files.newInputStream(file) - ) + XContentParser parser = params.isEmpty() + ? YamlXContent.yamlXContent.createParser( + XContentParserConfiguration.EMPTY.withRegistry(executeableSectionRegistry), + Files.newInputStream(file) + ) + : new ParameterizableYamlXContentParser( + YamlXContent.yamlXContent.createParser( + XContentParserConfiguration.EMPTY.withRegistry(executeableSectionRegistry), + Files.newInputStream(file) + ), + params + ) ) { return parse(api, filename, Optional.of(file), parser); } catch (Exception e) { @@ -103,6 +114,10 @@ public static ClientYamlTestSuite parse(String api, String suiteName, Optional

    (testSections)); } + public static ClientYamlTestSuite parse(NamedXContentRegistry xcontentRegistry, String api, Path filePath) throws IOException { + return parse(xcontentRegistry, api, filePath, Collections.emptyMap()); + } + private final String api; private final String name; private final Optional file; diff --git a/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle b/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle index 94aa196f8e8e1..333364c6167c0 100644 --- a/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle +++ b/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle @@ -5,28 +5,17 @@ * 2.0. */ -import org.elasticsearch.gradle.OS -import org.elasticsearch.gradle.internal.info.BuildParams -import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.internal.util.ports.ReservedPortRange - -import java.nio.file.Files -import java.nio.file.Paths - -import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE - -apply plugin: 'elasticsearch.test.fixtures' -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.rest-resources' apply plugin: 'elasticsearch.internal-available-ports' -final Project hdfsFixtureProject = project(':test:fixtures:hdfs2-fixture') -final Project krbFixtureProject = project(':test:fixtures:krb5kdc-fixture') -final Project hdfsRepoPluginProject = project(':plugins:repository-hdfs') - dependencies { + clusterPlugins project(':plugins:repository-hdfs') javaRestTestImplementation(testArtifact(project(xpackModule('searchable-snapshots')))) - javaRestTestImplementation hdfsRepoPluginProject + javaRestTestImplementation project(path: ':test:fixtures:hdfs-fixture', configuration:"shadowedHdfs2") + javaRestTestImplementation project(':test:fixtures:krb5kdc-fixture') + javaRestTestRuntimeOnly "com.google.guava:guava:16.0.1" + javaRestTestRuntimeOnly "commons-cli:commons-cli:1.2" } restResources { @@ -35,152 +24,7 @@ restResources { } } -testFixtures.useFixture(krbFixtureProject.path, 'hdfs-snapshot') - -configurations { - hdfsFixture -} - -dependencies { - hdfsFixture hdfsFixtureProject - // Set the keytab files in the classpath so that we can access them from test code without the security manager freaking out. 
- if (isEclipse == false) { - javaRestTestRuntimeOnly files(krbFixtureProject.ext.krb5Keytabs("hdfs-snapshot", "hdfs_hdfs.build.elastic.co.keytab").parent){ - builtBy ":test:fixtures:krb5kdc-fixture:preProcessFixture" - } - } -} - -normalization { - runtimeClasspath { - // ignore generated keytab files for the purposes of build avoidance - ignore '*.keytab' - // ignore fixture ports file which is on the classpath primarily to pacify the security manager - ignore 'ports' - } -} - -String realm = "BUILD.ELASTIC.CO" -String krb5conf = krbFixtureProject.ext.krb5Conf("hdfs") - -// Create HDFS File System Testing Fixtures -for (String fixtureName : ['hdfsFixture', 'secureHdfsFixture']) { - project.tasks.register(fixtureName, org.elasticsearch.gradle.internal.test.AntFixture) { - dependsOn project.configurations.hdfsFixture, krbFixtureProject.tasks.postProcessFixture - executable = "${BuildParams.runtimeJavaHome}/bin/java" - env 'CLASSPATH', "${-> project.configurations.hdfsFixture.asPath}" - maxWaitInSeconds 60 - BuildParams.withFipsEnabledOnly(it) - waitCondition = { fixture, ant -> - // the hdfs.MiniHDFS fixture writes the ports file when - // it's ready, so we can just wait for the file to exist - return fixture.portsFile.exists() - } - final List miniHDFSArgs = [] - - // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options - if (name.equals('secureHdfsFixture')) { - miniHDFSArgs.addAll(["--add-exports", "java.security.jgss/sun.security.krb5=ALL-UNNAMED"]) - miniHDFSArgs.add("-Djava.security.krb5.conf=${krb5conf}") - } - // configure port dynamically - def portRange = project.getExtensions().getByType(ReservedPortRange) - miniHDFSArgs.add("-Dhdfs.config.port=${portRange.getOrAllocate(name)}") - - // Common options - miniHDFSArgs.add('hdfs.MiniHDFS') - miniHDFSArgs.add(baseDir) - - // If it's a secure fixture, then set the principal name and keytab locations to use for auth. - if (name.equals('secureHdfsFixture')) { - miniHDFSArgs.add("hdfs/hdfs.build.elastic.co@${realm}") - miniHDFSArgs.add(project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab")) - } - - args miniHDFSArgs.toArray() - } -} - -// Disable integration test if Fips mode tasks.named("javaRestTest").configure { - description = "Runs rest tests against an elasticsearch cluster with HDFS." - def hdfsPort = project.getExtensions().getByType(ReservedPortRange).getOrAllocate("hdfsFixture") - systemProperty 'test.hdfs.uri', "hdfs://localhost:$hdfsPort" - nonInputProperties.systemProperty 'test.hdfs.path', '/user/elasticsearch/test/searchable_snapshots/simple' - BuildParams.withFipsEnabledOnly(it) -} - -tasks.register("javaRestTestSecure", RestIntegTestTask) { - description = "Runs rest tests against an elasticsearch cluster with Secured HDFS." 
- def hdfsPort = project.getExtensions().getByType(ReservedPortRange).getOrAllocate("secureHdfsFixture") - nonInputProperties.systemProperty 'test.hdfs.uri', "hdfs://localhost:$hdfsPort" - nonInputProperties.systemProperty 'test.hdfs.path', '/user/elasticsearch/test/searchable_snapshots/secure' - nonInputProperties.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}" - nonInputProperties.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}" - nonInputProperties.systemProperty( - "test.krb5.keytab.hdfs", - project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab") - ) - testClassesDirs = sourceSets.javaRestTest.output.classesDirs - classpath = sourceSets.javaRestTest.runtimeClasspath - BuildParams.withFipsEnabledOnly(it) -} -tasks.named("check").configure { dependsOn("javaRestTestSecure") } - -testClusters.configureEach { - testDistribution = 'DEFAULT' - plugin(hdfsRepoPluginProject.path) - setting 'xpack.license.self_generated.type', 'trial' - - setting 'xpack.searchable.snapshot.shared_cache.size', '16MB' - setting 'xpack.searchable.snapshot.shared_cache.region_size', '256KB' - - setting 'xpack.security.enabled', 'false' -} - -testClusters.matching { it.name == "javaRestTestSecure" }.configureEach { - systemProperty "java.security.krb5.conf", krb5conf - jvmArgs "--add-exports", "java.security.jgss/sun.security.krb5=ALL-UNNAMED" - extraConfigFile( - "repository-hdfs/krb5.keytab", - file("${project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "elasticsearch.keytab")}"), IGNORE_VALUE - ) -} - -// Determine HDFS Fixture compatibility for the current build environment. -boolean fixtureSupported = false -if (OS.current() != OS.WINDOWS) { - // hdfs fixture will not start without hadoop native libraries on windows - String nativePath = System.getenv("HADOOP_HOME") - if (nativePath != null) { - java.nio.file.Path path = Paths.get(nativePath) - if (Files.isDirectory(path) && - Files.exists(path.resolve("bin").resolve("winutils.exe")) && - Files.exists(path.resolve("bin").resolve("hadoop.dll")) && - Files.exists(path.resolve("bin").resolve("hdfs.dll"))) { - fixtureSupported = true - } else { - throw new IllegalStateException("HADOOP_HOME: ${path} is invalid, does not contain hadoop native libraries in \$HADOOP_HOME/bin") - } - } -} else { - fixtureSupported = true -} - -boolean legalPath = rootProject.rootDir.toString().contains(" ") == false -if (legalPath == false) { - fixtureSupported = false -} - -if (fixtureSupported) { - tasks.named("javaRestTest").configure {dependsOn "hdfsFixture" } - tasks.named("javaRestTestSecure").configure {dependsOn "secureHdfsFixture" } -} else { - tasks.named("javaRestTest").configure {enabled = false } - tasks.named("javaRestTestSecure").configure { enabled = false } - if (legalPath) { - logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH") - } else { - logger.warn("hdfsFixture unsupported since there are spaces in the path: '" + rootProject.rootDir.toString() + "'") - } + usesDefaultDistribution() + jvmArgs '--add-exports', 'java.security.jgss/sun.security.krb5=ALL-UNNAMED' } diff --git a/x-pack/plugin/searchable-snapshots/qa/hdfs/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/hdfs/HdfsSearchableSnapshotsIT.java b/x-pack/plugin/searchable-snapshots/qa/hdfs/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/hdfs/HdfsSearchableSnapshotsIT.java index 515be1adccc9b..b8ace1d32d0f7 100644 --- 
a/x-pack/plugin/searchable-snapshots/qa/hdfs/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/hdfs/HdfsSearchableSnapshotsIT.java +++ b/x-pack/plugin/searchable-snapshots/qa/hdfs/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/hdfs/HdfsSearchableSnapshotsIT.java @@ -7,13 +7,39 @@ package org.elasticsearch.xpack.searchablesnapshots.hdfs; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.fixtures.hdfs.HdfsClientThreadLeakFilter; +import org.elasticsearch.test.fixtures.hdfs.HdfsFixture; import org.elasticsearch.xpack.searchablesnapshots.AbstractSearchableSnapshotsRestTestCase; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; -import static org.hamcrest.Matchers.blankOrNullString; -import static org.hamcrest.Matchers.not; - +@ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class }) public class HdfsSearchableSnapshotsIT extends AbstractSearchableSnapshotsRestTestCase { + public static HdfsFixture hdfsFixture = new HdfsFixture(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .plugin("repository-hdfs") + .setting("xpack.searchable.snapshot.shared_cache.size", "16MB") + .setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "false") + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(hdfsFixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Override protected String writeRepositoryType() { return "hdfs"; @@ -21,19 +47,9 @@ protected String writeRepositoryType() { @Override protected Settings writeRepositorySettings() { - final String uri = System.getProperty("test.hdfs.uri"); - assertThat(uri, not(blankOrNullString())); - - final String path = System.getProperty("test.hdfs.path"); - assertThat(path, not(blankOrNullString())); - - // Optional based on type of test - final String principal = System.getProperty("test.krb5.principal.es"); - + final String uri = "hdfs://localhost:" + hdfsFixture.getPort(); + final String path = "/user/elasticsearch/test/searchable_snapshots/simple"; Settings.Builder repositorySettings = Settings.builder().put("client", "searchable_snapshots").put("uri", uri).put("path", path); - if (principal != null) { - repositorySettings.put("security.principal", principal); - } return repositorySettings.build(); } } diff --git a/x-pack/plugin/searchable-snapshots/qa/hdfs/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/hdfs/SecureHdfsSearchableSnapshotsIT.java b/x-pack/plugin/searchable-snapshots/qa/hdfs/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/hdfs/SecureHdfsSearchableSnapshotsIT.java new file mode 100644 index 0000000000000..cf30fae9861ff --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/qa/hdfs/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/hdfs/SecureHdfsSearchableSnapshotsIT.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.searchablesnapshots.hdfs; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.fixtures.hdfs.HdfsClientThreadLeakFilter; +import org.elasticsearch.test.fixtures.hdfs.HdfsFixture; +import org.elasticsearch.test.fixtures.krb5kdc.Krb5kDcContainer; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; +import org.elasticsearch.xpack.searchablesnapshots.AbstractSearchableSnapshotsRestTestCase; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class, TestContainersThreadFilter.class }) +public class SecureHdfsSearchableSnapshotsIT extends AbstractSearchableSnapshotsRestTestCase { + + public static Krb5kDcContainer krb5Fixture = new Krb5kDcContainer(); + + public static HdfsFixture hdfsFixture = new HdfsFixture().withKerberos(() -> krb5Fixture.getPrincipal(), () -> krb5Fixture.getKeytab()); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .plugin("repository-hdfs") + .setting("xpack.searchable.snapshot.shared_cache.size", "16MB") + .setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "false") + .systemProperty("java.security.krb5.conf", () -> krb5Fixture.getConfPath().toString()) + .configFile("repository-hdfs/krb5.keytab", Resource.fromFile(() -> krb5Fixture.getEsKeytab())) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(krb5Fixture).around(hdfsFixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected String writeRepositoryType() { + return "hdfs"; + } + + @Override + protected Settings writeRepositorySettings() { + final String uri = "hdfs://localhost:" + hdfsFixture.getPort(); + final String path = "/user/elasticsearch/test/searchable_snapshots/secure"; + Settings.Builder repositorySettings = Settings.builder().put("client", "searchable_snapshots").put("uri", uri).put("path", path); + + final String principal = "elasticsearch@BUILD.ELASTIC.CO"; + repositorySettings.put("security.principal", principal); + return repositorySettings.build(); + } + +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle index 90a6f4ada32e0..3fbb55ca4eb3a 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle @@ -5,29 +5,19 @@ * 2.0. 
*/ - -import org.elasticsearch.gradle.OS import org.elasticsearch.gradle.internal.info.BuildParams -import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.internal.util.ports.ReservedPortRange - -import java.nio.file.Files -import java.nio.file.Paths - -import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE -apply plugin: 'elasticsearch.test.fixtures' -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.rest-resources' -apply plugin: 'elasticsearch.internal-available-ports' - -final Project hdfsFixtureProject = project(':test:fixtures:hdfs2-fixture') -final Project krbFixtureProject = project(':test:fixtures:krb5kdc-fixture') -final Project hdfsRepoPluginProject = project(':plugins:repository-hdfs') dependencies { javaRestTestImplementation testArtifact(project(xpackModule('snapshot-repo-test-kit'))) - javaRestTestImplementation project(':plugins:repository-hdfs') + javaRestTestImplementation project(path: ':test:fixtures:hdfs-fixture', configuration:"shadow") + javaRestTestImplementation project(':test:fixtures:krb5kdc-fixture') + javaRestTestImplementation "org.slf4j:slf4j-api:${versions.slf4j}" + javaRestTestImplementation "org.slf4j:slf4j-simple:${versions.slf4j}" + javaRestTestRuntimeOnly "com.google.guava:guava:16.0.1" + javaRestTestRuntimeOnly "commons-cli:commons-cli:1.2" } restResources { @@ -36,151 +26,15 @@ restResources { } } -testFixtures.useFixture(krbFixtureProject.path, 'hdfs-snapshot-repo-tests') - -configurations { - hdfsFixture -} - dependencies { - hdfsFixture hdfsFixtureProject - // Set the keytab files in the classpath so that we can access them from test code without the security manager freaking out. 
- if (isEclipse == false) { - testRuntimeOnly files(krbFixtureProject.ext.krb5Keytabs("hdfs-snapshot-repo-tests", "hdfs_hdfs.build.elastic.co.keytab").parent){ - builtBy ":test:fixtures:krb5kdc-fixture:preProcessFixture" - } - } -} - -normalization { - runtimeClasspath { - // ignore generated keytab files for the purposes of build avoidance - ignore '*.keytab' - // ignore fixture ports file which is on the classpath primarily to pacify the security manager - ignore 'ports' - } -} - -String realm = "BUILD.ELASTIC.CO" -String krb5conf = krbFixtureProject.ext.krb5Conf("hdfs") - -// Create HDFS File System Testing Fixtures -for (String fixtureName : ['hdfsFixture', 'secureHdfsFixture']) { - project.tasks.register(fixtureName, org.elasticsearch.gradle.internal.test.AntFixture) { - dependsOn project.configurations.hdfsFixture, krbFixtureProject.tasks.postProcessFixture - executable = "${BuildParams.runtimeJavaHome}/bin/java" - env 'CLASSPATH', "${-> project.configurations.hdfsFixture.asPath}" - maxWaitInSeconds 60 - BuildParams.withFipsEnabledOnly(it) - waitCondition = { fixture, ant -> - // the hdfs.MiniHDFS fixture writes the ports file when - // it's ready, so we can just wait for the file to exist - return fixture.portsFile.exists() - } - final List miniHDFSArgs = [] - - // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options - if (name.equals('secureHdfsFixture')) { - onlyIf("Only runtime java version < 16") { BuildParams.runtimeJavaVersion < JavaVersion.VERSION_16 } - miniHDFSArgs.addAll(["--add-exports", "java.security.jgss/sun.security.krb5=ALL-UNNAMED"]) - miniHDFSArgs.add("-Djava.security.krb5.conf=${krb5conf}") - } - // configure port dynamically - def portRange = project.getExtensions().getByType(ReservedPortRange) - miniHDFSArgs.add("-Dhdfs.config.port=${portRange.getOrAllocate(name)}") - - // Common options - miniHDFSArgs.add('hdfs.MiniHDFS') - miniHDFSArgs.add(baseDir) - - // If it's a secure fixture, then set the principal name and keytab locations to use for auth. - if (name.equals('secureHdfsFixture')) { - miniHDFSArgs.add("hdfs/hdfs.build.elastic.co@${realm}") - miniHDFSArgs.add(project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab")) - } - - args miniHDFSArgs.toArray() - } + clusterPlugins project(':plugins:repository-hdfs') } // Disable integration test if Fips mode tasks.named("javaRestTest").configure { + usesDefaultDistribution() description = "Runs rest tests against an elasticsearch cluster with HDFS." - def hdfsPort = project.getExtensions().getByType(ReservedPortRange).getOrAllocate("hdfsFixture") - systemProperty 'test.hdfs.uri', "hdfs://localhost:$hdfsPort" - nonInputProperties.systemProperty 'test.hdfs.path', '/user/elasticsearch/test/repository_test_kit/simple' BuildParams.withFipsEnabledOnly(it) -} - -tasks.register("javaRestTestSecure", RestIntegTestTask) { - description = "Runs rest tests against an elasticsearch cluster with Secured HDFS." 
- def hdfsPort = project.getExtensions().getByType(ReservedPortRange).getOrAllocate("secureHdfsFixture") - nonInputProperties.systemProperty 'test.hdfs.uri', "hdfs://localhost:$hdfsPort" - nonInputProperties.systemProperty 'test.hdfs.path', '/user/elasticsearch/test/repository_test_kit/secure' - nonInputProperties.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}" - nonInputProperties.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}" - nonInputProperties.systemProperty( - "test.krb5.keytab.hdfs", - project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab") - ) - onlyIf("FIPS mode disabled and runtime java < 16") { - BuildParams.inFipsJvm == false && BuildParams.runtimeJavaVersion < JavaVersion.VERSION_16 - } - testClassesDirs = sourceSets.javaRestTest.output.classesDirs - classpath = sourceSets.javaRestTest.runtimeClasspath -} - -tasks.named("check").configure { dependsOn("javaRestTestSecure") } - -testClusters.configureEach { - testDistribution = 'DEFAULT' - plugin(hdfsRepoPluginProject.path) - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'false' -} - -testClusters.matching { it.name == "javaRestTestSecure" }.configureEach { - systemProperty "java.security.krb5.conf", krb5conf - extraConfigFile( - "repository-hdfs/krb5.keytab", - file("${project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "elasticsearch.keytab")}"), IGNORE_VALUE - ) -} - -// Determine HDFS Fixture compatibility for the current build environment. -boolean fixtureSupported = false -if (OS.current() == OS.WINDOWS) { - // hdfs fixture will not start without hadoop native libraries on windows - String nativePath = System.getenv("HADOOP_HOME") - if (nativePath != null) { - java.nio.file.Path path = Paths.get(nativePath) - if (Files.isDirectory(path) && - Files.exists(path.resolve("bin").resolve("winutils.exe")) && - Files.exists(path.resolve("bin").resolve("hadoop.dll")) && - Files.exists(path.resolve("bin").resolve("hdfs.dll"))) { - fixtureSupported = true - } else { - throw new IllegalStateException("HADOOP_HOME: ${path} is invalid, does not contain hadoop native libraries in \$HADOOP_HOME/bin") - } - } -} else { - fixtureSupported = true -} - -boolean legalPath = rootProject.rootDir.toString().contains(" ") == false -if (legalPath == false) { - fixtureSupported = false -} - -if (fixtureSupported) { - tasks.named("javaRestTest").configure {dependsOn "hdfsFixture" } - tasks.named("javaRestTestSecure").configure {dependsOn "secureHdfsFixture" } -} else { - tasks.named("javaRestTest").configure {enabled = false } - tasks.named("javaRestTestSecure").configure { enabled = false } - if (legalPath) { - logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH") - } else { - logger.warn("hdfsFixture unsupported since there are spaces in the path: '" + rootProject.rootDir.toString() + "'") - } + // required for krb5kdc-fixture to work + jvmArgs '--add-exports', 'java.security.jgss/sun.security.krb5=ALL-UNNAMED' } diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AbstractHdfsSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AbstractHdfsSnapshotRepoTestKitIT.java new file mode 100644 index 0000000000000..2810c4801e8dd --- /dev/null +++ 
b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AbstractHdfsSnapshotRepoTestKitIT.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.repositories.blobstore.testkit; + +import org.elasticsearch.common.settings.Settings; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.not; + +public abstract class AbstractHdfsSnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { + + @Override + protected String repositoryType() { + return "hdfs"; + } + + @Override + protected Settings repositorySettings() { + final String uri = "hdfs://localhost:" + getHdfsPort(); + // final String uri = System.getProperty("test.hdfs.uri"); + assertThat(uri, not(blankOrNullString())); + + final String path = getRepositoryPath(); + assertThat(path, not(blankOrNullString())); + Settings.Builder repositorySettings = Settings.builder().put("client", "repository_test_kit").put("uri", uri).put("path", path); + return repositorySettings.build(); + } + + protected abstract String getRepositoryPath(); + + protected abstract int getHdfsPort(); + +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/HdfsSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/HdfsSnapshotRepoTestKitIT.java index 2cc81567e94bf..e9787ecdce854 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/HdfsSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/HdfsSnapshotRepoTestKitIT.java @@ -6,33 +6,43 @@ */ package org.elasticsearch.repositories.blobstore.testkit; -import org.elasticsearch.common.settings.Settings; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import static org.hamcrest.Matchers.blankOrNullString; -import static org.hamcrest.Matchers.not; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.fixtures.hdfs.HdfsClientThreadLeakFilter; +import org.elasticsearch.test.fixtures.hdfs.HdfsFixture; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; -public class HdfsSnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { +@ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class }) +public class HdfsSnapshotRepoTestKitIT extends AbstractHdfsSnapshotRepoTestKitIT { + + public static HdfsFixture hdfsFixture = new HdfsFixture(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .plugin("repository-hdfs") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "false") + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(hdfsFixture).around(cluster); @Override - protected String repositoryType() { - return "hdfs"; + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); 
} @Override - protected Settings repositorySettings() { - final String uri = System.getProperty("test.hdfs.uri"); - assertThat(uri, not(blankOrNullString())); - - final String path = System.getProperty("test.hdfs.path"); - assertThat(path, not(blankOrNullString())); - - // Optional based on type of test - final String principal = System.getProperty("test.krb5.principal.es"); + protected String getRepositoryPath() { + return "/user/elasticsearch/test/repository_test_kit/simple"; + } - Settings.Builder repositorySettings = Settings.builder().put("client", "repository_test_kit").put("uri", uri).put("path", path); - if (principal != null) { - repositorySettings.put("security.principal", principal); - } - return repositorySettings.build(); + @Override + protected int getHdfsPort() { + return hdfsFixture.getPort(); } } diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/SecureHdfsSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/SecureHdfsSnapshotRepoTestKitIT.java new file mode 100644 index 0000000000000..6d599e41e3b9f --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/SecureHdfsSnapshotRepoTestKitIT.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.repositories.blobstore.testkit; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.fixtures.hdfs.HdfsClientThreadLeakFilter; +import org.elasticsearch.test.fixtures.hdfs.HdfsFixture; +import org.elasticsearch.test.fixtures.krb5kdc.Krb5kDcContainer; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class, TestContainersThreadFilter.class }) +public class SecureHdfsSnapshotRepoTestKitIT extends AbstractHdfsSnapshotRepoTestKitIT { + + public static Krb5kDcContainer krb5Fixture = new Krb5kDcContainer(); + + public static HdfsFixture hdfsFixture = new HdfsFixture().withKerberos(() -> krb5Fixture.getPrincipal(), () -> krb5Fixture.getKeytab()); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .plugin("repository-hdfs") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "false") + .systemProperty("java.security.krb5.conf", () -> krb5Fixture.getConfPath().toString()) + .configFile("repository-hdfs/krb5.conf", Resource.fromString(() -> krb5Fixture.getConf())) + .configFile("repository-hdfs/krb5.keytab", Resource.fromFile(() -> krb5Fixture.getEsKeytab())) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(krb5Fixture).around(hdfsFixture).around(cluster); + + @Override + protected String getTestRestCluster() { + 
return cluster.getHttpAddresses(); + } + + @Override + protected int getHdfsPort() { + return hdfsFixture.getPort(); + } + + @Override + protected String getRepositoryPath() { + return "/user/elasticsearch/test/repository_test_kit/secure"; + } + + @Override + protected Settings repositorySettings() { + return Settings.builder().put(super.repositorySettings()).put("security.principal", "elasticsearch@BUILD.ELASTIC.CO").build(); + } +} diff --git a/x-pack/qa/kerberos-tests/build.gradle b/x-pack/qa/kerberos-tests/build.gradle index 62d6f0a1e34b8..0ec7044ed18ab 100644 --- a/x-pack/qa/kerberos-tests/build.gradle +++ b/x-pack/qa/kerberos-tests/build.gradle @@ -1,51 +1,15 @@ -import java.nio.file.Path -import java.nio.file.Paths - apply plugin: 'elasticsearch.internal-java-rest-test' -apply plugin: 'elasticsearch.test.fixtures' - -testFixtures.useFixture ":test:fixtures:krb5kdc-fixture", "peppa" dependencies { javaRestTestImplementation project(':x-pack:plugin:core') javaRestTestImplementation(testArtifact(project(xpackModule('core')))) javaRestTestImplementation(testArtifact(project(xpackModule('security')))) + javaRestTestImplementation project(':test:fixtures:krb5kdc-fixture') } -normalization { - runtimeClasspath { - ignore 'krb5.conf' - ignore '*.keytab' - } -} - -tasks.register("copyKeytabToGeneratedResources", Copy) { - from project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("peppa", "peppa.keytab") - into "$buildDir/generated-resources/keytabs" - from project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("peppa", "HTTP_localhost.keytab") - into "$buildDir/generated-resources/keytabs" - dependsOn ":test:fixtures:krb5kdc-fixture:postProcessFixture" -} - -tasks.register("copyConfToGeneratedResources", Copy) { - from project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("peppa") - into "$buildDir/generated-resources/conf" - dependsOn ":test:fixtures:krb5kdc-fixture:postProcessFixture" -} - -String realm = "BUILD.ELASTIC.CO" tasks.named("javaRestTest").configure { - dependsOn "copyKeytabToGeneratedResources", "copyConfToGeneratedResources" usesDefaultDistribution() - Path peppaKeytab = Paths.get("${project.buildDir}", "generated-resources", "keytabs", "peppa.keytab") - Path krb5Conf = Paths.get("${project.buildDir}", "generated-resources", "conf", "krb5.conf") - nonInputProperties.systemProperty 'test.userkt', "peppa@${realm}" - nonInputProperties.systemProperty 'test.userkt.keytab', "${peppaKeytab}" - nonInputProperties.systemProperty 'test.userpwd', "george@${realm}" - nonInputProperties.systemProperty 'test.krb5.conf', "${krb5Conf}" - nonInputProperties.systemProperty 'java.security.krb5.conf', "${krb5Conf}" - systemProperty 'test.userpwd.password', "dino_but_longer_than_14_chars" - systemProperty 'sun.security.krb5.debug', true - classpath += files("$buildDir/generated-resources/keytabs") - classpath += files("$buildDir/generated-resources/conf") + description = "Runs rest tests against an elasticsearch cluster with Kerberos." 
+ // required for krb5kdc-fixture to work + jvmArgs '--add-exports', 'java.security.jgss/sun.security.krb5=ALL-UNNAMED' } diff --git a/x-pack/qa/kerberos-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosAuthenticationIT.java b/x-pack/qa/kerberos-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosAuthenticationIT.java index ff37b9c77735c..3058905548c08 100644 --- a/x-pack/qa/kerberos-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosAuthenticationIT.java +++ b/x-pack/qa/kerberos-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosAuthenticationIT.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.security.authc.kerberos; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.elasticsearch.client.Request; @@ -22,12 +24,16 @@ import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.fixtures.krb5kdc.Krb5kDcContainer; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.ietf.jgss.GSSException; import org.junit.Before; import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; import java.io.IOException; import java.net.InetAddress; @@ -56,15 +62,16 @@ * Demonstrates login by keytab and login by password for given user principal * name using rest client. 
*/ +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) public class KerberosAuthenticationIT extends ESRestTestCase { private static final String ENABLE_KERBEROS_DEBUG_LOGS_KEY = "test.krb.debug"; - private static final String TEST_USER_WITH_KEYTAB_KEY = "test.userkt"; - private static final String TEST_USER_WITH_KEYTAB_PATH_KEY = "test.userkt.keytab"; - private static final String TEST_USER_WITH_PWD_KEY = "test.userpwd"; - private static final String TEST_USER_WITH_PWD_PASSWD_KEY = "test.userpwd.password"; + private static final String TEST_USER_WITH_KEYTAB_KEY = "peppa@BUILD.ELASTIC.CO"; + private static final String TEST_USER_WITH_PWD_KEY = "george@BUILD.ELASTIC.CO"; + private static final String TEST_USER_WITH_PWD_PASSWD_KEY = "dino_but_longer_than_14_chars"; private static final String TEST_KERBEROS_REALM_NAME = "kerberos"; - @ClassRule + public static Krb5kDcContainer krb5Fixture = new Krb5kDcContainer(Krb5kDcContainer.ProvisioningId.PEPPA); + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .distribution(DistributionType.DEFAULT) // force localhost IPv4 otherwise it is a chicken and egg problem where we need the keytab for the hostname when starting the @@ -81,13 +88,16 @@ public class KerberosAuthenticationIT extends ESRestTestCase { .setting("xpack.security.authc.realms.kerberos.kerberos.keytab.path", "es.keytab") .setting("xpack.security.authc.realms.kerberos.kerberos.krb.debug", "true") .setting("xpack.security.authc.realms.kerberos.kerberos.remove_realm_name", "false") - .systemProperty("java.security.krb5.conf", System.getProperty("test.krb5.conf")) + .systemProperty("java.security.krb5.conf", () -> krb5Fixture.getConfPath().toString()) .systemProperty("sun.security.krb5.debug", "true") .user("test_admin", "x-pack-test-password") .user("test_kibana_user", "x-pack-test-password", "kibana_system", false) - .configFile("es.keytab", Resource.fromClasspath("HTTP_localhost.keytab")) + .configFile("es.keytab", Resource.fromFile(() -> krb5Fixture.getEsKeytab())) .build(); + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(krb5Fixture).around(cluster); + @Override protected String getTestRestCluster() { return cluster.getHttpAddresses(); @@ -130,20 +140,19 @@ public void setupRoleMapping() throws IOException { } public void testLoginByKeytab() throws IOException, PrivilegedActionException { - final String userPrincipalName = System.getProperty(TEST_USER_WITH_KEYTAB_KEY); - final String keytabPath = System.getProperty(TEST_USER_WITH_KEYTAB_PATH_KEY); - final boolean enabledDebugLogs = Boolean.parseBoolean(System.getProperty(ENABLE_KERBEROS_DEBUG_LOGS_KEY)); + final String keytabPath = krb5Fixture.getKeytab().toString(); + final boolean enabledDebugLogs = Boolean.parseBoolean(ENABLE_KERBEROS_DEBUG_LOGS_KEY); final SpnegoHttpClientConfigCallbackHandler callbackHandler = new SpnegoHttpClientConfigCallbackHandler( - userPrincipalName, + krb5Fixture.getPrincipal(), keytabPath, enabledDebugLogs ); - executeRequestAndVerifyResponse(userPrincipalName, callbackHandler); + executeRequestAndVerifyResponse(krb5Fixture.getPrincipal(), callbackHandler); } public void testLoginByUsernamePassword() throws IOException, PrivilegedActionException { - final String userPrincipalName = System.getProperty(TEST_USER_WITH_PWD_KEY); - final String password = System.getProperty(TEST_USER_WITH_PWD_PASSWD_KEY); + final String userPrincipalName = TEST_USER_WITH_PWD_KEY; + final String password = TEST_USER_WITH_PWD_PASSWD_KEY; final boolean enabledDebugLogs 
= Boolean.parseBoolean(System.getProperty(ENABLE_KERBEROS_DEBUG_LOGS_KEY)); final SpnegoHttpClientConfigCallbackHandler callbackHandler = new SpnegoHttpClientConfigCallbackHandler( userPrincipalName, @@ -154,8 +163,8 @@ public void testLoginByUsernamePassword() throws IOException, PrivilegedActionEx } public void testGetOauth2TokenInExchangeForKerberosTickets() throws PrivilegedActionException, GSSException, IOException { - final String userPrincipalName = System.getProperty(TEST_USER_WITH_PWD_KEY); - final String password = System.getProperty(TEST_USER_WITH_PWD_PASSWD_KEY); + final String userPrincipalName = TEST_USER_WITH_PWD_KEY; + final String password = TEST_USER_WITH_PWD_PASSWD_KEY; final boolean enabledDebugLogs = Boolean.parseBoolean(System.getProperty(ENABLE_KERBEROS_DEBUG_LOGS_KEY)); final SpnegoHttpClientConfigCallbackHandler callbackHandler = new SpnegoHttpClientConfigCallbackHandler( userPrincipalName, From 829ea4d34d260a72d8fc43fa160e2f7a6e2ba978 Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Tue, 26 Mar 2024 09:12:55 +0100 Subject: [PATCH 55/79] ESQL: Sum, Min, Max and Avg of constants (#105454) Allow expressions like ... | STATS sum([1, -9]), sum(null), min(21.0*3), avg([1,2,3]) by substituting sum(const) by mv_sum(const)*count(*) and min(const) by mv_min(const) (and similarly for max and avg). --- docs/changelog/105454.yaml | 5 + .../src/main/resources/stats.csv-spec | 96 +++++++++ .../esql/expression/SurrogateExpression.java | 3 + .../expression/function/aggregate/Avg.java | 4 +- .../expression/function/aggregate/Max.java | 9 +- .../expression/function/aggregate/Min.java | 9 +- .../expression/function/aggregate/Sum.java | 19 +- .../esql/optimizer/LogicalPlanOptimizer.java | 62 ++++-- .../xpack/esql/planner/AggregateMapper.java | 8 +- .../optimizer/LogicalPlanOptimizerTests.java | 192 +++++++++++++++++- 10 files changed, 375 insertions(+), 32 deletions(-) create mode 100644 docs/changelog/105454.yaml diff --git a/docs/changelog/105454.yaml b/docs/changelog/105454.yaml new file mode 100644 index 0000000000000..fc814a343c46b --- /dev/null +++ b/docs/changelog/105454.yaml @@ -0,0 +1,5 @@ +pr: 105454 +summary: "ESQL: Sum of constants" +area: ES|QL +type: enhancement +issues: [] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 917735040c61d..91c79e64b2385 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -1226,3 +1226,99 @@ FROM employees vals:l 183 ; + +sumOfConst#[skip:-8.13.99,reason:supported in 8.14] +FROM employees +| STATS s1 = sum(1), s2point1 = sum(2.1), s_mv = sum([-1, 0, 3]) * 3, s_null = sum(null), rows = count(*) +; + +s1:l | s2point1:d | s_mv:l | s_null:d | rows:l +100 | 210.0 | 600 | null | 100 +; + +sumOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] +FROM employees +| STATS s2point1 = round(sum(2.1), 1), s_mv = sum([-1, 0, 3]), rows = count(*) by languages +| SORT languages +; + +s2point1:d | s_mv:l | rows:l | languages:i +31.5 | 30 | 15 | 1 +39.9 | 38 | 19 | 2 +35.7 | 34 | 17 | 3 +37.8 | 36 | 18 | 4 +44.1 | 42 | 21 | 5 +21.0 | 20 | 10 | null +; + +avgOfConst#[skip:-8.13.99,reason:supported in 8.14] +FROM employees +| STATS s1 = avg(1), s_mv = avg([-1, 0, 3]) * 3, s_null = avg(null) +; + +s1:d | s_mv:d | s_null:d +1.0 | 2.0 | null +; + +avgOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] +FROM employees +| STATS s2point1 = 
avg(2.1), s_mv = avg([-1, 0, 3]) * 3 by languages +| SORT languages +; + +s2point1:d | s_mv:d | languages:i +2.1 | 2.0 | 1 +2.1 | 2.0 | 2 +2.1 | 2.0 | 3 +2.1 | 2.0 | 4 +2.1 | 2.0 | 5 +2.1 | 2.0 | null +; + +minOfConst#[skip:-8.13.99,reason:supported in 8.14] +FROM employees +| STATS s1 = min(1), s_mv = min([-1, 0, 3]), s_null = min(null) +; + +s1:i | s_mv:i | s_null:null +1 | -1 | null +; + +minOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] +FROM employees +| STATS s2point1 = min(2.1), s_mv = min([-1, 0, 3]) by languages +| SORT languages +; + +s2point1:d | s_mv:i | languages:i +2.1 | -1 | 1 +2.1 | -1 | 2 +2.1 | -1 | 3 +2.1 | -1 | 4 +2.1 | -1 | 5 +2.1 | -1 | null +; + +maxOfConst#[skip:-8.13.99,reason:supported in 8.14] +FROM employees +| STATS s1 = max(1), s_mv = max([-1, 0, 3]), s_null = max(null) +; + +s1:i | s_mv:i | s_null:null +1 | 3 | null +; + +maxOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] +FROM employees +| STATS s2point1 = max(2.1), s_mv = max([-1, 0, 3]) by languages +| SORT languages +; + +s2point1:d | s_mv:i | languages:i +2.1 | 3 | 1 +2.1 | 3 | 2 +2.1 | 3 | 3 +2.1 | 3 | 4 +2.1 | 3 | 5 +2.1 | 3 | null +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/SurrogateExpression.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/SurrogateExpression.java index e7f507e3983a7..bf48d1d806e18 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/SurrogateExpression.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/SurrogateExpression.java @@ -15,5 +15,8 @@ */ public interface SurrogateExpression { + /** + * Returns the expression to be replaced by or {@code null} if this cannot be replaced. + */ Expression surrogate(); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Avg.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Avg.java index aee07e6e044c6..3ea0721d52c00 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Avg.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Avg.java @@ -10,6 +10,7 @@ import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvAvg; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.function.aggregate.AggregateFunction; @@ -60,6 +61,7 @@ public Avg replaceChildren(List newChildren) { public Expression surrogate() { var s = source(); var field = field(); - return new Div(s, new Sum(s, field), new Count(s, field), dataType()); + + return field().foldable() ? 
new MvAvg(s, field) : new Div(s, new Sum(s, field), new Count(s, field), dataType()); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index 00c3fd30530cd..3e8030322caa7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -11,8 +11,10 @@ import org.elasticsearch.compute.aggregation.MaxDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxIntAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxLongAggregatorFunctionSupplier; +import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -20,7 +22,7 @@ import java.util.List; -public class Max extends NumericAggregate { +public class Max extends NumericAggregate implements SurrogateExpression { @FunctionInfo(returnType = { "double", "integer", "long" }, description = "The maximum value of a numeric field.", isAggregation = true) public Max(Source source, @Param(name = "number", type = { "double", "integer", "long" }) Expression field) { @@ -61,4 +63,9 @@ protected AggregatorFunctionSupplier intSupplier(List inputChannels) { protected AggregatorFunctionSupplier doubleSupplier(List inputChannels) { return new MaxDoubleAggregatorFunctionSupplier(inputChannels); } + + @Override + public Expression surrogate() { + return field().foldable() ? 
new MvMax(source(), field()) : null; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index 6bbc349e2b523..c69d2f4a1fc2d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -11,8 +11,10 @@ import org.elasticsearch.compute.aggregation.MinDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinIntAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinLongAggregatorFunctionSupplier; +import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -20,7 +22,7 @@ import java.util.List; -public class Min extends NumericAggregate { +public class Min extends NumericAggregate implements SurrogateExpression { @FunctionInfo(returnType = { "double", "integer", "long" }, description = "The minimum value of a numeric field.", isAggregation = true) public Min(Source source, @Param(name = "number", type = { "double", "integer", "long" }) Expression field) { @@ -61,4 +63,9 @@ protected AggregatorFunctionSupplier intSupplier(List inputChannels) { protected AggregatorFunctionSupplier doubleSupplier(List inputChannels) { return new MinDoubleAggregatorFunctionSupplier(inputChannels); } + + @Override + public Expression surrogate() { + return field().foldable() ? new MvMin(source(), field()) : null; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sum.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sum.java index e6584262183fa..805724bfcd16c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sum.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sum.java @@ -10,12 +10,18 @@ import org.elasticsearch.compute.aggregation.SumDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.SumIntAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.SumLongAggregatorFunctionSupplier; +import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; +import org.elasticsearch.xpack.ql.util.StringUtils; import java.util.List; @@ -26,7 +32,7 @@ /** * Sum all values of a field in matching documents. 
*/ -public class Sum extends NumericAggregate { +public class Sum extends NumericAggregate implements SurrogateExpression { @FunctionInfo(returnType = "long", description = "The sum of a numeric field.", isAggregation = true) public Sum(Source source, @Param(name = "number", type = { "double", "integer", "long" }) Expression field) { @@ -63,4 +69,15 @@ protected AggregatorFunctionSupplier intSupplier(List inputChannels) { protected AggregatorFunctionSupplier doubleSupplier(List inputChannels) { return new SumDoubleAggregatorFunctionSupplier(inputChannels); } + + @Override + public Expression surrogate() { + var s = source(); + var field = field(); + + // SUM(const) is equivalent to MV_SUM(const)*COUNT(*). + return field.foldable() + ? new Mul(s, new MvSum(s, field), new Count(s, new Literal(s, StringUtils.WILDCARD, DataTypes.KEYWORD))) + : null; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index 93505fa4f20fc..3425306863585 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.AttributeMap; import org.elasticsearch.xpack.ql.expression.AttributeSet; +import org.elasticsearch.xpack.ql.expression.EmptyAttribute; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.ExpressionSet; import org.elasticsearch.xpack.ql.expression.Expressions; @@ -107,6 +108,23 @@ protected List> batches() { return rules(); } + protected static Batch substitutions() { + return new Batch<>( + "Substitutions", + Limiter.ONCE, + // first extract nested aggs top-level - this simplifies the rest of the rules + new ReplaceStatsAggExpressionWithEval(), + // second extract nested aggs inside of them + new ReplaceStatsNestedExpressionWithEval(), + // lastly replace surrogate functions + new SubstituteSurrogates(), + new ReplaceRegexMatch(), + new ReplaceAliasingEvalWithProject(), + new SkipQueryOnEmptyMappings() + // new NormalizeAggregate(), - waits on https://github.com/elastic/elasticsearch/issues/100634 + ); + } + protected static Batch operators() { return new Batch<>( "Operator Optimization", @@ -150,26 +168,11 @@ protected static Batch cleanup() { } protected static List> rules() { - var substitutions = new Batch<>( - "Substitutions", - Limiter.ONCE, - // first extract nested aggs top-level - this simplifies the rest of the rules - new ReplaceStatsAggExpressionWithEval(), - // second extract nested aggs inside of them - new ReplaceStatsNestedExpressionWithEval(), - // lastly replace surrogate functions - new SubstituteSurrogates(), - new ReplaceRegexMatch(), - new ReplaceAliasingEvalWithProject(), - new SkipQueryOnEmptyMappings() - // new NormalizeAggregate(), - waits on https://github.com/elastic/elasticsearch/issues/100634 - ); - var skip = new Batch<>("Skip Compute", new SkipQueryOnLimitZero()); var defaultTopN = new Batch<>("Add default TopN", new AddDefaultTopN()); var label = new Batch<>("Set as Optimized", Limiter.ONCE, new SetAsOptimized()); - return asList(substitutions, operators(), skip, cleanup(), defaultTopN, label); + return asList(substitutions(), operators(), skip, cleanup(), defaultTopN, label); } // TODO: 
currently this rule only works for aggregate functions (AVG) @@ -191,8 +194,10 @@ protected LogicalPlan rule(Aggregate aggregate) { // first pass to check existing aggregates (to avoid duplication and alias waste) for (NamedExpression agg : aggs) { - if (Alias.unwrap(agg) instanceof AggregateFunction af && af instanceof SurrogateExpression == false) { - aggFuncToAttr.put(af, agg.toAttribute()); + if (Alias.unwrap(agg) instanceof AggregateFunction af) { + if ((af instanceof SurrogateExpression se && se.surrogate() != null) == false) { + aggFuncToAttr.put(af, agg.toAttribute()); + } } } @@ -200,7 +205,7 @@ protected LogicalPlan rule(Aggregate aggregate) { // 0. check list of surrogate expressions for (NamedExpression agg : aggs) { Expression e = Alias.unwrap(agg); - if (e instanceof SurrogateExpression sf) { + if (e instanceof SurrogateExpression sf && sf.surrogate() != null) { changed = true; Expression s = sf.surrogate(); @@ -240,9 +245,22 @@ protected LogicalPlan rule(Aggregate aggregate) { LogicalPlan plan = aggregate; if (changed) { var source = aggregate.source(); - plan = new Aggregate(aggregate.source(), aggregate.child(), aggregate.groupings(), newAggs); + if (newAggs.isEmpty() == false) { + plan = new Aggregate(source, aggregate.child(), aggregate.groupings(), newAggs); + } else { + // All aggs actually have been surrogates for (foldable) expressions, e.g. + // \_Aggregate[[],[AVG([1, 2][INTEGER]) AS s]] + // Replace by a local relation with one row, followed by an eval, e.g. + // \_Eval[[MVAVG([1, 2][INTEGER]) AS s]] + // \_LocalRelation[[{e}#21],[ConstantNullBlock[positions=1]]] + plan = new LocalRelation( + source, + List.of(new EmptyAttribute(source)), + LocalSupplier.of(new Block[] { BlockUtils.constantBlock(PlannerUtils.NON_BREAKING_BLOCK_FACTORY, null, 1) }) + ); + } // 5. force the initial projection in place - if (transientEval.size() > 0) { + if (transientEval.isEmpty() == false) { plan = new Eval(source, plan, transientEval); // project away transient fields and re-enforce the original order using references (not copies) to the original aggs // this works since the replaced aliases have their nameId copied to avoid having to update all references (which has @@ -500,6 +518,8 @@ public LogicalPlan apply(LogicalPlan plan) { plan = plan.transformUp(p -> { // Apply the replacement inside Filter and Eval (which shouldn't make a difference) + // TODO: also allow aggregates once aggs on constants are supported. + // C.f. 
https://github.com/elastic/elasticsearch/issues/100634 if (p instanceof Filter || p instanceof Eval) { p = p.transformExpressionsOnly(ReferenceAttribute.class, replaceReference); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java index 6ed191a6df500..a95d846133c45 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java @@ -11,11 +11,9 @@ import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.core.Tuple; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; -import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; import org.elasticsearch.xpack.esql.expression.function.aggregate.CountDistinct; import org.elasticsearch.xpack.esql.expression.function.aggregate.Max; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Median; import org.elasticsearch.xpack.esql.expression.function.aggregate.MedianAbsoluteDeviation; import org.elasticsearch.xpack.esql.expression.function.aggregate.Min; import org.elasticsearch.xpack.esql.expression.function.aggregate.NumericAggregate; @@ -43,7 +41,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -55,12 +52,11 @@ public class AggregateMapper { static final List NUMERIC = List.of("Int", "Long", "Double"); static final List SPATIAL = List.of("GeoPoint", "CartesianPoint"); - /** List of all ESQL agg functions. */ + /** List of all mappable ESQL agg functions (excludes surrogates like AVG = SUM/COUNT). 
*/ static final List> AGG_FUNCTIONS = List.of( Count.class, CountDistinct.class, Max.class, - Median.class, MedianAbsoluteDeviation.class, Min.class, Percentile.class, @@ -79,7 +75,7 @@ record AggDef(Class aggClazz, String type, String extra, boolean grouping) {} private final HashMap> cache = new HashMap<>(); AggregateMapper() { - this(AGG_FUNCTIONS.stream().filter(Predicate.not(SurrogateExpression.class::isAssignableFrom)).toList()); + this(AGG_FUNCTIONS); } AggregateMapper(List> aggregateFunctionClasses) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 952fbc6f265e4..3f0b39603ef89 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -112,9 +112,11 @@ import org.junit.BeforeClass; import java.lang.reflect.Constructor; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Function; import static java.util.Arrays.asList; import static java.util.Collections.emptyList; @@ -173,6 +175,19 @@ public class LogicalPlanOptimizerTests extends ESTestCase { private static Analyzer analyzerAirports; private static EnrichResolution enrichResolution; + private static class SubstitutionOnlyOptimizer extends LogicalPlanOptimizer { + static SubstitutionOnlyOptimizer INSTANCE = new SubstitutionOnlyOptimizer(new LogicalOptimizerContext(EsqlTestUtils.TEST_CFG)); + + SubstitutionOnlyOptimizer(LogicalOptimizerContext optimizerContext) { + super(optimizerContext); + } + + @Override + protected List> batches() { + return List.of(substitutions()); + } + } + @BeforeClass public static void init() { parser = new EsqlParser(); @@ -3272,6 +3287,177 @@ public void testStatsWithCanonicalAggregate() throws Exception { assertThat(Expressions.attribute(fields.get(1)), is(Expressions.attribute(sum_argument))); } + /** + * Expects after running the {@link LogicalPlanOptimizer#substitutions()}: + * + * Limit[1000[INTEGER]] + * \_EsqlProject[[s{r}#3, s_expr{r}#5, s_null{r}#7, w{r}#10]] + * \_Project[[s{r}#3, s_expr{r}#5, s_null{r}#7, w{r}#10]] + * \_Eval[[MVSUM([1, 2][INTEGER]) * $$COUNT$s$0{r}#25 AS s, MVSUM(314.0[DOUBLE] / 100[INTEGER]) * $$COUNT$s$0{r}#25 AS s + * _expr, MVSUM(null[NULL]) * $$COUNT$s$0{r}#25 AS s_null]] + * \_Aggregate[[w{r}#10],[COUNT(*[KEYWORD]) AS $$COUNT$s$0, w{r}#10]] + * \_Eval[[emp_no{f}#15 % 2[INTEGER] AS w]] + * \_EsRelation[test][_meta_field{f}#21, emp_no{f}#15, first_name{f}#16, ..] 
+ */ + public void testSumOfLiteral() { + var plan = plan(""" + from test + | stats s = sum([1,2]), + s_expr = sum(314.0/100), + s_null = sum(null) + by w = emp_no % 2 + | keep s, s_expr, s_null, w + """, SubstitutionOnlyOptimizer.INSTANCE); + + var limit = as(plan, Limit.class); + var esqlProject = as(limit.child(), EsqlProject.class); + var project = as(esqlProject.child(), Project.class); + var eval = as(project.child(), Eval.class); + var agg = as(eval.child(), Aggregate.class); + + var exprs = eval.fields(); + // s = count(*) * 3 + var s = as(exprs.get(0), Alias.class); + assertThat(s.name(), equalTo("s")); + var mul = as(s.child(), Mul.class); + var mvSum = as(mul.left(), MvSum.class); + assertThat(mvSum.fold(), equalTo(3)); + var count = as(mul.right(), ReferenceAttribute.class); + assertThat(count.name(), equalTo("$$COUNT$s$0")); + + // s_expr = count(*) * 3.14 + var s_expr = as(exprs.get(1), Alias.class); + assertThat(s_expr.name(), equalTo("s_expr")); + var mul_expr = as(s_expr.child(), Mul.class); + var mvSum_expr = as(mul_expr.left(), MvSum.class); + assertThat(mvSum_expr.fold(), equalTo(3.14)); + var count_expr = as(mul_expr.right(), ReferenceAttribute.class); + assertThat(count_expr.name(), equalTo("$$COUNT$s$0")); + + // s_null = null + var s_null = as(exprs.get(2), Alias.class); + assertThat(s_null.name(), equalTo("s_null")); + var mul_null = as(s_null.child(), Mul.class); + var mvSum_null = as(mul_null.left(), MvSum.class); + assertThat(mvSum_null.field(), equalTo(NULL)); + var count_null = as(mul_null.right(), ReferenceAttribute.class); + assertThat(count_null.name(), equalTo("$$COUNT$s$0")); + + var count_agg = as(Alias.unwrap(agg.aggregates().get(0)), Count.class); + assertThat(count_agg.children().get(0), instanceOf(Literal.class)); + var w = as(Alias.unwrap(agg.groupings().get(0)), ReferenceAttribute.class); + assertThat(w.name(), equalTo("w")); + } + + private record AggOfLiteralTestCase( + String aggFunctionName, + Class substitution, + Function aggMultiValue + ) {}; + + private static List AGG_OF_CONST_CASES = List.of( + new AggOfLiteralTestCase("avg", MvAvg.class, ints -> ((double) Arrays.stream(ints).sum()) / ints.length), + new AggOfLiteralTestCase("min", MvMin.class, ints -> Arrays.stream(ints).min().getAsInt()), + new AggOfLiteralTestCase("max", MvMax.class, ints -> Arrays.stream(ints).max().getAsInt()) + ); + + /** + * Aggs of literals in case that the agg can be simply replaced by a corresponding mv-function; + * e.g. avg([1,2,3]) which is equivalent to mv_avg([1,2,3]). 
+ * + * Expects after running the {@link LogicalPlanOptimizer#substitutions()}: + * + * Limit[1000[INTEGER]] + * \_EsqlProject[[s{r}#3, s_expr{r}#5, s_null{r}#7]] + * \_Project[[s{r}#3, s_expr{r}#5, s_null{r}#7]] + * \_Eval[[MVAVG([1, 2][INTEGER]) AS s, MVAVG(314.0[DOUBLE] / 100[INTEGER]) AS s_expr, MVAVG(null[NULL]) AS s_null]] + * \_LocalRelation[[{e}#21],[ConstantNullBlock[positions=1]]] + */ + public void testAggOfLiteral() { + for (AggOfLiteralTestCase testCase : AGG_OF_CONST_CASES) { + String query = LoggerMessageFormat.format(null, """ + from test + | stats s = {}([1,2]), + s_expr = {}(314.0/100), + s_null = {}(null) + | keep s, s_expr, s_null + """, testCase.aggFunctionName, testCase.aggFunctionName, testCase.aggFunctionName); + + var plan = plan(query, SubstitutionOnlyOptimizer.INSTANCE); + + var limit = as(plan, Limit.class); + var esqlProject = as(limit.child(), EsqlProject.class); + var project = as(esqlProject.child(), Project.class); + var eval = as(project.child(), Eval.class); + var singleRowRelation = as(eval.child(), LocalRelation.class); + var singleRow = singleRowRelation.supplier().get(); + assertThat(singleRow.length, equalTo(1)); + assertThat(singleRow[0].getPositionCount(), equalTo(1)); + + var exprs = eval.fields(); + var s = as(exprs.get(0), Alias.class); + assertThat(s.child(), instanceOf(testCase.substitution)); + assertThat(s.child().fold(), equalTo(testCase.aggMultiValue.apply(new int[] { 1, 2 }))); + var s_expr = as(exprs.get(1), Alias.class); + assertThat(s_expr.child(), instanceOf(testCase.substitution)); + assertThat(s_expr.child().fold(), equalTo(3.14)); + var s_null = as(exprs.get(2), Alias.class); + assertThat(s_null.child(), instanceOf(testCase.substitution)); + assertThat(s_null.child().fold(), equalTo(null)); + } + } + + /** + * Like {@link LogicalPlanOptimizerTests#testAggOfLiteral()} but with a grouping key. + * + * Expects after running the {@link LogicalPlanOptimizer#substitutions()}: + * + * Limit[1000[INTEGER]] + * \_EsqlProject[[s{r}#3, s_expr{r}#5, s_null{r}#7, emp_no{f}#13]] + * \_Project[[s{r}#3, s_expr{r}#5, s_null{r}#7, emp_no{f}#13]] + * \_Eval[[MVAVG([1, 2][INTEGER]) AS s, MVAVG(314.0[DOUBLE] / 100[INTEGER]) AS s_expr, MVAVG(null[NULL]) AS s_null]] + * \_Aggregate[[emp_no{f}#13],[emp_no{f}#13]] + * \_EsRelation[test][_meta_field{f}#19, emp_no{f}#13, first_name{f}#14, ..] 
+ */ + public void testAggOfLiteralGrouped() { + for (AggOfLiteralTestCase testCase : AGG_OF_CONST_CASES) { + String query = LoggerMessageFormat.format(null, """ + from test + | stats s = {}([1,2]), + s_expr = {}(314.0/100), + s_null = {}(null) + by emp_no + | keep s, s_expr, s_null, emp_no + """, testCase.aggFunctionName, testCase.aggFunctionName, testCase.aggFunctionName); + + var plan = plan(query, SubstitutionOnlyOptimizer.INSTANCE); + + var limit = as(plan, Limit.class); + var esqlProject = as(limit.child(), EsqlProject.class); + var project = as(esqlProject.child(), Project.class); + var eval = as(project.child(), Eval.class); + var agg = as(eval.child(), Aggregate.class); + assertThat(agg.child(), instanceOf(EsRelation.class)); + + // Assert exprs + var exprs = eval.fields(); + + var s = as(exprs.get(0), Alias.class); + assertThat(s.child(), instanceOf(testCase.substitution)); + assertThat(s.child().fold(), equalTo(testCase.aggMultiValue.apply(new int[] { 1, 2 }))); + var s_expr = as(exprs.get(1), Alias.class); + assertThat(s_expr.child(), instanceOf(testCase.substitution)); + assertThat(s_expr.child().fold(), equalTo(3.14)); + var s_null = as(exprs.get(2), Alias.class); + assertThat(s_null.child(), instanceOf(testCase.substitution)); + assertThat(s_null.child().fold(), equalTo(null)); + + // Assert that the aggregate only does the grouping by emp_no + assertThat(Expressions.names(agg.groupings()), contains("emp_no")); + assertThat(agg.aggregates().size(), equalTo(1)); + } + } + public void testEmptyMappingIndex() { EsIndex empty = new EsIndex("empty_test", emptyMap(), emptySet()); IndexResolution getIndexResultAirports = IndexResolution.valid(empty); @@ -3455,9 +3641,13 @@ private LogicalPlan optimizedPlan(String query) { } private LogicalPlan plan(String query) { + return plan(query, logicalOptimizer); + } + + private LogicalPlan plan(String query, LogicalPlanOptimizer optimizer) { var analyzed = analyzer.analyze(parser.createStatement(query)); // System.out.println(analyzed); - var optimized = logicalOptimizer.optimize(analyzed); + var optimized = optimizer.optimize(analyzed); // System.out.println(optimized); return optimized; } From daf46b56e943aed8e9c545eb657105fae75d8e45 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Tue, 26 Mar 2024 10:25:12 +0100 Subject: [PATCH 56/79] Print out state of index if didn't recover from the snapshot (#106726) Add additional debug information about the index and its recovery state if the assertion for `recoveredFromSnapshotBytes` fails See #87568 --- .../recovery/SnapshotBasedIndexRecoveryIT.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java index 1465911490f61..8951b91cb76a3 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java @@ -986,7 +986,11 @@ public void testRecoveryConcurrentlyWithIndexing() throws Exception { if (waitForSnapshotDownloadToStart) { // must complete using snapshots alone. 
RecoveryState recoveryState = getLatestPeerRecoveryStateForShard(indexName, 0); - assertThat(recoveryState.getIndex().recoveredFromSnapshotBytes(), equalTo(snapshotSizeForIndex)); + assertThat( + "Index " + recoveryState.getIndex() + " should be completely recovered from the snapshot", + recoveryState.getIndex().recoveredFromSnapshotBytes(), + equalTo(snapshotSizeForIndex) + ); } assertDocumentsAreEqual(indexName, numDocs.get()); From 2c38fa7f94315e8689a29d895d170afc3bfa1388 Mon Sep 17 00:00:00 2001 From: Tommaso Teofili Date: Tue, 26 Mar 2024 13:56:58 +0100 Subject: [PATCH 57/79] knn qvb test disable id assertion (#106746) --- .../rest-api-spec/test/ml/search_knn_query_vector_builder.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml index 4cab2c7908748..869bba90345c4 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml @@ -121,7 +121,6 @@ setup: model_id: text_embedding_model model_text: "the octopus comforter smells" - length: { hits.hits: 3 } - - match: { hits.hits.0._id: "0" } --- "nested kNN search with inner_hits size": From 84872e24800b7d243488db2aed54aec7a5e69d6a Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Tue, 26 Mar 2024 15:20:40 +0200 Subject: [PATCH 58/79] Rename DataStreamGlobalRetentionTests (#106754) --- .../get/GetComponentTemplateResponseTests.java | 11 ++++------- .../cluster/metadata/ComponentTemplateTests.java | 2 +- .../metadata/ComposableIndexTemplateTests.java | 2 +- ...Tests.java => DataStreamGlobalRetentionTests.java} | 2 +- .../cluster/metadata/DataStreamLifecycleTests.java | 2 +- .../cluster/metadata/DataStreamTests.java | 2 +- 6 files changed, 9 insertions(+), 12 deletions(-) rename server/src/test/java/org/elasticsearch/cluster/metadata/{DataStreamGlobalRetentionSerializationTests.java => DataStreamGlobalRetentionTests.java} (96%) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateResponseTests.java index 2af4bf5016ad2..025f51b7df997 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateResponseTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComponentTemplateTests; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSerializationTests; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionTests; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.Strings; @@ -47,7 +47,7 @@ protected GetComponentTemplateAction.Response createTestInstance() { return new GetComponentTemplateAction.Response( randomBoolean() ? 
Map.of() : randomTemplates(), RolloverConfigurationTests.randomRolloverConditions(), - DataStreamGlobalRetentionSerializationTests.randomGlobalRetention() + DataStreamGlobalRetentionTests.randomGlobalRetention() ); } @@ -59,10 +59,7 @@ protected GetComponentTemplateAction.Response mutateInstance(GetComponentTemplat switch (randomInt(2)) { case 0 -> templates = templates == null ? randomTemplates() : null; case 1 -> rolloverConditions = randomValueOtherThan(rolloverConditions, RolloverConfigurationTests::randomRolloverConditions); - case 2 -> globalRetention = randomValueOtherThan( - globalRetention, - DataStreamGlobalRetentionSerializationTests::randomGlobalRetention - ); + case 2 -> globalRetention = randomValueOtherThan(globalRetention, DataStreamGlobalRetentionTests::randomGlobalRetention); } return new GetComponentTemplateAction.Response(templates, rolloverConditions, globalRetention); } @@ -88,7 +85,7 @@ public void testXContentSerializationWithRolloverAndEffectiveRetention() throws null, false ); - var globalRetention = DataStreamGlobalRetentionSerializationTests.randomGlobalRetention(); + var globalRetention = DataStreamGlobalRetentionTests.randomGlobalRetention(); var rolloverConfiguration = RolloverConfigurationTests.randomRolloverConditions(); var response = new GetComponentTemplateAction.Response( Map.of(randomAlphaOfLength(10), template), diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java index 7efa624b49148..067a67ee025a1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java @@ -289,7 +289,7 @@ public void testXContentSerializationWithRolloverAndEffectiveRetention() throws try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { builder.humanReadable(true); RolloverConfiguration rolloverConfiguration = RolloverConfigurationTests.randomRolloverConditions(); - DataStreamGlobalRetention globalRetention = DataStreamGlobalRetentionSerializationTests.randomGlobalRetention(); + DataStreamGlobalRetention globalRetention = DataStreamGlobalRetentionTests.randomGlobalRetention(); ToXContent.Params withEffectiveRetention = new ToXContent.MapParams(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS); template.toXContent(builder, withEffectiveRetention, rolloverConfiguration, globalRetention); String serialized = Strings.toString(builder); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java index 6485634f879ba..15b55b5f002bb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java @@ -238,7 +238,7 @@ public void testXContentSerializationWithRolloverAndEffectiveRetention() throws try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { builder.humanReadable(true); RolloverConfiguration rolloverConfiguration = RolloverConfigurationTests.randomRolloverConditions(); - DataStreamGlobalRetention globalRetention = DataStreamGlobalRetentionSerializationTests.randomGlobalRetention(); + DataStreamGlobalRetention globalRetention = DataStreamGlobalRetentionTests.randomGlobalRetention(); 
ToXContent.Params withEffectiveRetention = new ToXContent.MapParams(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS); template.toXContent(builder, withEffectiveRetention, rolloverConfiguration, globalRetention); String serialized = Strings.toString(builder); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionTests.java similarity index 96% rename from server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java rename to server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionTests.java index 5cd104f1f59b5..e65b4d41bbe02 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionTests.java @@ -18,7 +18,7 @@ import java.util.List; -public class DataStreamGlobalRetentionSerializationTests extends SimpleDiffableWireSerializationTestCase { +public class DataStreamGlobalRetentionTests extends SimpleDiffableWireSerializationTestCase { @Override protected ClusterState.Custom makeTestChanges(ClusterState.Custom testInstance) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java index fc650a5e65909..38b09f3690870 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java @@ -112,7 +112,7 @@ public void testXContentSerializationWithRolloverAndEffectiveRetention() throws try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { builder.humanReadable(true); RolloverConfiguration rolloverConfiguration = RolloverConfigurationTests.randomRolloverConditions(); - DataStreamGlobalRetention globalRetention = DataStreamGlobalRetentionSerializationTests.randomGlobalRetention(); + DataStreamGlobalRetention globalRetention = DataStreamGlobalRetentionTests.randomGlobalRetention(); ToXContent.Params withEffectiveRetention = new ToXContent.MapParams(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS); lifecycle.toXContent(builder, withEffectiveRetention, rolloverConfiguration, globalRetention); String serialized = Strings.toString(builder); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index a1a523ddb584d..9db7d1047e249 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -1697,7 +1697,7 @@ public void testXContentSerializationWithRolloverAndEffectiveRetention() throws try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { builder.humanReadable(true); RolloverConfiguration rolloverConfiguration = RolloverConfigurationTests.randomRolloverConditions(); - DataStreamGlobalRetention globalRetention = DataStreamGlobalRetentionSerializationTests.randomGlobalRetention(); + DataStreamGlobalRetention globalRetention = DataStreamGlobalRetentionTests.randomGlobalRetention(); ToXContent.Params withEffectiveRetention = new 
ToXContent.MapParams(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS); dataStream.toXContent(builder, withEffectiveRetention, rolloverConfiguration, globalRetention); From f34f5d4bc98d736d16bb93adbd94a51b2c48749d Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Tue, 26 Mar 2024 09:44:16 -0400 Subject: [PATCH 59/79] [Transform] Auto retry Transform start (#106243) * [Transform] Auto retry Transform start Currently, unattended Transforms can fail to start due to failure to load the Config from its internal index. This usually happens when a Transform is created and immediately started by a system. The error looks like: ``` Failed to load transform configuration for transform [id] ``` Now, we will automatically retry the startup logic until the Config is ready. Some notes: - We cannot determine if a transform is unattended or not, so at this stage we will assume all transforms are unattended. - The persistent task running the transform will move into the `STARTED` state. Users can stop the persistent task and retry logic using the Transform's Stop API. - While retrying, the Transform will report `Yellow` health in the API and `degraded` in Kibana. The health message will include that the transform is automatically retrying and what error it had encountered. --- docs/changelog/106243.yaml | 5 + .../transforms/TransformConfigTests.java | 38 ++- .../transforms/TransformContext.java | 34 +++ .../transforms/TransformHealthChecker.java | 24 +- .../TransformPersistentTasksExecutor.java | 75 +++++- .../TransformRetryableStartUpListener.java | 102 ++++++++ .../transform/transforms/TransformTask.java | 2 +- .../transforms/TransformContextTests.java | 70 ++++- .../TransformHealthCheckerTests.java | 25 ++ ...TransformPersistentTasksExecutorTests.java | 179 +++++++++++-- ...ransformRetryableStartUpListenerTests.java | 239 ++++++++++++++++++ 11 files changed, 743 insertions(+), 50 deletions(-) create mode 100644 docs/changelog/106243.yaml create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformRetryableStartUpListener.java create mode 100644 x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformRetryableStartUpListenerTests.java diff --git a/docs/changelog/106243.yaml b/docs/changelog/106243.yaml new file mode 100644 index 0000000000000..6b02e3f1699d4 --- /dev/null +++ b/docs/changelog/106243.yaml @@ -0,0 +1,5 @@ +pr: 106243 +summary: "[Transform] Auto retry Transform start" +area: "Transform" +type: bug +issues: [] diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java index ba2cd0ba04312..f1c2de11496bf 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -123,6 +122,20 @@ public static TransformConfig randomTransformConfig(String id, TransformConfigVe return 
randomTransformConfig(id, version, pivotConfig, latestConfig); } + public static TransformConfig randomTransformConfig(String id, TimeValue frequency, TransformConfigVersion version) { + PivotConfig pivotConfig; + LatestConfig latestConfig; + if (randomBoolean()) { + pivotConfig = PivotConfigTests.randomPivotConfig(); + latestConfig = null; + } else { + pivotConfig = null; + latestConfig = LatestConfigTests.randomLatestConfig(); + } + + return randomTransformConfig(id, frequency, version, pivotConfig, latestConfig); + } + public static TransformConfig randomTransformConfigWithSettings(SettingsConfig settingsConfig) { PivotConfig pivotConfig; LatestConfig latestConfig; @@ -157,12 +170,28 @@ public static TransformConfig randomTransformConfig( TransformConfigVersion version, PivotConfig pivotConfig, LatestConfig latestConfig + ) { + return randomTransformConfig( + id, + randomBoolean() ? null : TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000)), + version, + pivotConfig, + latestConfig + ); + } + + public static TransformConfig randomTransformConfig( + String id, + TimeValue frequency, + TransformConfigVersion version, + PivotConfig pivotConfig, + LatestConfig latestConfig ) { return new TransformConfig( id, randomSourceConfig(), randomDestConfig(), - randomBoolean() ? null : TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000)), + frequency, randomBoolean() ? null : randomSyncConfig(), randomHeaders(), pivotConfig, @@ -281,10 +310,7 @@ protected ToXContent.Params getToXContentParams() { } private static Map randomHeaders() { - Map headers = Maps.newMapWithExpectedSize(1); - headers.put("key", "value"); - - return headers; + return Map.of("key", "value"); } public void testDefaultMatchAll() throws IOException { diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformContext.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformContext.java index 7fdabda6189a9..6119f446e8dc4 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformContext.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformContext.java @@ -39,6 +39,9 @@ public interface Listener { private final AtomicInteger statePersistenceFailureCount = new AtomicInteger(); private final AtomicReference lastStatePersistenceFailure = new AtomicReference<>(); private volatile Instant lastStatePersistenceFailureStartTime; + private final AtomicInteger startUpFailureCount = new AtomicInteger(); + private final AtomicReference lastStartUpFailure = new AtomicReference<>(); + private volatile Instant startUpFailureTime; private volatile Instant changesLastDetectedAt; private volatile Instant lastSearchTime; private volatile boolean shouldStopAtCheckpoint = false; @@ -214,6 +217,37 @@ Instant getLastStatePersistenceFailureStartTime() { return lastStatePersistenceFailureStartTime; } + void resetStartUpFailureCount() { + startUpFailureCount.set(0); + lastStartUpFailure.set(null); + startUpFailureTime = null; + } + + int getStartUpFailureCount() { + return startUpFailureCount.get(); + } + + Throwable getStartUpFailure() { + return lastStartUpFailure.get(); + } + + int incrementAndGetStartUpFailureCount(Throwable failure) { + lastStartUpFailure.set(failure); + int newFailureCount = startUpFailureCount.incrementAndGet(); + if (newFailureCount == 1) { + startUpFailureTime = Instant.now(); + } + return newFailureCount; + } + + Instant 
getStartUpFailureTime() { + return startUpFailureTime; + } + + boolean doesNotHaveFailures() { + return getFailureCount() == 0 && getStatePersistenceFailureCount() == 0 && getStartUpFailureCount() == 0; + } + void shutdown() { taskListener.shutdown(); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformHealthChecker.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformHealthChecker.java index 86d8ce4a6173c..24c5d45a38f75 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformHealthChecker.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformHealthChecker.java @@ -38,7 +38,8 @@ public enum IssueType { PRIVILEGES_CHECK_FAILED("Privileges check failed"), TRANSFORM_TASK_FAILED("Transform task state is [failed]"), TRANSFORM_INDEXER_FAILED("Transform indexer failed"), - TRANSFORM_INTERNAL_STATE_UPDATE_FAILED("Task encountered failures updating internal state"); + TRANSFORM_INTERNAL_STATE_UPDATE_FAILED("Task encountered failures updating internal state"), + TRANSFORM_STARTUP_FAILED("Transform task is automatically retrying its startup process"); private final String issue; @@ -88,8 +89,7 @@ public static TransformHealth checkTransform(TransformTask transformTask) { public static TransformHealth checkTransform(TransformTask transformTask, @Nullable AuthorizationState authState) { // quick check if (TransformTaskState.FAILED.equals(transformTask.getState().getTaskState()) == false - && transformTask.getContext().getFailureCount() == 0 - && transformTask.getContext().getStatePersistenceFailureCount() == 0 + && transformTask.getContext().doesNotHaveFailures() && AuthorizationState.isNullOrGreen(authState)) { return TransformHealth.GREEN; } @@ -145,6 +145,24 @@ public static TransformHealth checkTransform(TransformTask transformTask, @Nulla ); } + if (transformContext.getStartUpFailureCount() != 0) { + if (HealthStatus.RED.equals(maxStatus) == false) { + maxStatus = HealthStatus.YELLOW; + } + + var lastFailure = transformContext.getStartUpFailure(); + var lastFailureMessage = lastFailure instanceof ElasticsearchException elasticsearchException + ? 
elasticsearchException.getDetailedMessage() + : lastFailure.getMessage(); + issues.add( + IssueType.TRANSFORM_STARTUP_FAILED.newIssue( + lastFailureMessage, + transformContext.getStartUpFailureCount(), + transformContext.getStartUpFailureTime() + ) + ); + } + return new TransformHealth(maxStatus, issues); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java index ae9678893df9a..f18414e3aaead 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java @@ -45,6 +45,7 @@ import org.elasticsearch.xpack.core.transform.transforms.TransformState; import org.elasticsearch.xpack.core.transform.transforms.TransformStoredDoc; import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants; import org.elasticsearch.xpack.transform.Transform; import org.elasticsearch.xpack.transform.TransformExtension; @@ -203,6 +204,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa final SetOnce stateHolder = new SetOnce<>(); + // <7> log the start result ActionListener startTaskListener = ActionListener.wrap( response -> logger.info("[{}] successfully completed and scheduled task in node operation", transformId), failure -> { @@ -348,21 +350,18 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa }); // <2> Get the transform config - ActionListener templateCheckListener = ActionListener.wrap( - aVoid -> transformServices.getConfigManager().getTransformConfiguration(transformId, getTransformConfigListener), - error -> { - Throwable cause = ExceptionsHelper.unwrapCause(error); - String msg = "Failed to create internal index mappings"; - markAsFailed(buildTask, error, msg + "[" + cause + "]"); - } - ); + var templateCheckListener = getTransformConfig(buildTask, params, getTransformConfigListener); // <1> Check the latest internal index (IMPORTANT: according to _this_ node, which might be newer than master) is installed TransformInternalIndex.createLatestVersionedIndexIfRequired( clusterService, parentTaskClient, transformExtension.getTransformInternalIndexAdditionalSettings(), - templateCheckListener + templateCheckListener.delegateResponse((l, e) -> { + Throwable cause = ExceptionsHelper.unwrapCause(e); + String msg = "Failed to create internal index mappings"; + markAsFailed(buildTask, e, msg + "[" + cause + "]"); + }) ); } @@ -401,6 +400,64 @@ private static void markAsFailed(TransformTask task, Throwable exception, String } } + private ActionListener getTransformConfig( + TransformTask task, + TransformTaskParams params, + ActionListener listener + ) { + return ActionListener.running(() -> { + var transformId = params.getId(); + // if this call fails for the first time, we are going to retry it indefinitely + // register the retry using the TransformScheduler, when the call eventually succeeds, deregister it before returning + var scheduler = transformServices.getScheduler(); + scheduler.registerTransform( + params, + new TransformRetryableStartUpListener<>( + 
transformId, + l -> transformServices.getConfigManager().getTransformConfiguration(transformId, l), + ActionListener.runBefore(listener, () -> scheduler.deregisterTransform(transformId)), + retryListener(task), + () -> true, // because we can't determine if this is an unattended transform yet, retry indefinitely + task.getContext() + ) + ); + }); + } + + /** + * This listener is always called after the first execution of a {@link TransformRetryableStartUpListener}. + * + * When the result is true, then the first call has failed and will retry. Save the state as Started and unblock the network thread, + * notifying the user with a 200 OK (acknowledged). + * + * When the result is false, then the first call has succeeded, and no further action is required for this listener. + */ + private ActionListener retryListener(TransformTask task) { + return ActionListener.wrap(isRetrying -> { + if (isRetrying) { + var oldState = task.getState(); + var newState = new TransformState( + TransformTaskState.STARTED, + oldState.getIndexerState(), + oldState.getPosition(), + oldState.getCheckpoint(), + "Retrying transform start.", + oldState.getProgress(), + oldState.getNode(), + oldState.shouldStopAtNextCheckpoint(), + oldState.getAuthState() + ); + task.persistStateToClusterState( + newState, + ActionListener.wrap( + rr -> logger.debug("[{}] marked as retrying in TransformState.", task.getTransformId()), + ee -> logger.atWarn().withThrowable(ee).log("[{}] failed to persist state.", task.getTransformId()) + ) + ); + } + }, e -> markAsFailed(task, e, "Failed to initiate retries for Transform.")); + } + private void startTask( TransformTask buildTask, ClientTransformIndexerBuilder indexerBuilder, diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformRetryableStartUpListener.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformRetryableStartUpListener.java new file mode 100644 index 0000000000000..17548fd8d427f --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformRetryableStartUpListener.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.transform.transforms; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.transform.transforms.scheduling.TransformScheduler; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; +import java.util.function.Supplier; + +class TransformRetryableStartUpListener implements TransformScheduler.Listener { + private final String transformId; + private final Consumer> action; + private final ActionListener actionListener; + private final ActionListener retryScheduledListener; + private final Supplier shouldRetry; + private final TransformContext context; + private final AtomicBoolean isFirstRun; + private final AtomicBoolean isRunning; + + /** + * @param transformId the transform associated with this listener. All events to this listener must be for the same transformId. + * @param action the action this listener will take. When the TransformScheduler invokes {@link #triggered(TransformScheduler.Event)}, + * the call is forwarded to this action. 
+ * @param actionListener actionListener will be notified via #onResponse when the action succeeds or via #onFailure when retries have + * stopped. If the Transform Stop API deregisters this class from the Scheduler, this actionListener will *not* be + * invoked. + * @param retryScheduledListener retryScheduledListener will be notified after the first call. If true, another thread has started the + * retry process. If false, the original call was successful, and no retries will happen. + * @param shouldRetry allows an external entity to gracefully stop these retries, invoking the actionListener's #onFailure method. + * Note that external entities are still required to deregister this listener from the Scheduler. + * @param context the transform's context object. This listener will update the StartUpFailureCount information in the context as it + * encounters errors and retries. + */ + TransformRetryableStartUpListener( + String transformId, + Consumer> action, + ActionListener actionListener, + ActionListener retryScheduledListener, + Supplier shouldRetry, + TransformContext context + ) { + this.transformId = transformId; + this.action = action; + this.actionListener = actionListener; + this.retryScheduledListener = retryScheduledListener; + this.shouldRetry = shouldRetry; + this.context = context; + this.isFirstRun = new AtomicBoolean(true); + this.isRunning = new AtomicBoolean(true); + } + + @Override + public void triggered(TransformScheduler.Event event) { + if (isRunning.get() && transformId.equals(event.transformId())) { + action.accept(ActionListener.wrap(this::actionSucceeded, this::actionFailed)); + } + } + + private void markDone() { + if (isRunning.compareAndSet(true, false)) { + synchronized (context) { + context.resetStartUpFailureCount(); + } + } + } + + private void actionSucceeded(Response r) { + maybeNotifyRetryListener(false); + markDone(); + actionListener.onResponse(r); + } + + private void maybeNotifyRetryListener(boolean response) { + if (isFirstRun.compareAndSet(true, false)) { + retryScheduledListener.onResponse(response); + } + } + + private void actionFailed(Exception e) { + if (shouldRetry.get()) { + maybeNotifyRetryListener(true); + recordError(e); + } else { + maybeNotifyRetryListener(false); + markDone(); + actionListener.onFailure(e); + } + } + + private void recordError(Exception e) { + synchronized (context) { + context.incrementAndGetStartUpFailureCount(e); + } + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java index ac81579e8dd71..dbfc30a38f4c3 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java @@ -71,7 +71,7 @@ public class TransformTask extends AllocatedPersistentTask implements TransformS private final SetOnce indexer = new SetOnce<>(); @SuppressWarnings("this-escape") - public TransformTask( + TransformTask( long id, String type, String action, diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformContextTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformContextTests.java index 86193ef511618..d8e505ad16a49 100644 --- 
a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformContextTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformContextTests.java @@ -21,9 +21,11 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.verifyNoMoreInteractions; public class TransformContextTests extends ESTestCase { @@ -41,19 +43,81 @@ public void verifyNoMoreInteractionsOnMocks() { } public void testFailureCount() { - TransformContext context = new TransformContext(null, null, 0, listener); - assertThat(context.incrementAndGetFailureCount(new RuntimeException("some_exception")), is(equalTo(1))); + var context = new TransformContext(null, null, 0, listener); + + var someException = someException(); + assertThat(context.incrementAndGetFailureCount(someException), is(equalTo(1))); assertThat(context.getFailureCount(), is(equalTo(1))); - assertThat(context.incrementAndGetFailureCount(new IllegalArgumentException("some_other_exception")), is(equalTo(2))); + assertThat(context.getLastFailure(), is(sameInstance(someException))); + assertFalse(context.doesNotHaveFailures()); + + var someOtherException = someOtherException(); + assertThat(context.incrementAndGetFailureCount(someOtherException), is(equalTo(2))); assertThat(context.getFailureCount(), is(equalTo(2))); + assertThat(context.getLastFailure(), is(sameInstance(someOtherException))); + assertFalse(context.doesNotHaveFailures()); + context.resetReasonAndFailureCounter(); assertThat(context.getFailureCount(), is(equalTo(0))); assertThat(context.getLastFailure(), is(nullValue())); + assertTrue(context.doesNotHaveFailures()); // Verify that the listener is notified every time the failure count is incremented or reset verify(listener, times(3)).failureCountChanged(); } + private Throwable someException() { + return new RuntimeException("some_exception"); + } + + private Throwable someOtherException() { + return new IllegalArgumentException("some_other_exception"); + } + + public void testStatePersistenceFailureCount() { + var context = new TransformContext(null, null, 0, listener); + + var someException = someException(); + assertThat(context.incrementAndGetStatePersistenceFailureCount(someException), is(equalTo(1))); + assertThat(context.getStatePersistenceFailureCount(), is(equalTo(1))); + assertThat(context.getLastStatePersistenceFailure(), is(sameInstance(someException))); + assertFalse(context.doesNotHaveFailures()); + + var someOtherException = someOtherException(); + assertThat(context.incrementAndGetStatePersistenceFailureCount(someOtherException), is(equalTo(2))); + assertThat(context.getStatePersistenceFailureCount(), is(equalTo(2))); + assertThat(context.getLastStatePersistenceFailure(), is(sameInstance(someOtherException))); + assertFalse(context.doesNotHaveFailures()); + + context.resetStatePersistenceFailureCount(); + assertThat(context.getStatePersistenceFailureCount(), is(equalTo(0))); + assertThat(context.getLastStatePersistenceFailure(), is(nullValue())); + assertTrue(context.doesNotHaveFailures()); + verifyNoInteractions(listener); + } + + public void testStartUpFailureCount() { + var context = new TransformContext(null, 
null, 0, listener); + + var someException = someException(); + assertThat(context.incrementAndGetStartUpFailureCount(someException), is(equalTo(1))); + assertThat(context.getStartUpFailureCount(), is(equalTo(1))); + assertThat(context.getStartUpFailure(), is(sameInstance(someException))); + assertFalse(context.doesNotHaveFailures()); + + var someOtherException = someOtherException(); + assertThat(context.incrementAndGetStartUpFailureCount(someOtherException), is(equalTo(2))); + assertThat(context.getStartUpFailureCount(), is(equalTo(2))); + assertThat(context.getStartUpFailure(), is(sameInstance(someOtherException))); + assertFalse(context.doesNotHaveFailures()); + + context.resetStartUpFailureCount(); + assertThat(context.getStartUpFailureCount(), is(equalTo(0))); + assertThat(context.getStartUpFailure(), is(nullValue())); + assertTrue(context.doesNotHaveFailures()); + verifyNoInteractions(listener); + } + public void testCheckpoint() { TransformContext context = new TransformContext(null, null, 13, listener); assertThat(context.getCheckpoint(), is(equalTo(13L))); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformHealthCheckerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformHealthCheckerTests.java index 3b4604caca5cd..e52428bc94c13 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformHealthCheckerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformHealthCheckerTests.java @@ -17,6 +17,7 @@ import java.time.Instant; import java.time.temporal.ChronoUnit; +import java.util.stream.IntStream; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -101,6 +102,30 @@ public void testStatusSwitchingAndMultipleFailures() { assertThat(TransformHealthChecker.checkTransform(task), equalTo(TransformHealth.GREEN)); } + public void testStartUpFailures() { + var task = mock(TransformTask.class); + var context = createTestContext(); + var now = getNow(); + + withIdStateAndContext(task, randomAlphaOfLength(10), context); + assertThat(TransformHealthChecker.checkTransform(task), equalTo(TransformHealth.GREEN)); + + context.incrementAndGetStartUpFailureCount(new ElasticsearchException("failed to persist")); + + var health = TransformHealthChecker.checkTransform(task); + assertThat(health.getStatus(), equalTo(HealthStatus.YELLOW)); + assertEquals(1, health.getIssues().size()); + assertThat(health.getIssues().get(0).getIssue(), equalTo("Transform task is automatically retrying its startup process")); + assertThat(health.getIssues().get(0).getFirstOccurrence(), greaterThanOrEqualTo(now)); + assertThat(health.getIssues().get(0).getFirstOccurrence(), lessThan(Instant.MAX)); + + IntStream.range(0, 10).forEach(i -> context.incrementAndGetStartUpFailureCount(new ElasticsearchException("failed to persist"))); + assertThat("Start up failures should always be yellow regardless of count", health.getStatus(), equalTo(HealthStatus.YELLOW)); + + context.resetStartUpFailureCount(); + assertThat(TransformHealthChecker.checkTransform(task), equalTo(TransformHealth.GREEN)); + } + private TransformContext createTestContext() { return new TransformContext(TransformTaskState.STARTED, "", 0, mock(TransformContext.Listener.class)); } diff --git 
a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java index b927a248faf31..b5192535e911a 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.transform.transforms; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -24,6 +25,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; @@ -31,33 +33,74 @@ import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.transform.TransformConfigVersion; +import org.elasticsearch.xpack.core.transform.transforms.AuthorizationState; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.TransformState; import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants; import org.elasticsearch.xpack.transform.DefaultTransformExtension; import org.elasticsearch.xpack.transform.Transform; import org.elasticsearch.xpack.transform.TransformServices; import org.elasticsearch.xpack.transform.checkpoint.TransformCheckpointService; import org.elasticsearch.xpack.transform.notifications.TransformAuditor; -import org.elasticsearch.xpack.transform.persistence.IndexBasedTransformConfigManager; +import org.elasticsearch.xpack.transform.persistence.InMemoryTransformConfigManager; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; import org.elasticsearch.xpack.transform.persistence.TransformInternalIndexTests; import org.elasticsearch.xpack.transform.transforms.scheduling.TransformScheduler; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.time.Clock; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.any; +import static 
org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class TransformPersistentTasksExecutorTests extends ESTestCase { + private static ThreadPool threadPool; + + @BeforeClass + public static void setUpThreadPool() { + threadPool = new TestThreadPool(TransformPersistentTasksExecutorTests.class.getSimpleName()) { + @Override + public ExecutorService executor(String name) { + return EsExecutors.DIRECT_EXECUTOR_SERVICE; + } + + @Override + public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor name) { + command.run(); + return null; + } + }; + } + + @AfterClass + public static void tearDownThreadPool() { + terminate(threadPool); + } public void testNodeVersionAssignment() { DiscoveryNodes.Builder nodes = buildNodes(false, true, true, true, true); @@ -262,6 +305,88 @@ public void testVerifyIndicesPrimaryShardsAreActive() { assertEquals(indexToRemove, result.get(0)); } + public void testNodeOperation() { + var transformsConfigManager = new InMemoryTransformConfigManager(); + var transformScheduler = new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO); + var taskExecutor = buildTaskExecutor(transformServices(transformsConfigManager, transformScheduler)); + + var transformId = "testNodeOperation"; + var params = mockTaskParams(transformId); + + putTransformConfiguration(transformsConfigManager, transformId); + var task = mockTransformTask(); + taskExecutor.nodeOperation(task, params, mock()); + + verify(task).start(isNull(), any()); + } + + private void putTransformConfiguration(TransformConfigManager configManager, String transformId) { + configManager.putTransformConfiguration( + TransformConfigTests.randomTransformConfig(transformId, TimeValue.timeValueMillis(1), TransformConfigVersion.CURRENT), + ActionListener.noop().delegateResponse((l, e) -> fail(e)) + ); + } + + public void testNodeOperationStartupRetry() throws Exception { + var failFirstCall = new AtomicBoolean(true); + var transformsConfigManager = new InMemoryTransformConfigManager() { + @Override + public void getTransformConfiguration(String transformId, ActionListener resultListener) { + if (failFirstCall.compareAndSet(true, false)) { + resultListener.onFailure(new IllegalStateException("Failing first call.")); + } else { + super.getTransformConfiguration(transformId, resultListener); + } + } + }; + + var transformScheduler = new TransformScheduler(Clock.systemUTC(), threadPool, fastRetry(), TimeValue.ZERO); + var taskExecutor = buildTaskExecutor(transformServices(transformsConfigManager, transformScheduler)); + + var transformId = "testNodeOperationStartupRetry"; + var params = mockTaskParams(transformId); + putTransformConfiguration(transformsConfigManager, transformId); + + var task = mockTransformTask(); + taskExecutor.nodeOperation(task, params, mock()); + + // skip waiting for the scheduler to run the task a second time and just rerun it now + transformScheduler.scheduleNow(transformId); + + // verify the retry listener set the state to TransformTaskState.STARTED + IndexerState.STOPPED + verify(task).persistStateToClusterState(argThat(state -> { + assertThat(TransformTaskState.STARTED, equalTo(state.getTaskState())); + assertThat(IndexerState.STOPPED, equalTo(state.getIndexerState())); + return true; + }), 
any()); + verify(task).start(isNull(), any()); + } + + private Settings fastRetry() { + // must be >= [1s] + return Settings.builder().put(Transform.SCHEDULER_FREQUENCY.getKey(), TimeValue.timeValueSeconds(1)).build(); + } + + private TransformTaskParams mockTaskParams(String transformId) { + var params = mock(TransformTaskParams.class); + when(params.getId()).thenReturn(transformId); + when(params.getFrequency()).thenReturn(TimeValue.timeValueSeconds(1)); + return params; + } + + private TransformTask mockTransformTask() { + var task = mock(TransformTask.class); + when(task.setAuthState(any(AuthorizationState.class))).thenReturn(task); + when(task.setNumFailureRetries(anyInt())).thenReturn(task); + when(task.getParentTaskId()).thenReturn(TaskId.EMPTY_TASK_ID); + when(task.getContext()).thenReturn(mock()); + doAnswer(a -> fail(a.getArgument(0, Throwable.class))).when(task).fail(any(Throwable.class), any(String.class), any()); + when(task.getState()).thenReturn( + new TransformState(TransformTaskState.STOPPED, IndexerState.STOPPED, null, 0, null, null, null, false, null) + ); + return task; + } + private void addIndices(Metadata.Builder metadata, RoutingTable.Builder routingTable) { List indices = new ArrayList<>(); indices.add(TransformInternalIndexConstants.AUDIT_INDEX); @@ -415,23 +540,20 @@ private ClusterState buildClusterState(DiscoveryNodes.Builder nodes) { csBuilder.metadata(metadata); return csBuilder.build(); - } private TransformPersistentTasksExecutor buildTaskExecutor() { - ClusterService clusterService = mock(ClusterService.class); - Client client = mock(Client.class); - TransformAuditor mockAuditor = mock(TransformAuditor.class); - IndexBasedTransformConfigManager transformsConfigManager = new IndexBasedTransformConfigManager( - clusterService, - TestIndexNameExpressionResolver.newInstance(), - client, - xContentRegistry() + var transformServices = transformServices( + new InMemoryTransformConfigManager(), + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO) ); - Clock clock = Clock.systemUTC(); - ThreadPool threadPool = mock(ThreadPool.class); - TransformCheckpointService transformCheckpointService = new TransformCheckpointService( - clock, + return buildTaskExecutor(transformServices); + } + + private TransformServices transformServices(TransformConfigManager configManager, TransformScheduler scheduler) { + var mockAuditor = mock(TransformAuditor.class); + var transformCheckpointService = new TransformCheckpointService( + Clock.systemUTC(), Settings.EMPTY, new ClusterService( Settings.EMPTY, @@ -439,28 +561,29 @@ private TransformPersistentTasksExecutor buildTaskExecutor() { null, (TaskManager) null ), - transformsConfigManager, + configManager, mockAuditor ); - TransformServices transformServices = new TransformServices( - transformsConfigManager, - transformCheckpointService, - mockAuditor, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO) - ); - - ClusterSettings cSettings = new ClusterSettings(Settings.EMPTY, Collections.singleton(Transform.NUM_FAILURE_RETRIES_SETTING)); - when(clusterService.getClusterSettings()).thenReturn(cSettings); - when(clusterService.state()).thenReturn(TransformInternalIndexTests.randomTransformClusterState()); + return new TransformServices(configManager, transformCheckpointService, mockAuditor, scheduler); + } + private TransformPersistentTasksExecutor buildTaskExecutor(TransformServices transformServices) { return new TransformPersistentTasksExecutor( - client, + 
mock(Client.class), transformServices, threadPool, - clusterService, + clusterService(), Settings.EMPTY, new DefaultTransformExtension(), TestIndexNameExpressionResolver.newInstance() ); } + + private ClusterService clusterService() { + var clusterService = mock(ClusterService.class); + var cSettings = new ClusterSettings(Settings.EMPTY, Set.of(Transform.NUM_FAILURE_RETRIES_SETTING)); + when(clusterService.getClusterSettings()).thenReturn(cSettings); + when(clusterService.state()).thenReturn(TransformInternalIndexTests.randomTransformClusterState()); + return clusterService; + } } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformRetryableStartUpListenerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformRetryableStartUpListenerTests.java new file mode 100644 index 0000000000000..1a2bbfd434455 --- /dev/null +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformRetryableStartUpListenerTests.java @@ -0,0 +1,239 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.transform.transforms; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.transform.transforms.scheduling.TransformScheduler; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.only; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; + +public class TransformRetryableStartUpListenerTests extends ESTestCase { + /** + * When the action succeeds on the first try + * Then we invoked the retryListener with "false" and then invoked the actionListener's onResponse. 
+ */ + public void testFirstRunPasses() { + var retryResult = new AtomicReference(); + var responseResult = new AtomicInteger(0); + var context = mock(TransformContext.class); + + var listener = new TransformRetryableStartUpListener<>( + "transformId", + immediatelyReturn(), + responseListener(responseResult), + retryListener(retryResult), + () -> true, + context + ); + + callThreeTimes("transformId", listener); + + // assert only 1 success and no retries + assertEquals("Response Listener should only be called once.", 1, responseResult.get()); + assertNotNull("Retry Listener should be called.", retryResult.get()); + assertFalse("Retries should not be scheduled.", retryResult.get()); + verify(context, only()).resetStartUpFailureCount(); + } + + private Consumer> immediatelyReturn() { + return l -> l.onResponse(null); + } + + private ActionListener responseListener(AtomicInteger result) { + return ActionListener.wrap(r -> { + if (result.compareAndSet(0, 1) == false) { + fail("Response Listener should only be called at most once for every test."); + } + }, e -> { + if (result.compareAndSet(0, -1) == false) { + fail("Response Listener should only be called at most once for every test."); + } + }); + } + + private ActionListener retryListener(AtomicReference result) { + return ActionListener.wrap(result::set, e -> fail("Retry Listener is never expected to fail.")); + } + + private void callThreeTimes(String transformId, TransformRetryableStartUpListener listener) { + listener.triggered(event(transformId)); + listener.triggered(event(transformId)); + listener.triggered(event(transformId)); + } + + private TransformScheduler.Event event(String transformId) { + return new TransformScheduler.Event(transformId, System.currentTimeMillis(), System.currentTimeMillis()); + } + + /** + * When the action fails once then succeeds on the second try + * Then we invoked the retryListener with "true" and then invoked the actionListener's onResponse. 
+ */ + public void testFirstRunFails() { + var retryResult = new AtomicReference(); + var responseResult = new AtomicInteger(0); + var context = mock(TransformContext.class); + + var listener = new TransformRetryableStartUpListener<>( + "transformId", + failOnceThen(immediatelyReturn()), + responseListener(responseResult), + retryListener(retryResult), + () -> true, + context + ); + + callThreeTimes("transformId", listener); + + // assert only 1 retry and 1 success + assertEquals("Response Listener should only be called once.", 1, responseResult.get()); + assertNotNull("Retry Listener should be called.", retryResult.get()); + assertTrue("Retries should be scheduled.", retryResult.get()); + verify(context, times(1)).incrementAndGetStartUpFailureCount(any(IllegalStateException.class)); + verify(context, times(1)).resetStartUpFailureCount(); + } + + private Consumer> failOnceThen(Consumer> followup) { + var firstRun = new AtomicBoolean(true); + return l -> { + if (firstRun.compareAndSet(true, false)) { + l.onFailure(new IllegalStateException("first call fails")); + } else { + followup.accept(l); + } + }; + } + + /** + * When the TransformRetryableStartUpListener is never invoked + * Then there should be no failures to report + */ + public void testUnusedRetryableIsNotReported() { + var context = mock(TransformContext.class); + + new TransformRetryableStartUpListener<>( + "transformId", + failOnceThen(immediatelyReturn()), + responseListener(), + retryListener(), + () -> true, + context + ); + + verifyNoInteractions(context); + } + + private ActionListener retryListener() { + return retryListener(new AtomicReference<>()); + } + + private ActionListener responseListener() { + return responseListener(new AtomicInteger()); + } + + /** + * Given one transformId + * When we receive an event for another transformId + * Then we should not take any action + */ + public void testWrongTransformIdIsIgnored() { + var correctTransformId = "transformId"; + var incorrectTransformId = "someOtherTransformId"; + var retryResult = new AtomicReference(); + var responseResult = new AtomicInteger(0); + var context = mock(TransformContext.class); + + var listener = new TransformRetryableStartUpListener<>( + correctTransformId, + failOnceThen(immediatelyReturn()), + responseListener(responseResult), + retryListener(retryResult), + () -> true, + context + ); + + listener.triggered(event(incorrectTransformId)); + + assertEquals("Response Listener should never be called once.", 0, responseResult.get()); + assertNull("Retry Listener should not be called.", retryResult.get()); + verifyNoInteractions(context); + } + + /** + * Given an action that always fails + * When shouldRetry returns true and then false + * Then we should call the actionListener's onFailure handler + */ + public void testCancelRetries() { + var retryResult = new AtomicReference(); + var responseResult = new AtomicInteger(0); + var context = mock(TransformContext.class); + var runTwice = new AtomicBoolean(true); + + var listener = new TransformRetryableStartUpListener<>( + "transformId", + alwaysFail(), + responseListener(responseResult), + retryListener(retryResult), + () -> runTwice.compareAndSet(true, false), + context + ); + + callThreeTimes("transformId", listener); + + // assert only 1 retry and 1 failure + assertEquals("Response Listener should only be called once.", -1, responseResult.get()); + assertNotNull("Retry Listener should be called.", retryResult.get()); + assertTrue("Retries should be scheduled.", retryResult.get()); + verify(context, 
times(1)).incrementAndGetStartUpFailureCount(any(IllegalStateException.class)); + verify(context, times(1)).resetStartUpFailureCount(); + } + + private Consumer> alwaysFail() { + return l -> l.onFailure(new IllegalStateException("always fail")); + } + + /** + * Given an action that always fails + * When shouldRetry returns false + * Then we should call the actionListener's onFailure handler and the retryListener with "false" + */ + public void testCancelRetryImmediately() { + var retryResult = new AtomicReference(); + var responseResult = new AtomicInteger(0); + var context = mock(TransformContext.class); + + var listener = new TransformRetryableStartUpListener<>( + "transformId", + alwaysFail(), + responseListener(responseResult), + retryListener(retryResult), + () -> false, + context + ); + + callThreeTimes("transformId", listener); + + // assert no retries and 1 failure + assertEquals("Response Listener should only be called once.", -1, responseResult.get()); + assertNotNull("Retry Listener should be called.", retryResult.get()); + assertFalse("Retries should not be scheduled.", retryResult.get()); + verify(context, only()).resetStartUpFailureCount(); + } +} From 16888016048d891befdc026d783f2ca70738154c Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 26 Mar 2024 15:18:22 +0100 Subject: [PATCH 60/79] InternalMultiTermsTests#testReduceWithDoublePromotion should reduce for final (#106750) --- .../analytics/multiterms/InternalMultiTermsTests.java | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTermsTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTermsTests.java index 76d8130f954de..be020f74eafff 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTermsTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTermsTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.SamplingContext; import org.elasticsearch.test.InternalAggregationTestCase; import org.elasticsearch.xpack.analytics.AnalyticsPlugin; @@ -358,18 +359,20 @@ public void testReduceWithDoublePromotion() { keyConverters2, null ); - AggregationReduceContext context = new AggregationReduceContext.ForPartial( + AggregationReduceContext context = new AggregationReduceContext.ForFinal( bigArrays, mockScriptService, () -> false, - mock(AggregationBuilder.class) + mock(AggregationBuilder.class), + i -> {}, + PipelineAggregator.PipelineTree.EMPTY ); InternalMultiTerms result = (InternalMultiTerms) InternalAggregationTestCase.reduce(List.of(terms1, terms2), context); assertThat(result.buckets, hasSize(3)); - assertThat(result.buckets.get(0).getKeyAsString(), equalTo("4|9.223372036854776E18|4.0")); + assertThat(result.buckets.get(0).getKeyAsString(), equalTo("4|9.223372036854776E18|1.0")); assertThat(result.buckets.get(0).getDocCount(), equalTo(3L)); - assertThat(result.buckets.get(1).getKeyAsString(), equalTo("4|9.223372036854776E18|1.0")); + assertThat(result.buckets.get(1).getKeyAsString(), equalTo("4|9.223372036854776E18|4.0")); 
assertThat(result.buckets.get(1).getDocCount(), equalTo(3L)); assertThat(result.buckets.get(2).getKeyAsString(), equalTo("3|9.223372036854776E18|3.0")); assertThat(result.buckets.get(2).getDocCount(), equalTo(2L)); From 58ed0936773fc20aa2e9d9e57ebe1ca1c8b049ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Tue, 26 Mar 2024 15:21:20 +0100 Subject: [PATCH 61/79] [Transform] Raise loglevel of events related to transform lifecycle from DEBUG to INFO (#106602) --- docs/changelog/106602.yaml | 5 +++++ .../transform/action/TransportDeleteTransformAction.java | 2 +- .../xpack/transform/action/TransportPutTransformAction.java | 2 +- .../transform/action/TransportResetTransformAction.java | 2 +- .../transform/action/TransportUpdateTransformAction.java | 2 +- .../transform/action/TransportUpgradeTransformsAction.java | 2 +- 6 files changed, 10 insertions(+), 5 deletions(-) create mode 100644 docs/changelog/106602.yaml diff --git a/docs/changelog/106602.yaml b/docs/changelog/106602.yaml new file mode 100644 index 0000000000000..972d7b5d163d3 --- /dev/null +++ b/docs/changelog/106602.yaml @@ -0,0 +1,5 @@ +pr: 106602 +summary: Raise loglevel of events related to transform lifecycle from DEBUG to INFO +area: Transform +type: enhancement +issues: [] diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportDeleteTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportDeleteTransformAction.java index d96ba88faff9a..51379b81d7e9d 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportDeleteTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportDeleteTransformAction.java @@ -95,7 +95,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A // <3> Delete transform config ActionListener deleteDestIndexListener = ActionListener.wrap( unusedAcknowledgedResponse -> transformConfigManager.deleteTransform(request.getId(), ActionListener.wrap(r -> { - logger.debug("[{}] deleted transform", request.getId()); + logger.info("[{}] deleted transform", request.getId()); auditor.info(request.getId(), "Deleted transform."); listener.onResponse(AcknowledgedResponse.of(r)); }, listener::onFailure)), diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java index 8a82880f4d9a3..df36a850a3b0a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java @@ -168,7 +168,7 @@ private void putTransform(Request request, ActionListener var config = request.getConfig(); transformConfigManager.putTransformConfiguration(config, listener.delegateFailureAndWrap((l, unused) -> { var transformId = config.getId(); - logger.debug("[{}] created transform", transformId); + logger.info("[{}] created transform", transformId); auditor.info(transformId, "Created transform."); var validationFunc = FunctionFactory.create(config); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportResetTransformAction.java 
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportResetTransformAction.java index 87f24ae7c2bc8..6d0e3213d67fc 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportResetTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportResetTransformAction.java @@ -110,7 +110,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A // <4> Reset transform ActionListener updateTransformListener = ActionListener.wrap( unusedUpdateResult -> transformConfigManager.resetTransform(request.getId(), ActionListener.wrap(resetResponse -> { - logger.debug("[{}] reset transform", request.getId()); + logger.info("[{}] reset transform", request.getId()); auditor.info(request.getId(), "Reset transform."); listener.onResponse(AcknowledgedResponse.of(resetResponse)); }, listener::onFailure)), diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java index b35566c6467c4..3fb271aeb1535 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java @@ -154,7 +154,7 @@ protected void doExecute(Task task, Request request, ActionListener li TransformConfig updatedConfig = updateResult.getConfig(); AuthorizationState authState = updateResult.getAuthState(); auditor.info(updatedConfig.getId(), "Updated transform."); - logger.debug("[{}] Updated transform [{}]", updatedConfig.getId(), updateResult.getStatus()); + logger.info("[{}] Updated transform [{}]", updatedConfig.getId(), updateResult.getStatus()); checkTransformConfigAndLogWarnings(updatedConfig); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpgradeTransformsAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpgradeTransformsAction.java index 8dd7b541b4e28..592b7b423c053 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpgradeTransformsAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpgradeTransformsAction.java @@ -199,7 +199,7 @@ private void recursiveUpdate( updateOneTransform(next, dryRun, timeout, ActionListener.wrap(updateResponse -> { if (UpdateResult.Status.DELETED.equals(updateResponse.getStatus()) == false) { auditor.info(next, "Updated transform."); - logger.debug("[{}] Updated transform [{}]", next, updateResponse.getStatus()); + logger.info("[{}] Updated transform [{}]", next, updateResponse.getStatus()); updatesByStatus.compute(updateResponse.getStatus(), (k, v) -> (v == null) ? 
1 : v + 1L); } if (transformsToUpgrade.isEmpty() == false) { From ceb2701719f4a976ea92113b9fcc752881ac2679 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Tue, 26 Mar 2024 15:31:38 +0100 Subject: [PATCH 62/79] Ignore repository-hdfs integ tests in fips mode (#106762) Fixes https://github.com/elastic/elasticsearch/issues/106757 --- plugins/repository-hdfs/build.gradle | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index beaf8723df4d5..49fc88a15f7d3 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -7,6 +7,7 @@ */ import org.elasticsearch.gradle.internal.test.RestIntegTestTask +import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.internal-yaml-rest-test' @@ -81,6 +82,7 @@ tasks.named("dependencyLicenses").configure { tasks.withType(RestIntegTestTask).configureEach { usesDefaultDistribution() + BuildParams.withFipsEnabledOnly(it) jvmArgs '--add-exports', 'java.security.jgss/sun.security.krb5=ALL-UNNAMED' } From 08d7542a31bea6d2965a50e8cfa757acfb177500 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Tue, 26 Mar 2024 16:35:36 +0100 Subject: [PATCH 63/79] Fix concurrency bug in AbstractStringScriptFieldAutomatonQuery (#106678) Back when we introduced queries against runtime fields, Elasticsearch did not support inter-segment concurrency yet. At the time, it was fine to assume that segments will be searched sequentially. AbstractStringScriptFieldAutomatonQuery used to have a BytesRefBuilder instance shared across the segments, which gets re-initialized when each segment starts its work. This is no longer possible with inter-segment concurrency. 
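As a rough illustration of the approach (the method and type names below
are taken from the diff that follows; treat this as an excerpt-style
sketch rather than a compilable unit), the scratch buffer moves from a
field shared by the whole query into the per-segment two-phase iterator:

```
// After the change: each segment's TwoPhaseIterator owns its own scratch
// buffer, so concurrently searched segments no longer share mutable state.
protected TwoPhaseIterator createTwoPhaseIterator(StringFieldScript scriptContext, DocIdSetIterator approximation) {
    BytesRefBuilder scratch = new BytesRefBuilder(); // allocated per segment, not per query
    return new TwoPhaseIterator(approximation) {
        @Override
        public boolean matches() {
            scriptContext.runForDoc(approximation.docID());
            // run the automaton against this doc's values, reusing this segment's scratch buffer
            return AbstractStringScriptFieldAutomatonQuery.this.matches(scriptContext.getValues(), scratch);
        }

        @Override
        public float matchCost() {
            return MATCH_COST;
        }
    };
}
```
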
Closes #105911 --- docs/changelog/106678.yaml | 6 ++ .../AbstractBooleanScriptFieldQuery.java | 2 +- .../AbstractDoubleScriptFieldQuery.java | 2 +- .../AbstractGeoPointScriptFieldQuery.java | 2 +- .../runtime/AbstractIpScriptFieldQuery.java | 2 +- .../runtime/AbstractLongScriptFieldQuery.java | 2 +- .../runtime/AbstractScriptFieldQuery.java | 31 +++++--- ...stractStringScriptFieldAutomatonQuery.java | 26 ++++++- .../mapper/BooleanScriptFieldTypeTests.java | 41 +++++----- .../mapper/DateScriptFieldTypeTests.java | 37 ++++----- .../mapper/DoubleScriptFieldTypeTests.java | 37 ++++----- .../mapper/GeoPointScriptFieldTypeTests.java | 17 +++-- .../index/mapper/IpScriptFieldTypeTests.java | 43 +++++------ .../mapper/KeywordScriptFieldTypeTests.java | 75 ++++++++++--------- .../mapper/LongScriptFieldTypeTests.java | 41 +++++----- .../StringScriptFieldFuzzyQueryTests.java | 22 +++--- .../StringScriptFieldRegexpQueryTests.java | 21 +++--- .../StringScriptFieldWildcardQueryTests.java | 22 +++--- .../AbstractScriptFieldTypeTestCase.java | 13 ++++ .../AbstractGeoShapeScriptFieldQuery.java | 2 +- .../mapper/GeoShapeScriptFieldTypeTests.java | 15 ++-- 21 files changed, 260 insertions(+), 199 deletions(-) create mode 100644 docs/changelog/106678.yaml diff --git a/docs/changelog/106678.yaml b/docs/changelog/106678.yaml new file mode 100644 index 0000000000000..20bf12d6d4346 --- /dev/null +++ b/docs/changelog/106678.yaml @@ -0,0 +1,6 @@ +pr: 106678 +summary: Fix concurrency bug in `AbstractStringScriptFieldAutomatonQuery` +area: Search +type: bug +issues: + - 105911 diff --git a/server/src/main/java/org/elasticsearch/search/runtime/AbstractBooleanScriptFieldQuery.java b/server/src/main/java/org/elasticsearch/search/runtime/AbstractBooleanScriptFieldQuery.java index 38363ee3e3fdd..c6ddd1964188f 100644 --- a/server/src/main/java/org/elasticsearch/search/runtime/AbstractBooleanScriptFieldQuery.java +++ b/server/src/main/java/org/elasticsearch/search/runtime/AbstractBooleanScriptFieldQuery.java @@ -23,7 +23,7 @@ abstract class AbstractBooleanScriptFieldQuery extends AbstractScriptFieldQuery< } @Override - protected boolean matches(BooleanFieldScript scriptContext, int docId) { + protected final boolean matches(BooleanFieldScript scriptContext, int docId) { scriptContext.runForDoc(docId); return matches(scriptContext.trues(), scriptContext.falses()); } diff --git a/server/src/main/java/org/elasticsearch/search/runtime/AbstractDoubleScriptFieldQuery.java b/server/src/main/java/org/elasticsearch/search/runtime/AbstractDoubleScriptFieldQuery.java index 500d00628bd19..722cff6fc0edf 100644 --- a/server/src/main/java/org/elasticsearch/search/runtime/AbstractDoubleScriptFieldQuery.java +++ b/server/src/main/java/org/elasticsearch/search/runtime/AbstractDoubleScriptFieldQuery.java @@ -22,7 +22,7 @@ abstract class AbstractDoubleScriptFieldQuery extends AbstractScriptFieldQuery values) { + protected TwoPhaseIterator createTwoPhaseIterator(StringFieldScript scriptContext, DocIdSetIterator approximation) { + BytesRefBuilder scratch = new BytesRefBuilder(); + return new TwoPhaseIterator(approximation) { + @Override + public boolean matches() { + scriptContext.runForDoc(approximation.docID()); + return AbstractStringScriptFieldAutomatonQuery.this.matches(scriptContext.getValues(), scratch); + } + + @Override + public float matchCost() { + return MATCH_COST; + } + }; + } + + protected final boolean matches(List values, BytesRefBuilder scratch) { for (String value : values) { scratch.copyChars(value); if 
(automaton.run(scratch.bytes(), 0, scratch.length())) { @@ -41,6 +58,11 @@ protected final boolean matches(List values) { return false; } + @Override + protected final boolean matches(List values) { + throw new UnsupportedOperationException(); + } + @Override public final void visit(QueryVisitor visitor) { if (visitor.acceptField(fieldName())) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java index d55eaf9df3452..0cdc9568f1fac 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java @@ -55,6 +55,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -73,8 +74,8 @@ protected ScriptFactory dummyScript() { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true, false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true, false]}")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -104,7 +105,7 @@ public void collect(int doc) throws IOException { }; } }); - assertThat(results, equalTo(List.of(1L, 0L, 1L))); + assertThat(results, containsInAnyOrder(1L, 0L, 1L)); } } } @@ -112,8 +113,8 @@ public void collect(int doc) throws IOException { @Override public void testSort() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); BooleanScriptFieldData ifd = simpleMappedFieldType().fielddataBuilder(mockFielddataContext()).build(null, null); @@ -128,8 +129,8 @@ public void testSort() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); { @@ -185,10 +186,10 @@ public double execute(ExplanationHolder explanation) { @Override public void 
testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true, false]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true, false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(3)); @@ -199,7 +200,7 @@ public void testExistsQuery() throws IOException { @Override public void testRangeQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); MappedFieldType ft = simpleMappedFieldType(); @@ -210,7 +211,7 @@ public void testRangeQuery() throws IOException { } } try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); MappedFieldType ft = simpleMappedFieldType(); @@ -221,8 +222,8 @@ public void testRangeQuery() throws IOException { } } try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); MappedFieldType ft = simpleMappedFieldType(); @@ -269,7 +270,7 @@ protected Query randomRangeQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termQuery(true, mockContext())), equalTo(1)); @@ -282,7 +283,7 @@ public void testTermQuery() throws IOException { } } try 
(Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termQuery(false, mockContext())), equalTo(1)); @@ -305,7 +306,7 @@ protected Query randomTermQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termsQuery(List.of(true, true), mockContext())), equalTo(1)); @@ -315,7 +316,7 @@ public void testTermsQuery() throws IOException { } } try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termsQuery(List.of(false, false), mockContext())), equalTo(1)); @@ -364,7 +365,7 @@ public XContentParser parser() { while (ctx.parser().nextToken() != Token.END_ARRAY) { ootb.parse(ctx); } - iw.addDocument(ctx.doc()); + addDocument(iw, ctx.doc()); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertSameCount( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java index 25a79022c245e..09d4b62fb157c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java @@ -60,6 +60,7 @@ import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -160,8 +161,8 @@ public void testFormatDuel() throws IOException { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356, 1595432181351]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356, 1595432181351]}")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { 
IndexSearcher searcher = newSearcher(reader); @@ -191,7 +192,7 @@ public void collect(int doc) throws IOException { }; } }); - assertThat(results, equalTo(List.of(1595518581354L, 1595518581351L, 1595518581356L))); + assertThat(results, containsInAnyOrder(1595518581354L, 1595518581351L, 1595518581356L)); } } } @@ -199,9 +200,9 @@ public void collect(int doc) throws IOException { @Override public void testSort() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); DateScriptFieldData ifd = simpleMappedFieldType().fielddataBuilder(mockFielddataContext()).build(null, null); @@ -220,9 +221,9 @@ public void testSort() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -300,8 +301,8 @@ private Query randomDistanceFeatureQuery(MappedFieldType ft, SearchExecutionCont @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": []}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": []}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(1)); @@ -312,9 +313,9 @@ public void testExistsQuery() throws IOException { @Override public void testRangeQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - 
iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); MappedFieldType ft = simpleMappedFieldType(); @@ -394,8 +395,8 @@ protected Query randomRangeQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181355]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181355]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termQuery("2020-07-22T15:36:21.354Z", mockContext())), equalTo(1)); @@ -422,8 +423,8 @@ protected Query randomTermQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181355]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181355]}")))); try (DirectoryReader reader = iw.getReader()) { MappedFieldType ft = simpleMappedFieldType(); IndexSearcher searcher = newSearcher(reader); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java index ed365a2460203..9547b4f9cb9a3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java @@ -45,6 +45,7 @@ import java.util.Map; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; public class DoubleScriptFieldTypeTests extends AbstractNonTextScriptFieldTypeTestCase { @@ -71,8 +72,8 @@ public void testFormat() throws IOException { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1.0]}")))); - iw.addDocument(List.of(new 
StoredField("_source", new BytesRef("{\"foo\": [3.14, 1.4]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1.0]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [3.14, 1.4]}")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -102,7 +103,7 @@ public void collect(int doc) throws IOException { }; } }); - assertThat(results, equalTo(List.of(2.0, 2.4, 4.140000000000001))); + assertThat(results, containsInAnyOrder(2.0, 2.4, 4.140000000000001)); } } } @@ -110,9 +111,9 @@ public void collect(int doc) throws IOException { @Override public void testSort() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1.1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [4.2]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1.1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [4.2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.1]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); DoubleScriptFieldData ifd = simpleMappedFieldType().fielddataBuilder(mockFielddataContext()).build(null, null); @@ -128,9 +129,9 @@ public void testSort() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1.1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [4.2]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1.1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [4.2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.1]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -158,8 +159,8 @@ public double execute(ExplanationHolder explanation) { @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(1)); @@ -170,9 +171,9 @@ public void testExistsQuery() throws IOException { @Override public void testRangeQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), 
directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.5]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.5]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); MappedFieldType ft = simpleMappedFieldType(); @@ -195,8 +196,8 @@ protected Query randomRangeQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termQuery("1", mockContext())), equalTo(1)); @@ -218,8 +219,8 @@ protected Query randomTermQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.1]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termsQuery(List.of("1"), mockContext())), equalTo(1)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointScriptFieldTypeTests.java index 36f691341425c..3289e46941a45 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointScriptFieldTypeTests.java @@ -44,6 +44,7 @@ import java.util.Map; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; public class GeoPointScriptFieldTypeTests extends AbstractNonTextScriptFieldTypeTestCase { @@ -71,8 +72,8 @@ protected boolean supportsRangeQueries() { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 45.0, \"lon\" : 45.0}}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 0.0, \"lon\" : 0.0}}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": 
{\"lat\": 45.0, \"lon\" : 45.0}}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 0.0, \"lon\" : 0.0}}")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -103,7 +104,7 @@ public void collect(int doc) throws IOException { }; } }); - assertThat(results, equalTo(List.of(new GeoPoint(45.0, 45.0), new GeoPoint(0.0, 0.0)))); + assertThat(results, containsInAnyOrder(new GeoPoint(45.0, 45.0), new GeoPoint(0.0, 0.0))); } } } @@ -117,7 +118,7 @@ public void testSort() throws IOException { public void testFetch() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef(""" + addDocument(iw, List.of(new StoredField("_source", new BytesRef(""" {"foo": {"lat": 45.0, "lon" : 45.0}}""")))); try (DirectoryReader reader = iw.getReader()) { SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -138,8 +139,8 @@ public void testFetch() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 45.0, \"lon\" : 45.0}}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 0.0, \"lon\" : 0.0}}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 45.0, \"lon\" : 45.0}}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 0.0, \"lon\" : 0.0}}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -167,8 +168,8 @@ public double execute(ExplanationHolder explanation) { @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 45.0, \"lon\" : 45.0}}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 0.0, \"lon\" : 0.0}}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 45.0, \"lon\" : 45.0}}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 0.0, \"lon\" : 0.0}}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java index 5eb66e631d86f..4726424ada5f2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java @@ -49,6 +49,7 @@ import java.util.Map; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.sameInstance; @@ -75,8 +76,8 @@ public void testFormat() throws IOException { @Override 
public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.2\", \"192.168.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.2\", \"192.168.1\"]}")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -107,7 +108,7 @@ public void collect(int doc) throws IOException { }; } }); - assertThat(results, equalTo(List.of("192.168.0.1", "192.168.1.1", "192.168.2.1"))); + assertThat(results, containsInAnyOrder("192.168.0.1", "192.168.1.1", "192.168.2.1")); } } } @@ -115,9 +116,9 @@ public void collect(int doc) throws IOException { @Override public void testSort() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.4\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.2\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.4\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.2\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); BinaryScriptFieldData ifd = simpleMappedFieldType().fielddataBuilder(mockFielddataContext()).build(null, null); @@ -142,9 +143,9 @@ public void testSort() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.4\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.2\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.4\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.2\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -172,8 +173,8 @@ public double execute(ExplanationHolder explanation) { @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); + addDocument(iw, List.of(new StoredField("_source", new 
BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(1)); @@ -184,9 +185,9 @@ public void testExistsQuery() throws IOException { @Override public void testRangeQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"200.0.0.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"1.1.1.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"200.0.0.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"1.1.1.1\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat( @@ -207,9 +208,9 @@ protected Query randomRangeQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"200.0.0\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"200.0.0\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); IpScriptFieldType fieldType = build("append_param", Map.of("param", ".1"), OnScriptError.FAIL); @@ -229,10 +230,10 @@ protected Query randomTermQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.1.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"200.0.0.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"1.1.1.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.1.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"200.0.0.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"1.1.1.1\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat( diff --git 
a/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java index d8903251e6c3b..6912194625bb7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java @@ -49,6 +49,7 @@ import java.util.Map; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; public class KeywordScriptFieldTypeTests extends AbstractScriptFieldTypeTestCase { @@ -66,8 +67,8 @@ protected ScriptFactory dummyScript() { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2, 1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2, 1]}")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -97,7 +98,7 @@ public void collect(int doc) throws IOException { }; } }); - assertThat(results, equalTo(List.of("1-suffix", "1-suffix", "2-suffix"))); + assertThat(results, containsInAnyOrder("1-suffix", "1-suffix", "2-suffix")); } } } @@ -105,9 +106,9 @@ public void collect(int doc) throws IOException { @Override public void testSort() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"a\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"d\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"b\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"a\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"d\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"b\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); BinaryScriptFieldData ifd = simpleMappedFieldType().fielddataBuilder(mockFielddataContext()).build(null, null); @@ -123,9 +124,9 @@ public void testSort() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"a\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aaa\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aa\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"a\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aaa\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aa\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); SearchExecutionContext searchContext = 
mockContext(true, simpleMappedFieldType()); @@ -153,8 +154,8 @@ public double execute(ExplanationHolder explanation) { @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(1)); @@ -164,11 +165,11 @@ public void testExistsQuery() throws IOException { public void testFuzzyQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); // No edits, matches - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"caat\"]}")))); // Single insertion, matches - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cta\"]}")))); // Single transposition, matches - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"caaat\"]}")))); // Two insertions, no match - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); // Totally wrong, no match + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); // No edits, matches + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"caat\"]}")))); // Single insertion, matches + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cta\"]}")))); // Single transposition, matches + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"caaat\"]}")))); // Two insertions, no match + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); // Totally wrong, no match try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat( @@ -200,9 +201,9 @@ private Query randomFuzzyQuery(MappedFieldType ft, SearchExecutionContext ctx) { public void testPrefixQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cata\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cata\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().prefixQuery("cat", null, mockContext())), equalTo(2)); @@ -225,9 +226,9 @@ private Query randomPrefixQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testRangeQuery() 
throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cata\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cata\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat( @@ -268,9 +269,9 @@ protected Query randomRangeQuery(MappedFieldType ft, SearchExecutionContext ctx) public void testRegexpQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cata\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cata\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat( @@ -294,8 +295,8 @@ private Query randomRegexpQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); KeywordScriptFieldType fieldType = build("append_param", Map.of("param", "-suffix"), OnScriptError.FAIL); @@ -312,10 +313,10 @@ protected Query randomTermQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [3]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [4]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [3]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [4]}")))); try (DirectoryReader 
reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termsQuery(List.of("1", "2"), mockContext())), equalTo(2)); @@ -330,8 +331,8 @@ protected Query randomTermsQuery(MappedFieldType ft, SearchExecutionContext ctx) public void testWildcardQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aab\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"b\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aab\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"b\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().wildcardQuery("a*b", null, mockContext())), equalTo(1)); @@ -342,8 +343,8 @@ public void testWildcardQuery() throws IOException { // Normalized WildcardQueries are requested by the QueryStringQueryParser public void testNormalizedWildcardQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aab\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"b\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aab\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"b\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().normalizedWildcardQuery("a*b", null, mockContext())), equalTo(1)); @@ -365,8 +366,8 @@ private Query randomWildcardQuery(MappedFieldType ft, SearchExecutionContext ctx public void testMatchQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); KeywordScriptFieldType fieldType = build("append_param", Map.of("param", "-Suffix"), OnScriptError.FAIL); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java index debcd3c5fa911..83b3dbe858471 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java @@ -47,6 +47,7 @@ import java.util.Map; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -83,8 +84,8 @@ public void testLongFromSource() throws IOException { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); 
RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2, 1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2, 1]}")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -114,7 +115,7 @@ public void collect(int doc) throws IOException { }; } }); - assertThat(results, equalTo(List.of(2L, 2L, 3L))); + assertThat(results, containsInAnyOrder(2L, 2L, 3L)); } } } @@ -122,9 +123,9 @@ public void collect(int doc) throws IOException { @Override public void testSort() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [4]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [4]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); LongScriptFieldData ifd = simpleMappedFieldType().fielddataBuilder(mockFielddataContext()).build(null, null); @@ -139,9 +140,9 @@ public void testSort() throws IOException { public void testNow() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); LongScriptFieldData ifd = build("millis_ago", Map.of(), OnScriptError.FAIL).fielddataBuilder(mockFielddataContext()) @@ -164,9 +165,9 @@ public void testNow() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [4]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [4]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try 
(DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -194,8 +195,8 @@ public double execute(ExplanationHolder explanation) { @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(1)); @@ -206,8 +207,8 @@ public void testExistsQuery() throws IOException { @Override public void testRangeQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); MappedFieldType ft = simpleMappedFieldType(); @@ -228,8 +229,8 @@ protected Query randomRangeQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termQuery("1", mockContext())), equalTo(1)); @@ -251,8 +252,8 @@ protected Query randomTermQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termsQuery(List.of("1"), mockContext())), equalTo(1)); diff --git a/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldFuzzyQueryTests.java b/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldFuzzyQueryTests.java index 86486cac893cf..3ded47b6d2671 
100644 --- a/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldFuzzyQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldFuzzyQueryTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.runtime; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.automaton.ByteRunAutomaton; import org.elasticsearch.script.Script; @@ -68,18 +69,19 @@ protected StringScriptFieldFuzzyQuery mutate(StringScriptFieldFuzzyQuery orig) { @Override public void testMatches() { StringScriptFieldFuzzyQuery query = StringScriptFieldFuzzyQuery.build(randomScript(), leafFactory, "test", "foo", 1, 0, false); - assertTrue(query.matches(List.of("foo"))); - assertTrue(query.matches(List.of("foa"))); - assertTrue(query.matches(List.of("foo", "bar"))); - assertFalse(query.matches(List.of("bar"))); + BytesRefBuilder scratch = new BytesRefBuilder(); + assertTrue(query.matches(List.of("foo"), scratch)); + assertTrue(query.matches(List.of("foa"), scratch)); + assertTrue(query.matches(List.of("foo", "bar"), scratch)); + assertFalse(query.matches(List.of("bar"), scratch)); query = StringScriptFieldFuzzyQuery.build(randomScript(), leafFactory, "test", "foo", 0, 0, false); - assertTrue(query.matches(List.of("foo"))); - assertFalse(query.matches(List.of("foa"))); + assertTrue(query.matches(List.of("foo"), scratch)); + assertFalse(query.matches(List.of("foa"), scratch)); query = StringScriptFieldFuzzyQuery.build(randomScript(), leafFactory, "test", "foo", 2, 0, false); - assertTrue(query.matches(List.of("foo"))); - assertTrue(query.matches(List.of("foa"))); - assertTrue(query.matches(List.of("faa"))); - assertFalse(query.matches(List.of("faaa"))); + assertTrue(query.matches(List.of("foo"), scratch)); + assertTrue(query.matches(List.of("foa"), scratch)); + assertTrue(query.matches(List.of("faa"), scratch)); + assertFalse(query.matches(List.of("faaa"), scratch)); } @Override diff --git a/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldRegexpQueryTests.java b/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldRegexpQueryTests.java index 50c6786de1282..46f841c344e5f 100644 --- a/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldRegexpQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldRegexpQueryTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.runtime; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.automaton.ByteRunAutomaton; import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.RegExp; @@ -84,13 +85,14 @@ public void testMatches() { 0, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT ); - assertTrue(query.matches(List.of("astuffb"))); - assertFalse(query.matches(List.of("astuffB"))); - assertFalse(query.matches(List.of("fffff"))); - assertFalse(query.matches(List.of("ab"))); - assertFalse(query.matches(List.of("aasdf"))); - assertFalse(query.matches(List.of("dsfb"))); - assertTrue(query.matches(List.of("astuffb", "fffff"))); + BytesRefBuilder scratch = new BytesRefBuilder(); + assertTrue(query.matches(List.of("astuffb"), scratch)); + assertFalse(query.matches(List.of("astuffB"), scratch)); + assertFalse(query.matches(List.of("fffff"), scratch)); + assertFalse(query.matches(List.of("ab"), scratch)); + assertFalse(query.matches(List.of("aasdf"), scratch)); + 
assertFalse(query.matches(List.of("dsfb"), scratch)); + assertTrue(query.matches(List.of("astuffb", "fffff"), scratch)); StringScriptFieldRegexpQuery ciQuery = new StringScriptFieldRegexpQuery( randomScript(), @@ -101,9 +103,8 @@ public void testMatches() { RegExp.ASCII_CASE_INSENSITIVE, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT ); - assertTrue(ciQuery.matches(List.of("astuffB"))); - assertTrue(ciQuery.matches(List.of("Astuffb", "fffff"))); - + assertTrue(ciQuery.matches(List.of("astuffB"), scratch)); + assertTrue(ciQuery.matches(List.of("Astuffb", "fffff"), scratch)); } @Override diff --git a/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldWildcardQueryTests.java b/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldWildcardQueryTests.java index 37e24553f9fce..f6cd59f4254ad 100644 --- a/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldWildcardQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldWildcardQueryTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.runtime; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.automaton.ByteRunAutomaton; import org.elasticsearch.script.Script; @@ -52,18 +53,19 @@ protected StringScriptFieldWildcardQuery mutate(StringScriptFieldWildcardQuery o @Override public void testMatches() { StringScriptFieldWildcardQuery query = new StringScriptFieldWildcardQuery(randomScript(), leafFactory, "test", "a*b", false); - assertTrue(query.matches(List.of("astuffb"))); - assertFalse(query.matches(List.of("Astuffb"))); - assertFalse(query.matches(List.of("fffff"))); - assertFalse(query.matches(List.of("a"))); - assertFalse(query.matches(List.of("b"))); - assertFalse(query.matches(List.of("aasdf"))); - assertFalse(query.matches(List.of("dsfb"))); - assertTrue(query.matches(List.of("astuffb", "fffff"))); + BytesRefBuilder scratch = new BytesRefBuilder(); + assertTrue(query.matches(List.of("astuffb"), scratch)); + assertFalse(query.matches(List.of("Astuffb"), scratch)); + assertFalse(query.matches(List.of("fffff"), scratch)); + assertFalse(query.matches(List.of("a"), scratch)); + assertFalse(query.matches(List.of("b"), scratch)); + assertFalse(query.matches(List.of("aasdf"), scratch)); + assertFalse(query.matches(List.of("dsfb"), scratch)); + assertTrue(query.matches(List.of("astuffb", "fffff"), scratch)); StringScriptFieldWildcardQuery ciQuery = new StringScriptFieldWildcardQuery(randomScript(), leafFactory, "test", "a*b", true); - assertTrue(ciQuery.matches(List.of("Astuffb"))); - assertTrue(ciQuery.matches(List.of("astuffB", "fffff"))); + assertTrue(ciQuery.matches(List.of("Astuffb"), scratch)); + assertTrue(ciQuery.matches(List.of("astuffB", "fffff"), scratch)); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java index ea97bafc5e4c8..675b5959f35a3 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java @@ -13,6 +13,7 @@ import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReaderContext; import 
org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -64,6 +65,18 @@ public abstract class AbstractScriptFieldTypeTestCase extends MapperServiceTestC protected abstract String typeName(); + /** + * Add the provided document to the provided writer, and randomly flush. + * This is useful for situations where there are not enough documents indexed to trigger random flush and commit performed + * by {@link RandomIndexWriter}. Flushing is important to obtain multiple slices and inter-segment concurrency. + */ + protected static void addDocument(RandomIndexWriter iw, Iterable indexableFields) throws IOException { + iw.addDocument(indexableFields); + if (randomBoolean()) { + iw.flush(); + } + } + public final void testMinimalSerializesToItself() throws IOException { XContentBuilder orig = JsonXContent.contentBuilder().startObject(); createMapperService(runtimeFieldMapping(this::minimalMapping)).documentMapper().mapping().toXContent(orig, ToXContent.EMPTY_PARAMS); diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/runtime/AbstractGeoShapeScriptFieldQuery.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/runtime/AbstractGeoShapeScriptFieldQuery.java index 18020bd44ca6e..c178b20530f0c 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/runtime/AbstractGeoShapeScriptFieldQuery.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/runtime/AbstractGeoShapeScriptFieldQuery.java @@ -22,7 +22,7 @@ abstract class AbstractGeoShapeScriptFieldQuery extends AbstractScriptFieldQuery } @Override - protected boolean matches(GeometryFieldScript scriptContext, int docId) { + protected final boolean matches(GeometryFieldScript scriptContext, int docId) { scriptContext.runForDoc(docId); return matches(scriptContext.geometry()); } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeScriptFieldTypeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeScriptFieldTypeTests.java index 331bfbf8cd305..592cb65800b71 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeScriptFieldTypeTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeScriptFieldTypeTests.java @@ -99,8 +99,8 @@ protected ScriptFactory dummyScript() { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(0.0 0.0, 1.0 1.0)\" }")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(45.0 45.0, 3.0 3.0)\" }")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(0.0 0.0, 1.0 1.0)\" }")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(45.0 45.0, 3.0 3.0)\" }")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -128,6 +128,7 @@ public void collect(int doc) throws IOException { }; } }); + assertEquals(2, results.size()); } } } @@ -141,7 +142,7 @@ public void testSort() throws IOException { public void testFetch() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = 
new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef(""" + addDocument(iw, List.of(new StoredField("_source", new BytesRef(""" {"foo": {"coordinates": [[45.0, 45.0], [0.0, 0.0]], "type" : "LineString"}}""")))); try (DirectoryReader reader = iw.getReader()) { SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -162,8 +163,8 @@ public void testFetch() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(0.0 0.0, 1.0 1.0)\" }")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(45.0 45.0, 3.0 3.0)\" }")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(0.0 0.0, 1.0 1.0)\" }")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(45.0 45.0, 3.0 3.0)\" }")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -196,8 +197,8 @@ public double execute(ExplanationHolder explanation) { @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(0.0 0.0, 1.0 1.0)\" }")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(45.0 45.0, 3.0 3.0)\" }")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(0.0 0.0, 1.0 1.0)\" }")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(45.0 45.0, 3.0 3.0)\" }")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(2)); From f51064aebdef0c66b4230b8306d1ad0de9d39c0d Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Tue, 26 Mar 2024 08:39:17 -0700 Subject: [PATCH 64/79] Set index mode earlier for new downsample index (#106728) * Set index mode earlier for new downsample index Downsample index is created using temporary index service using a static predefined subset of index settings. All other settings are later copied over from source index. As discovered in #106338 this causes context like index mode to be missing during initial index creation process. This PR adds index mode and related required settings to initial set of index settings in order to have access to this information during initial create index operation. 
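As a minimal sketch of the change (assuming sourceIndexMetadata is the IndexMetadata of the
source index and builder is the Settings.Builder used for the temporary downsample index;
only the setting keys that appear in the diff below are real), the time-series context is now
part of the initial settings instead of being copied over afterwards:

    // Hedged sketch: make the index mode and time-series bounds available at create time,
    // so the downsample index is created with the same context as its source index.
    builder.put(IndexSettings.MODE.getKey(), sourceIndexMetadata.getIndexMode())
        .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), sourceIndexMetadata.getRoutingPaths())
        .put(IndexSettings.TIME_SERIES_START_TIME.getKey(),
            sourceIndexMetadata.getSettings().get(IndexSettings.TIME_SERIES_START_TIME.getKey()))
        .put(IndexSettings.TIME_SERIES_END_TIME.getKey(),
            sourceIndexMetadata.getSettings().get(IndexSettings.TIME_SERIES_END_TIME.getKey()));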
--- .../xpack/downsample/TransportDownsampleAction.java | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java index 0570d93441be1..58401451fa86b 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java @@ -805,7 +805,17 @@ private void createDownsampleIndex( .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, String.valueOf(numberOfReplicas)) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1") .put(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey(), DownsampleTaskStatus.STARTED) - .put(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL.getKey(), downsampleInterval); + .put(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL.getKey(), downsampleInterval) + .put(IndexSettings.MODE.getKey(), sourceIndexMetadata.getIndexMode()) + .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), sourceIndexMetadata.getRoutingPaths()) + .put( + IndexSettings.TIME_SERIES_START_TIME.getKey(), + sourceIndexMetadata.getSettings().get(IndexSettings.TIME_SERIES_START_TIME.getKey()) + ) + .put( + IndexSettings.TIME_SERIES_END_TIME.getKey(), + sourceIndexMetadata.getSettings().get(IndexSettings.TIME_SERIES_END_TIME.getKey()) + ); if (sourceIndexMetadata.getSettings().hasValue(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey())) { builder.put( MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), From a279a302bfe9289e522a4bb32499af1304a70c0b Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 26 Mar 2024 16:51:43 +0100 Subject: [PATCH 65/79] Move more XContent parsing to test codebase (#106704) Follow up to #105801 moving more parsers that are test-only over to the test codebase. 
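The recurring pattern in this patch, sketched with a hypothetical ExampleStats class and its
test (the names are illustrative; ConstructingObjectParser, ParseField and constructorArg are
the real xcontent utilities used throughout the diff): production classes keep only their
public field-name constants and toXContent logic, while the parser needed to read that output
back is declared next to the test that round-trips it.

    // Hedged sketch: the test-only parser now lives in ExampleStatsTests instead of ExampleStats.
    static final ConstructingObjectParser<ExampleStats, Void> PARSER = new ConstructingObjectParser<>(
        "example_stats",
        true,
        args -> new ExampleStats((int) args[0], (int) args[1])
    );
    static {
        PARSER.declareInt(constructorArg(), new ParseField(ExampleStats.Fields.TOTAL));
        PARSER.declareInt(constructorArg(), new ParseField(ExampleStats.Fields.FAILED));
    }
    // Tests then call PARSER.apply(parser, null) where they previously called ExampleStats.fromXContent(parser).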
--- .../action/DocWriteResponse.java | 68 ++------------ .../snapshots/status/SnapshotIndexStatus.java | 48 ---------- .../snapshots/status/SnapshotShardsStats.java | 32 ------- .../snapshots/status/SnapshotStatus.java | 77 ++-------------- .../broadcast/BaseBroadcastResponse.java | 28 ------ .../cluster/health/ClusterIndexHealth.java | 90 ++----------------- .../cluster/health/ClusterShardHealth.java | 65 ++------------ .../script/ScriptLanguagesInfo.java | 40 +-------- .../profile/SearchProfileDfsPhaseResult.java | 24 +---- .../query/QueryProfileShardResult.java | 41 --------- .../health/ClusterHealthResponsesTests.java | 2 +- .../status/SnapshotIndexStatusTests.java | 49 +++++++++- .../status/SnapshotShardsStatsTests.java | 29 +++++- .../snapshots/status/SnapshotStatusTests.java | 61 ++++++++++++- .../status/SnapshotsStatusResponseTests.java | 2 +- .../GetScriptLanguageResponseTests.java | 29 +++++- .../analyze/ReloadAnalyzersResponseTests.java | 1 - .../query/ValidateQueryResponseTests.java | 1 - .../action/bulk/BulkItemResponseTests.java | 55 +++++++++++- .../action/delete/DeleteResponseTests.java | 4 +- .../action/index/IndexResponseTests.java | 3 +- .../action/update/UpdateResponseTests.java | 3 +- .../health/ClusterIndexHealthTests.java | 72 ++++++++++++++- .../health/ClusterShardHealthTests.java | 46 +++++++++- .../SearchProfileDfsPhaseResultTests.java | 3 +- .../query/QueryProfileShardResultTests.java | 3 +- .../search/SearchResponseUtils.java | 62 ++++++++++++- .../AbstractBroadcastResponseTestCase.java | 29 ++++++ .../test/rest/ESRestTestCase.java | 3 +- .../termsenum/action/TermsEnumResponse.java | 32 ------- .../termsenum/TermsEnumResponseTests.java | 31 ++++++- 31 files changed, 502 insertions(+), 531 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java index fdef41acb16da..685fc032431c3 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -12,13 +12,11 @@ import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -26,7 +24,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.net.URLEncoder; @@ -34,7 +31,6 @@ import java.util.Locale; import java.util.Objects; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; @@ -43,14 +39,14 @@ */ public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, ToXContentObject { - private 
static final String _SHARDS = "_shards"; - private static final String _INDEX = "_index"; - private static final String _ID = "_id"; - private static final String _VERSION = "_version"; - private static final String _SEQ_NO = "_seq_no"; - private static final String _PRIMARY_TERM = "_primary_term"; - private static final String RESULT = "result"; - private static final String FORCED_REFRESH = "forced_refresh"; + public static final String _SHARDS = "_shards"; + public static final String _INDEX = "_index"; + public static final String _ID = "_id"; + public static final String _VERSION = "_version"; + public static final String _SEQ_NO = "_seq_no"; + public static final String _PRIMARY_TERM = "_primary_term"; + public static final String RESULT = "result"; + public static final String FORCED_REFRESH = "forced_refresh"; /** * An enum that represents the results of CRUD operations, primarily used to communicate the type of @@ -302,54 +298,6 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t return builder; } - /** - * Parse the output of the {@link #innerToXContent(XContentBuilder, Params)} method. - * - * This method is intended to be called by subclasses and must be called multiple times to parse all the information concerning - * {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly - * if needed and then immediately returns. - */ - public static void parseInnerToXContent(XContentParser parser, Builder context) throws IOException { - XContentParser.Token token = parser.currentToken(); - ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); - - String currentFieldName = parser.currentName(); - token = parser.nextToken(); - - if (token.isValue()) { - if (_INDEX.equals(currentFieldName)) { - // index uuid and shard id are unknown and can't be parsed back for now. - context.setShardId(new ShardId(new Index(parser.text(), IndexMetadata.INDEX_UUID_NA_VALUE), -1)); - } else if (_ID.equals(currentFieldName)) { - context.setId(parser.text()); - } else if (_VERSION.equals(currentFieldName)) { - context.setVersion(parser.longValue()); - } else if (RESULT.equals(currentFieldName)) { - String result = parser.text(); - for (Result r : Result.values()) { - if (r.getLowercase().equals(result)) { - context.setResult(r); - break; - } - } - } else if (FORCED_REFRESH.equals(currentFieldName)) { - context.setForcedRefresh(parser.booleanValue()); - } else if (_SEQ_NO.equals(currentFieldName)) { - context.setSeqNo(parser.longValue()); - } else if (_PRIMARY_TERM.equals(currentFieldName)) { - context.setPrimaryTerm(parser.longValue()); - } - } else if (token == XContentParser.Token.START_OBJECT) { - if (_SHARDS.equals(currentFieldName)) { - context.setShardInfo(ShardInfo.fromXContent(parser)); - } else { - parser.skipChildren(); // skip potential inner objects for forward compatibility - } - } else if (token == XContentParser.Token.START_ARRAY) { - parser.skipChildren(); // skip potential inner arrays for forward compatibility - } - } - /** * Base class of all {@link DocWriteResponse} builders. 
These {@link DocWriteResponse.Builder} are used during * xcontent parsing to temporarily store the parsed values, then the {@link Builder#build()} method is called to diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java index 5d66baf0216ad..4a98ff62f6293 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java @@ -8,26 +8,17 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import org.elasticsearch.common.util.Maps; -import org.elasticsearch.common.xcontent.XContentParserUtils; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collection; import java.util.HashMap; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.Objects; -import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; /** * Represents snapshot status of all shards in the index @@ -118,45 +109,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - static final ObjectParser.NamedObjectParser PARSER; - static { - ConstructingObjectParser innerParser = new ConstructingObjectParser<>( - "snapshot_index_status", - true, - (Object[] parsedObjects, String index) -> { - int i = 0; - SnapshotShardsStats shardsStats = ((SnapshotShardsStats) parsedObjects[i++]); - SnapshotStats stats = ((SnapshotStats) parsedObjects[i++]); - @SuppressWarnings("unchecked") - List shardStatuses = (List) parsedObjects[i]; - - final Map indexShards; - if (shardStatuses == null || shardStatuses.isEmpty()) { - indexShards = emptyMap(); - } else { - indexShards = Maps.newMapWithExpectedSize(shardStatuses.size()); - for (SnapshotIndexShardStatus shardStatus : shardStatuses) { - indexShards.put(shardStatus.getShardId().getId(), shardStatus); - } - } - return new SnapshotIndexStatus(index, indexShards, shardsStats, stats); - } - ); - innerParser.declareObject( - constructorArg(), - (p, c) -> SnapshotShardsStats.PARSER.apply(p, null), - new ParseField(SnapshotShardsStats.Fields.SHARDS_STATS) - ); - innerParser.declareObject(constructorArg(), (p, c) -> SnapshotStats.fromXContent(p), new ParseField(SnapshotStats.Fields.STATS)); - innerParser.declareNamedObjects(constructorArg(), SnapshotIndexShardStatus.PARSER, new ParseField(Fields.SHARDS)); - PARSER = ((p, c, name) -> innerParser.apply(p, name)); - } - - public static SnapshotIndexStatus fromXContent(XContentParser parser) throws IOException { - XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); - return PARSER.parse(parser, null, parser.currentName()); - } - @Override public boolean equals(Object o) { if (this == o) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java index 5bbc5368505db..28806b0aca87e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java @@ -8,18 +8,13 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collection; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; - /** * Status of a snapshot shards */ @@ -129,33 +124,6 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par return builder; } - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - Fields.SHARDS_STATS, - true, - (Object[] parsedObjects) -> { - int i = 0; - int initializingShards = (int) parsedObjects[i++]; - int startedShards = (int) parsedObjects[i++]; - int finalizingShards = (int) parsedObjects[i++]; - int doneShards = (int) parsedObjects[i++]; - int failedShards = (int) parsedObjects[i++]; - int totalShards = (int) parsedObjects[i]; - return new SnapshotShardsStats(initializingShards, startedShards, finalizingShards, doneShards, failedShards, totalShards); - } - ); - static { - PARSER.declareInt(constructorArg(), new ParseField(Fields.INITIALIZING)); - PARSER.declareInt(constructorArg(), new ParseField(Fields.STARTED)); - PARSER.declareInt(constructorArg(), new ParseField(Fields.FINALIZING)); - PARSER.declareInt(constructorArg(), new ParseField(Fields.DONE)); - PARSER.declareInt(constructorArg(), new ParseField(Fields.FAILED)); - PARSER.declareInt(constructorArg(), new ParseField(Fields.TOTAL)); - } - - public static SnapshotShardsStats fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 956ce57d168e0..e228ad18641fe 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; @@ -19,12 +18,7 @@ import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.Nullable; import org.elasticsearch.snapshots.Snapshot; -import org.elasticsearch.snapshots.SnapshotId; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; @@ -34,11 +28,7 
@@ import java.util.Map; import java.util.Objects; -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * Status of a snapshot @@ -87,7 +77,7 @@ public class SnapshotStatus implements ChunkedToXContentObject, Writeable { updateShardStats(startTime, time); } - private SnapshotStatus( + SnapshotStatus( Snapshot snapshot, State state, List shards, @@ -182,12 +172,12 @@ public SnapshotStats getStats() { return stats; } - private static final String SNAPSHOT = "snapshot"; - private static final String REPOSITORY = "repository"; - private static final String UUID = "uuid"; - private static final String STATE = "state"; - private static final String INDICES = "indices"; - private static final String INCLUDE_GLOBAL_STATE = "include_global_state"; + static final String SNAPSHOT = "snapshot"; + static final String REPOSITORY = "repository"; + static final String UUID = "uuid"; + static final String STATE = "state"; + static final String INDICES = "indices"; + static final String INCLUDE_GLOBAL_STATE = "include_global_state"; @Override public Iterator toXContentChunked(ToXContent.Params params) { @@ -206,59 +196,6 @@ public Iterator toXContentChunked(ToXContent.Params params }), getIndices().values().iterator(), Iterators.single((b, p) -> b.endObject().endObject())); } - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "snapshot_status", - true, - (Object[] parsedObjects) -> { - int i = 0; - String name = (String) parsedObjects[i++]; - String repository = (String) parsedObjects[i++]; - String uuid = (String) parsedObjects[i++]; - String rawState = (String) parsedObjects[i++]; - Boolean includeGlobalState = (Boolean) parsedObjects[i++]; - SnapshotStats stats = ((SnapshotStats) parsedObjects[i++]); - SnapshotShardsStats shardsStats = ((SnapshotShardsStats) parsedObjects[i++]); - @SuppressWarnings("unchecked") - List indices = ((List) parsedObjects[i]); - - Snapshot snapshot = new Snapshot(repository, new SnapshotId(name, uuid)); - SnapshotsInProgress.State state = SnapshotsInProgress.State.valueOf(rawState); - Map indicesStatus; - List shards; - if (indices == null || indices.isEmpty()) { - indicesStatus = emptyMap(); - shards = emptyList(); - } else { - indicesStatus = Maps.newMapWithExpectedSize(indices.size()); - shards = new ArrayList<>(); - for (SnapshotIndexStatus index : indices) { - indicesStatus.put(index.getIndex(), index); - shards.addAll(index.getShards().values()); - } - } - return new SnapshotStatus(snapshot, state, shards, indicesStatus, shardsStats, stats, includeGlobalState); - } - ); - static { - PARSER.declareString(constructorArg(), new ParseField(SNAPSHOT)); - PARSER.declareString(constructorArg(), new ParseField(REPOSITORY)); - PARSER.declareString(constructorArg(), new ParseField(UUID)); - PARSER.declareString(constructorArg(), new ParseField(STATE)); - PARSER.declareBoolean(optionalConstructorArg(), new ParseField(INCLUDE_GLOBAL_STATE)); - PARSER.declareField( - constructorArg(), - SnapshotStats::fromXContent, - new ParseField(SnapshotStats.Fields.STATS), - ObjectParser.ValueType.OBJECT - ); - PARSER.declareObject(constructorArg(), SnapshotShardsStats.PARSER, new ParseField(SnapshotShardsStats.Fields.SHARDS_STATS)); - PARSER.declareNamedObjects(constructorArg(), 
SnapshotIndexStatus.PARSER, new ParseField(INDICES)); - } - - public static SnapshotStatus fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - private void updateShardStats(long startTime, long time) { stats = new SnapshotStats(startTime, time, 0, 0, 0, 0, 0, 0); shardsStats = new SnapshotShardsStats(shards); diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java index b69b87190f2a7..3a27d6ac58534 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java @@ -13,15 +13,11 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import java.io.IOException; import java.util.List; import static org.elasticsearch.action.support.DefaultShardOperationFailedException.readShardOperationFailed; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * Base class for all broadcast operation based responses. @@ -30,35 +26,11 @@ public class BaseBroadcastResponse extends ActionResponse { public static final DefaultShardOperationFailedException[] EMPTY = new DefaultShardOperationFailedException[0]; - private static final ParseField _SHARDS_FIELD = new ParseField("_shards"); - private static final ParseField TOTAL_FIELD = new ParseField("total"); - private static final ParseField SUCCESSFUL_FIELD = new ParseField("successful"); - private static final ParseField FAILED_FIELD = new ParseField("failed"); - private static final ParseField FAILURES_FIELD = new ParseField("failures"); - private final int totalShards; private final int successfulShards; private final int failedShards; private final DefaultShardOperationFailedException[] shardFailures; - @SuppressWarnings("unchecked") - public static void declareBroadcastFields(ConstructingObjectParser PARSER) { - ConstructingObjectParser shardsParser = new ConstructingObjectParser<>( - "_shards", - true, - arg -> new BaseBroadcastResponse((int) arg[0], (int) arg[1], (int) arg[2], (List) arg[3]) - ); - shardsParser.declareInt(constructorArg(), TOTAL_FIELD); - shardsParser.declareInt(constructorArg(), SUCCESSFUL_FIELD); - shardsParser.declareInt(constructorArg(), FAILED_FIELD); - shardsParser.declareObjectArray( - optionalConstructorArg(), - (p, c) -> DefaultShardOperationFailedException.fromXContent(p), - FAILURES_FIELD - ); - PARSER.declareObject(constructorArg(), shardsParser, _SHARDS_FIELD); - } - public BaseBroadcastResponse(StreamInput in) throws IOException { totalShards = in.readVInt(); successfulShards = in.readVInt(); diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java index f236a9eff25a2..ad957f7a8f37f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java +++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java @@ -15,93 +15,25 @@ import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.Maps; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.HashMap; -import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; -import static java.util.Collections.emptyMap; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - public final class ClusterIndexHealth implements Writeable, ToXContentFragment { - private static final String STATUS = "status"; - private static final String NUMBER_OF_SHARDS = "number_of_shards"; - private static final String NUMBER_OF_REPLICAS = "number_of_replicas"; - private static final String ACTIVE_PRIMARY_SHARDS = "active_primary_shards"; - private static final String ACTIVE_SHARDS = "active_shards"; - private static final String RELOCATING_SHARDS = "relocating_shards"; - private static final String INITIALIZING_SHARDS = "initializing_shards"; - private static final String UNASSIGNED_SHARDS = "unassigned_shards"; - private static final String SHARDS = "shards"; - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "cluster_index_health", - true, - (parsedObjects, index) -> { - int i = 0; - int numberOfShards = (int) parsedObjects[i++]; - int numberOfReplicas = (int) parsedObjects[i++]; - int activeShards = (int) parsedObjects[i++]; - int relocatingShards = (int) parsedObjects[i++]; - int initializingShards = (int) parsedObjects[i++]; - int unassignedShards = (int) parsedObjects[i++]; - int activePrimaryShards = (int) parsedObjects[i++]; - String statusStr = (String) parsedObjects[i++]; - ClusterHealthStatus status = ClusterHealthStatus.fromString(statusStr); - @SuppressWarnings("unchecked") - List shardList = (List) parsedObjects[i]; - final Map shards; - if (shardList == null || shardList.isEmpty()) { - shards = emptyMap(); - } else { - shards = Maps.newMapWithExpectedSize(shardList.size()); - for (ClusterShardHealth shardHealth : shardList) { - shards.put(shardHealth.getShardId(), shardHealth); - } - } - return new ClusterIndexHealth( - index, - numberOfShards, - numberOfReplicas, - activeShards, - relocatingShards, - initializingShards, - unassignedShards, - activePrimaryShards, - status, - shards - ); - } - ); - - public static final ObjectParser.NamedObjectParser SHARD_PARSER = ( - XContentParser p, - String indexIgnored, - String shardId) -> ClusterShardHealth.innerFromXContent(p, Integer.valueOf(shardId)); - - static { - PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_REPLICAS)); - PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(RELOCATING_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(INITIALIZING_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(UNASSIGNED_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_PRIMARY_SHARDS)); - PARSER.declareString(constructorArg(), new ParseField(STATUS)); - // Can be absent if 
LEVEL == 'indices' or 'cluster' - PARSER.declareNamedObjects(optionalConstructorArg(), SHARD_PARSER, new ParseField(SHARDS)); - } + static final String STATUS = "status"; + static final String NUMBER_OF_SHARDS = "number_of_shards"; + static final String NUMBER_OF_REPLICAS = "number_of_replicas"; + static final String ACTIVE_PRIMARY_SHARDS = "active_primary_shards"; + static final String ACTIVE_SHARDS = "active_shards"; + static final String RELOCATING_SHARDS = "relocating_shards"; + static final String INITIALIZING_SHARDS = "initializing_shards"; + static final String UNASSIGNED_SHARDS = "unassigned_shards"; + static final String SHARDS = "shards"; private final String index; private final int numberOfShards; @@ -279,10 +211,6 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa return builder; } - public static ClusterIndexHealth innerFromXContent(XContentParser parser, String index) { - return PARSER.apply(parser, index); - } - @Override public String toString() { return "ClusterIndexHealth{" diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java index b3aa4275f7be7..785b0db5cc807 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java +++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java @@ -17,59 +17,20 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Locale; import java.util.Objects; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; - public final class ClusterShardHealth implements Writeable, ToXContentFragment { - private static final String STATUS = "status"; - private static final String ACTIVE_SHARDS = "active_shards"; - private static final String RELOCATING_SHARDS = "relocating_shards"; - private static final String INITIALIZING_SHARDS = "initializing_shards"; - private static final String UNASSIGNED_SHARDS = "unassigned_shards"; - private static final String PRIMARY_ACTIVE = "primary_active"; - - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "cluster_shard_health", - true, - (parsedObjects, shardId) -> { - int i = 0; - boolean primaryActive = (boolean) parsedObjects[i++]; - int activeShards = (int) parsedObjects[i++]; - int relocatingShards = (int) parsedObjects[i++]; - int initializingShards = (int) parsedObjects[i++]; - int unassignedShards = (int) parsedObjects[i++]; - String statusStr = (String) parsedObjects[i]; - ClusterHealthStatus status = ClusterHealthStatus.fromString(statusStr); - return new ClusterShardHealth( - shardId, - status, - activeShards, - relocatingShards, - initializingShards, - unassignedShards, - primaryActive - ); - } - ); - - static { - PARSER.declareBoolean(constructorArg(), new ParseField(PRIMARY_ACTIVE)); - PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(RELOCATING_SHARDS)); - 
PARSER.declareInt(constructorArg(), new ParseField(INITIALIZING_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(UNASSIGNED_SHARDS)); - PARSER.declareString(constructorArg(), new ParseField(STATUS)); - } + static final String STATUS = "status"; + static final String ACTIVE_SHARDS = "active_shards"; + static final String RELOCATING_SHARDS = "relocating_shards"; + static final String INITIALIZING_SHARDS = "initializing_shards"; + static final String UNASSIGNED_SHARDS = "unassigned_shards"; + static final String PRIMARY_ACTIVE = "primary_active"; private final int shardId; private final ClusterHealthStatus status; @@ -230,20 +191,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - static ClusterShardHealth innerFromXContent(XContentParser parser, Integer shardId) { - return PARSER.apply(parser, shardId); - } - - public static ClusterShardHealth fromXContent(XContentParser parser) throws IOException { - ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); - XContentParser.Token token = parser.nextToken(); - ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); - String shardIdStr = parser.currentName(); - ClusterShardHealth parsed = innerFromXContent(parser, Integer.valueOf(shardIdStr)); - ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); - return parsed; - } - @Override public String toString() { return Strings.toString(this); diff --git a/server/src/main/java/org/elasticsearch/script/ScriptLanguagesInfo.java b/server/src/main/java/org/elasticsearch/script/ScriptLanguagesInfo.java index 7b3ea4fbe4581..b64383c562c50 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptLanguagesInfo.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptLanguagesInfo.java @@ -11,23 +11,16 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.stream.Collectors; - -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; /** * The allowable types, languages and their corresponding contexts. 
When serialized there is a top level types_allowed list, @@ -68,10 +61,10 @@ * */ public class ScriptLanguagesInfo implements ToXContentObject, Writeable { - private static final ParseField TYPES_ALLOWED = new ParseField("types_allowed"); - private static final ParseField LANGUAGE_CONTEXTS = new ParseField("language_contexts"); - private static final ParseField LANGUAGE = new ParseField("language"); - private static final ParseField CONTEXTS = new ParseField("contexts"); + public static final ParseField TYPES_ALLOWED = new ParseField("types_allowed"); + public static final ParseField LANGUAGE_CONTEXTS = new ParseField("language_contexts"); + public static final ParseField LANGUAGE = new ParseField("language"); + public static final ParseField CONTEXTS = new ParseField("contexts"); public final Set typesAllowed; public final Map> languageContexts; @@ -86,31 +79,6 @@ public ScriptLanguagesInfo(StreamInput in) throws IOException { languageContexts = in.readImmutableMap(sin -> sin.readCollectionAsImmutableSet(StreamInput::readString)); } - @SuppressWarnings("unchecked") - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "script_languages_info", - true, - (a) -> new ScriptLanguagesInfo( - new HashSet<>((List) a[0]), - ((List>>) a[1]).stream().collect(Collectors.toMap(Tuple::v1, Tuple::v2)) - ) - ); - - @SuppressWarnings("unchecked") - private static final ConstructingObjectParser>, Void> LANGUAGE_CONTEXT_PARSER = - new ConstructingObjectParser<>("language_contexts", true, (m, name) -> new Tuple<>((String) m[0], Set.copyOf((List) m[1]))); - - static { - PARSER.declareStringArray(constructorArg(), TYPES_ALLOWED); - PARSER.declareObjectArray(constructorArg(), LANGUAGE_CONTEXT_PARSER, LANGUAGE_CONTEXTS); - LANGUAGE_CONTEXT_PARSER.declareString(constructorArg(), LANGUAGE); - LANGUAGE_CONTEXT_PARSER.declareStringArray(constructorArg(), CONTEXTS); - } - - public static ScriptLanguagesInfo fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(typesAllowed); diff --git a/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java b/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java index 5f8e6a893c1b5..e83fa79c79460 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java +++ b/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java @@ -15,20 +15,16 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.search.profile.query.CollectorResult; import org.elasticsearch.search.profile.query.QueryProfileShardResult; -import org.elasticsearch.xcontent.InstantiatingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParserConstructor; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - public class SearchProfileDfsPhaseResult implements Writeable, ToXContentObject { private final ProfileResult dfsShardResult; @@ -63,24 +59,8 @@ public void writeTo(StreamOutput out) throws IOException { } } - private static final ParseField STATISTICS = new 
ParseField("statistics"); - private static final ParseField KNN = new ParseField("knn"); - private static final InstantiatingObjectParser PARSER; - - static { - InstantiatingObjectParser.Builder parser = InstantiatingObjectParser.builder( - "search_profile_dfs_phase_result", - true, - SearchProfileDfsPhaseResult.class - ); - parser.declareObject(optionalConstructorArg(), (p, c) -> ProfileResult.fromXContent(p), STATISTICS); - parser.declareObjectArray(optionalConstructorArg(), (p, c) -> QueryProfileShardResult.fromXContent(p), KNN); - PARSER = parser.build(); - } - - public static SearchProfileDfsPhaseResult fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } + public static final ParseField STATISTICS = new ParseField("statistics"); + public static final ParseField KNN = new ParseField("knn"); @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java b/server/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java index e72ef2d9b3ece..8aebde23d6a87 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java +++ b/server/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java @@ -17,7 +17,6 @@ import org.elasticsearch.search.profile.ProfileResult; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; @@ -25,8 +24,6 @@ import java.util.List; import java.util.Objects; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; - /** * A container class to hold the profile results for a single shard in the request. * Contains a list of query profiles, a collector tree and a total rewrite tree. 
@@ -139,42 +136,4 @@ public int hashCode() { public String toString() { return Strings.toString(this); } - - public static QueryProfileShardResult fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token = parser.currentToken(); - ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); - String currentFieldName = null; - List queryProfileResults = new ArrayList<>(); - long rewriteTime = 0; - Long vectorOperationsCount = null; - CollectorResult collector = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if (REWRITE_TIME.equals(currentFieldName)) { - rewriteTime = parser.longValue(); - } else if (VECTOR_OPERATIONS_COUNT.equals(currentFieldName)) { - vectorOperationsCount = parser.longValue(); - } else { - parser.skipChildren(); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if (QUERY_ARRAY.equals(currentFieldName)) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - queryProfileResults.add(ProfileResult.fromXContent(parser)); - } - } else if (COLLECTOR.equals(currentFieldName)) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - collector = CollectorResult.fromXContent(parser); - } - } else { - parser.skipChildren(); - } - } else { - parser.skipChildren(); - } - } - return new QueryProfileShardResult(queryProfileResults, rewriteTime, collector, vectorOperationsCount); - } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java index d4231c9f7538b..11655a93097cc 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java @@ -110,7 +110,7 @@ public class ClusterHealthResponsesTests extends AbstractXContentSerializingTest private static final ObjectParser.NamedObjectParser INDEX_PARSER = ( XContentParser parser, Void context, - String index) -> ClusterIndexHealth.innerFromXContent(parser, index); + String index) -> ClusterIndexHealthTests.parseInstance(parser, index); static { // ClusterStateHealth fields diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatusTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatusTests.java index 4980d0f786d84..50f230022b375 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatusTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatusTests.java @@ -8,17 +8,63 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.function.Predicate; +import static 
java.util.Collections.emptyMap; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + public class SnapshotIndexStatusTests extends AbstractXContentTestCase { + static final ObjectParser.NamedObjectParser PARSER; + static { + ConstructingObjectParser innerParser = new ConstructingObjectParser<>( + "snapshot_index_status", + true, + (Object[] parsedObjects, String index) -> { + int i = 0; + SnapshotShardsStats shardsStats = ((SnapshotShardsStats) parsedObjects[i++]); + SnapshotStats stats = ((SnapshotStats) parsedObjects[i++]); + @SuppressWarnings("unchecked") + List shardStatuses = (List) parsedObjects[i]; + + final Map indexShards; + if (shardStatuses == null || shardStatuses.isEmpty()) { + indexShards = emptyMap(); + } else { + indexShards = Maps.newMapWithExpectedSize(shardStatuses.size()); + for (SnapshotIndexShardStatus shardStatus : shardStatuses) { + indexShards.put(shardStatus.getShardId().getId(), shardStatus); + } + } + return new SnapshotIndexStatus(index, indexShards, shardsStats, stats); + } + ); + innerParser.declareObject( + constructorArg(), + (p, c) -> SnapshotShardsStatsTests.PARSER.apply(p, null), + new ParseField(SnapshotShardsStats.Fields.SHARDS_STATS) + ); + innerParser.declareObject(constructorArg(), (p, c) -> SnapshotStats.fromXContent(p), new ParseField(SnapshotStats.Fields.STATS)); + innerParser.declareNamedObjects( + constructorArg(), + SnapshotIndexShardStatus.PARSER, + new ParseField(SnapshotIndexStatus.Fields.SHARDS) + ); + PARSER = ((p, c, name) -> innerParser.apply(p, name)); + } + @Override protected SnapshotIndexStatus createTestInstance() { String index = randomAlphaOfLength(10); @@ -40,7 +86,8 @@ protected Predicate getRandomFieldsExcludeFilter() { protected SnapshotIndexStatus doParseInstance(XContentParser parser) throws IOException { XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser); - SnapshotIndexStatus status = SnapshotIndexStatus.fromXContent(parser); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); + SnapshotIndexStatus status = PARSER.parse(parser, null, parser.currentName()); XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); return status; } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStatsTests.java index 9d4b8d601c63b..a9eacb49798f9 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStatsTests.java @@ -9,12 +9,39 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + public class SnapshotShardsStatsTests extends AbstractXContentTestCase { + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + SnapshotShardsStats.Fields.SHARDS_STATS, + true, + (Object[] 
parsedObjects) -> { + int i = 0; + int initializingShards = (int) parsedObjects[i++]; + int startedShards = (int) parsedObjects[i++]; + int finalizingShards = (int) parsedObjects[i++]; + int doneShards = (int) parsedObjects[i++]; + int failedShards = (int) parsedObjects[i++]; + int totalShards = (int) parsedObjects[i]; + return new SnapshotShardsStats(initializingShards, startedShards, finalizingShards, doneShards, failedShards, totalShards); + } + ); + static { + PARSER.declareInt(constructorArg(), new ParseField(SnapshotShardsStats.Fields.INITIALIZING)); + PARSER.declareInt(constructorArg(), new ParseField(SnapshotShardsStats.Fields.STARTED)); + PARSER.declareInt(constructorArg(), new ParseField(SnapshotShardsStats.Fields.FINALIZING)); + PARSER.declareInt(constructorArg(), new ParseField(SnapshotShardsStats.Fields.DONE)); + PARSER.declareInt(constructorArg(), new ParseField(SnapshotShardsStats.Fields.FAILED)); + PARSER.declareInt(constructorArg(), new ParseField(SnapshotShardsStats.Fields.TOTAL)); + } + @Override protected SnapshotShardsStats createTestInstance() { int initializingShards = randomInt(); @@ -28,7 +55,7 @@ protected SnapshotShardsStats createTestInstance() { @Override protected SnapshotShardsStats doParseInstance(XContentParser parser) throws IOException { - return SnapshotShardsStats.fromXContent(parser); + return PARSER.apply(parser, null); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java index 9c28930f12382..a32a66a55454f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java @@ -11,20 +11,79 @@ import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Strings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.function.Predicate; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + public class SnapshotStatusTests extends AbstractChunkedSerializingTestCase { + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "snapshot_status", + true, + (Object[] parsedObjects) -> { + int i = 0; + String name = (String) parsedObjects[i++]; + String repository = (String) parsedObjects[i++]; + String uuid = (String) parsedObjects[i++]; + String rawState = (String) parsedObjects[i++]; + Boolean includeGlobalState = (Boolean) parsedObjects[i++]; + SnapshotStats stats = ((SnapshotStats) parsedObjects[i++]); + SnapshotShardsStats shardsStats = ((SnapshotShardsStats) 
parsedObjects[i++]); + @SuppressWarnings("unchecked") + List indices = ((List) parsedObjects[i]); + + Snapshot snapshot = new Snapshot(repository, new SnapshotId(name, uuid)); + SnapshotsInProgress.State state = SnapshotsInProgress.State.valueOf(rawState); + Map indicesStatus; + List shards; + if (indices == null || indices.isEmpty()) { + indicesStatus = emptyMap(); + shards = emptyList(); + } else { + indicesStatus = Maps.newMapWithExpectedSize(indices.size()); + shards = new ArrayList<>(); + for (SnapshotIndexStatus index : indices) { + indicesStatus.put(index.getIndex(), index); + shards.addAll(index.getShards().values()); + } + } + return new SnapshotStatus(snapshot, state, shards, indicesStatus, shardsStats, stats, includeGlobalState); + } + ); + static { + PARSER.declareString(constructorArg(), new ParseField(SnapshotStatus.SNAPSHOT)); + PARSER.declareString(constructorArg(), new ParseField(SnapshotStatus.REPOSITORY)); + PARSER.declareString(constructorArg(), new ParseField(SnapshotStatus.UUID)); + PARSER.declareString(constructorArg(), new ParseField(SnapshotStatus.STATE)); + PARSER.declareBoolean(optionalConstructorArg(), new ParseField(SnapshotStatus.INCLUDE_GLOBAL_STATE)); + PARSER.declareField( + constructorArg(), + SnapshotStats::fromXContent, + new ParseField(SnapshotStats.Fields.STATS), + ObjectParser.ValueType.OBJECT + ); + PARSER.declareObject(constructorArg(), SnapshotShardsStatsTests.PARSER, new ParseField(SnapshotShardsStats.Fields.SHARDS_STATS)); + PARSER.declareNamedObjects(constructorArg(), SnapshotIndexStatusTests.PARSER, new ParseField(SnapshotStatus.INDICES)); + } + public void testToString() throws Exception { SnapshotsInProgress.State state = randomFrom(SnapshotsInProgress.State.values()); String uuid = UUIDs.randomBase64UUID(); @@ -180,7 +239,7 @@ protected Predicate getRandomFieldsExcludeFilter() { @Override protected SnapshotStatus doParseInstance(XContentParser parser) throws IOException { - return SnapshotStatus.fromXContent(parser); + return PARSER.parse(parser, null); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java index 21cba892669d0..6b921419c0fd4 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java @@ -33,7 +33,7 @@ public class SnapshotsStatusResponseTests extends AbstractChunkedSerializingTest } ); static { - PARSER.declareObjectArray(constructorArg(), SnapshotStatus.PARSER, new ParseField("snapshots")); + PARSER.declareObjectArray(constructorArg(), SnapshotStatusTests.PARSER, new ParseField("snapshots")); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java index f8d3871fbfa8f..ec56a57aa3a90 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java @@ -10,8 +10,10 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.set.Sets; +import 
org.elasticsearch.core.Tuple; import org.elasticsearch.script.ScriptLanguagesInfo; import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -22,8 +24,33 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; public class GetScriptLanguageResponseTests extends AbstractXContentSerializingTestCase { + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "script_languages_info", + true, + (a) -> new ScriptLanguagesInfo( + new HashSet<>((List) a[0]), + ((List>>) a[1]).stream().collect(Collectors.toMap(Tuple::v1, Tuple::v2)) + ) + ); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser>, Void> LANGUAGE_CONTEXT_PARSER = + new ConstructingObjectParser<>("language_contexts", true, (m, name) -> new Tuple<>((String) m[0], Set.copyOf((List) m[1]))); + + static { + PARSER.declareStringArray(constructorArg(), ScriptLanguagesInfo.TYPES_ALLOWED); + PARSER.declareObjectArray(constructorArg(), LANGUAGE_CONTEXT_PARSER, ScriptLanguagesInfo.LANGUAGE_CONTEXTS); + LANGUAGE_CONTEXT_PARSER.declareString(constructorArg(), ScriptLanguagesInfo.LANGUAGE); + LANGUAGE_CONTEXT_PARSER.declareStringArray(constructorArg(), ScriptLanguagesInfo.CONTEXTS); + } + private static int MAX_VALUES = 4; private static final int MIN_LENGTH = 1; private static final int MAX_LENGTH = 16; @@ -38,7 +65,7 @@ protected GetScriptLanguageResponse createTestInstance() { @Override protected GetScriptLanguageResponse doParseInstance(XContentParser parser) throws IOException { - return new GetScriptLanguageResponse(ScriptLanguagesInfo.fromXContent(parser)); + return new GetScriptLanguageResponse(PARSER.parse(parser, null)); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponseTests.java index f0802e471fc38..8cf8a1c064004 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponseTests.java @@ -25,7 +25,6 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.action.support.broadcast.BaseBroadcastResponse.declareBroadcastFields; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; public class ReloadAnalyzersResponseTests extends AbstractBroadcastResponseTestCase { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java index 9ec910e79918c..5df0fa27f1016 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.Set; -import static org.elasticsearch.action.support.broadcast.BaseBroadcastResponse.declareBroadcastFields; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; 
import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java index 6c45367baf674..7c50ba3beae76 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java @@ -17,12 +17,15 @@ import org.elasticsearch.action.delete.DeleteResponseTests; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.index.IndexResponseTests; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.action.update.UpdateResponseTests; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; @@ -43,6 +46,54 @@ public class BulkItemResponseTests extends ESTestCase { + /** + * Parse the output of the {@link DocWriteResponse#innerToXContent(XContentBuilder, ToXContent.Params)} method. + * + * This method is intended to be called by subclasses and must be called multiple times to parse all the information concerning + * {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly + * if needed and then immediately returns. + */ + public static void parseInnerToXContent(XContentParser parser, DocWriteResponse.Builder context) throws IOException { + XContentParser.Token token = parser.currentToken(); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + + String currentFieldName = parser.currentName(); + token = parser.nextToken(); + + if (token.isValue()) { + if (DocWriteResponse._INDEX.equals(currentFieldName)) { + // index uuid and shard id are unknown and can't be parsed back for now. 
+ context.setShardId(new ShardId(new Index(parser.text(), IndexMetadata.INDEX_UUID_NA_VALUE), -1)); + } else if (DocWriteResponse._ID.equals(currentFieldName)) { + context.setId(parser.text()); + } else if (DocWriteResponse._VERSION.equals(currentFieldName)) { + context.setVersion(parser.longValue()); + } else if (DocWriteResponse.RESULT.equals(currentFieldName)) { + String result = parser.text(); + for (DocWriteResponse.Result r : DocWriteResponse.Result.values()) { + if (r.getLowercase().equals(result)) { + context.setResult(r); + break; + } + } + } else if (DocWriteResponse.FORCED_REFRESH.equals(currentFieldName)) { + context.setForcedRefresh(parser.booleanValue()); + } else if (DocWriteResponse._SEQ_NO.equals(currentFieldName)) { + context.setSeqNo(parser.longValue()); + } else if (DocWriteResponse._PRIMARY_TERM.equals(currentFieldName)) { + context.setPrimaryTerm(parser.longValue()); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (DocWriteResponse._SHARDS.equals(currentFieldName)) { + context.setShardInfo(ReplicationResponse.ShardInfo.fromXContent(parser)); + } else { + parser.skipChildren(); // skip potential inner objects for forward compatibility + } + } else if (token == XContentParser.Token.START_ARRAY) { + parser.skipChildren(); // skip potential inner arrays for forward compatibility + } + } + public void testBulkItemResponseShouldContainTypeInV7CompatibilityMode() throws IOException { BulkItemResponse bulkItemResponse = BulkItemResponse.success( randomInt(), @@ -192,7 +243,7 @@ public static BulkItemResponse itemResponseFromXContent(XContentParser parser, i if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { final IndexResponse.Builder indexResponseBuilder = new IndexResponse.Builder(); builder = indexResponseBuilder; - itemParser = indexParser -> DocWriteResponse.parseInnerToXContent(indexParser, indexResponseBuilder); + itemParser = indexParser -> parseInnerToXContent(indexParser, indexResponseBuilder); } else if (opType == DocWriteRequest.OpType.UPDATE) { final UpdateResponse.Builder updateResponseBuilder = new UpdateResponse.Builder(); builder = updateResponseBuilder; @@ -201,7 +252,7 @@ public static BulkItemResponse itemResponseFromXContent(XContentParser parser, i } else if (opType == DocWriteRequest.OpType.DELETE) { final DeleteResponse.Builder deleteResponseBuilder = new DeleteResponse.Builder(); builder = deleteResponseBuilder; - itemParser = deleteParser -> DocWriteResponse.parseInnerToXContent(deleteParser, deleteResponseBuilder); + itemParser = deleteParser -> parseInnerToXContent(deleteParser, deleteResponseBuilder); } else { throwUnknownField(currentFieldName, parser); } diff --git a/server/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java b/server/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java index 937ac2d26ebb9..b22a30b533dd2 100644 --- a/server/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.delete; -import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkItemResponseTests; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -119,7 +119,7 @@ private static DeleteResponse parseInstance(XContentParser parser) throws IOExce DeleteResponse.Builder context 
= new DeleteResponse.Builder(); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - DocWriteResponse.parseInnerToXContent(parser, context); + BulkItemResponseTests.parseInnerToXContent(parser, context); } return context.build(); } diff --git a/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java index c8a8c3853601d..878c35b449366 100644 --- a/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.index; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkItemResponseTests; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -126,7 +127,7 @@ private static IndexResponse parseInstanceFromXContent(XContentParser parser) th ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); IndexResponse.Builder context = new IndexResponse.Builder(); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - DocWriteResponse.parseInnerToXContent(parser, context); + BulkItemResponseTests.parseInnerToXContent(parser, context); } return context.build(); } diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java index d35162287e3ac..0eefeb87d3e02 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.update; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkItemResponseTests; import org.elasticsearch.action.index.IndexResponseTests; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.Strings; @@ -214,7 +215,7 @@ public static void parseXContentFields(XContentParser parser, UpdateResponse.Bui context.setGetResult(GetResult.fromXContentEmbedded(parser)); } } else { - DocWriteResponse.parseInnerToXContent(parser, context); + BulkItemResponseTests.parseInnerToXContent(parser, context); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java index 637a18547b1b2..48d28462231a0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java @@ -12,8 +12,12 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTableGenerator; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; @@ -21,12 +25,16 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import 
java.util.List; import java.util.Locale; import java.util.Map; import java.util.function.Predicate; import java.util.regex.Pattern; +import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.hamcrest.CoreMatchers.equalTo; public class ClusterIndexHealthTests extends AbstractXContentSerializingTestCase { @@ -106,7 +114,7 @@ protected ClusterIndexHealth doParseInstance(XContentParser parser) throws IOExc XContentParser.Token token = parser.nextToken(); ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); String index = parser.currentName(); - ClusterIndexHealth parsed = ClusterIndexHealth.innerFromXContent(parser, index); + ClusterIndexHealth parsed = parseInstance(parser, index); ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); return parsed; } @@ -288,4 +296,66 @@ protected ClusterIndexHealth mutateInstance(ClusterIndexHealth instance) { throw new UnsupportedOperationException(); } } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "cluster_index_health", + true, + (parsedObjects, index) -> { + int i = 0; + int numberOfShards = (int) parsedObjects[i++]; + int numberOfReplicas = (int) parsedObjects[i++]; + int activeShards = (int) parsedObjects[i++]; + int relocatingShards = (int) parsedObjects[i++]; + int initializingShards = (int) parsedObjects[i++]; + int unassignedShards = (int) parsedObjects[i++]; + int activePrimaryShards = (int) parsedObjects[i++]; + String statusStr = (String) parsedObjects[i++]; + ClusterHealthStatus status = ClusterHealthStatus.fromString(statusStr); + @SuppressWarnings("unchecked") + List shardList = (List) parsedObjects[i]; + final Map shards; + if (shardList == null || shardList.isEmpty()) { + shards = emptyMap(); + } else { + shards = Maps.newMapWithExpectedSize(shardList.size()); + for (ClusterShardHealth shardHealth : shardList) { + shards.put(shardHealth.getShardId(), shardHealth); + } + } + return new ClusterIndexHealth( + index, + numberOfShards, + numberOfReplicas, + activeShards, + relocatingShards, + initializingShards, + unassignedShards, + activePrimaryShards, + status, + shards + ); + } + ); + + public static final ObjectParser.NamedObjectParser SHARD_PARSER = ( + XContentParser p, + String indexIgnored, + String shardId) -> ClusterShardHealthTests.PARSER.apply(p, Integer.valueOf(shardId)); + + static { + PARSER.declareInt(constructorArg(), new ParseField(ClusterIndexHealth.NUMBER_OF_SHARDS)); + PARSER.declareInt(constructorArg(), new ParseField(ClusterIndexHealth.NUMBER_OF_REPLICAS)); + PARSER.declareInt(constructorArg(), new ParseField(ClusterIndexHealth.ACTIVE_SHARDS)); + PARSER.declareInt(constructorArg(), new ParseField(ClusterIndexHealth.RELOCATING_SHARDS)); + PARSER.declareInt(constructorArg(), new ParseField(ClusterIndexHealth.INITIALIZING_SHARDS)); + PARSER.declareInt(constructorArg(), new ParseField(ClusterIndexHealth.UNASSIGNED_SHARDS)); + PARSER.declareInt(constructorArg(), new ParseField(ClusterIndexHealth.ACTIVE_PRIMARY_SHARDS)); + PARSER.declareString(constructorArg(), new ParseField(ClusterIndexHealth.STATUS)); + // Can be absent if LEVEL == 'indices' or 'cluster' + PARSER.declareNamedObjects(optionalConstructorArg(), SHARD_PARSER, new ParseField(ClusterIndexHealth.SHARDS)); + } 
+ + public static ClusterIndexHealth parseInstance(XContentParser parser, String index) { + return PARSER.apply(parser, index); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterShardHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterShardHealthTests.java index ce7c366ff30e6..1e1eacba183d2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterShardHealthTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterShardHealthTests.java @@ -9,17 +9,61 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Arrays; import java.util.function.Predicate; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + public class ClusterShardHealthTests extends AbstractXContentSerializingTestCase { + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "cluster_shard_health", + true, + (parsedObjects, shardId) -> { + int i = 0; + boolean primaryActive = (boolean) parsedObjects[i++]; + int activeShards = (int) parsedObjects[i++]; + int relocatingShards = (int) parsedObjects[i++]; + int initializingShards = (int) parsedObjects[i++]; + int unassignedShards = (int) parsedObjects[i++]; + String statusStr = (String) parsedObjects[i]; + ClusterHealthStatus status = ClusterHealthStatus.fromString(statusStr); + return new ClusterShardHealth( + shardId, + status, + activeShards, + relocatingShards, + initializingShards, + unassignedShards, + primaryActive + ); + } + ); + + static { + PARSER.declareBoolean(constructorArg(), new ParseField(ClusterShardHealth.PRIMARY_ACTIVE)); + PARSER.declareInt(constructorArg(), new ParseField(ClusterShardHealth.ACTIVE_SHARDS)); + PARSER.declareInt(constructorArg(), new ParseField(ClusterShardHealth.RELOCATING_SHARDS)); + PARSER.declareInt(constructorArg(), new ParseField(ClusterShardHealth.INITIALIZING_SHARDS)); + PARSER.declareInt(constructorArg(), new ParseField(ClusterShardHealth.UNASSIGNED_SHARDS)); + PARSER.declareString(constructorArg(), new ParseField(ClusterShardHealth.STATUS)); + } + @Override protected ClusterShardHealth doParseInstance(XContentParser parser) throws IOException { - return ClusterShardHealth.fromXContent(parser); + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + XContentParser.Token token = parser.nextToken(); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + String shardIdStr = parser.currentName(); + ClusterShardHealth parsed = PARSER.apply(parser, Integer.valueOf(shardIdStr)); + ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); + return parsed; } @Override diff --git a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java index c89edb29b5058..4855a043c565a 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.profile; import 
org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.profile.query.CollectorResult; import org.elasticsearch.search.profile.query.QueryProfileShardResult; import org.elasticsearch.search.profile.query.QueryProfileShardResultTests; @@ -48,7 +49,7 @@ protected Reader instanceReader() { @Override protected SearchProfileDfsPhaseResult doParseInstance(XContentParser parser) throws IOException { - return SearchProfileDfsPhaseResult.fromXContent(parser); + return SearchResponseUtils.parseProfileDfsPhaseResult(parser); } public void testCombineQueryProfileShardResults() { diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfileShardResultTests.java b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfileShardResultTests.java index f28425172ead5..56520c0c6d033 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfileShardResultTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfileShardResultTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.profile.query; import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.profile.ProfileResult; import org.elasticsearch.search.profile.ProfileResultTests; import org.elasticsearch.test.AbstractXContentSerializingTestCase; @@ -51,7 +52,7 @@ protected QueryProfileShardResult mutateInstance(QueryProfileShardResult instanc @Override protected QueryProfileShardResult doParseInstance(XContentParser parser) throws IOException { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); - QueryProfileShardResult result = QueryProfileShardResult.fromXContent(parser); + QueryProfileShardResult result = SearchResponseUtils.parseQueryProfileShardResult(parser); ensureExpectedToken(null, parser.nextToken(), parser); return result; } diff --git a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java index 71837ccf14387..8831149fec905 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java @@ -24,10 +24,12 @@ import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult; +import org.elasticsearch.search.profile.query.CollectorResult; import org.elasticsearch.search.profile.query.QueryProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.InstantiatingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; @@ -40,6 +42,7 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; public enum SearchResponseUtils { ; @@ -439,7 +442,7 @@ private static void parseProfileResultsEntry(XContentParser parser, Map PROFILE_DFS_PHASE_RESULT_PARSER; + + static { + InstantiatingObjectParser.Builder 
parser = InstantiatingObjectParser.builder( + "search_profile_dfs_phase_result", + true, + SearchProfileDfsPhaseResult.class + ); + parser.declareObject(optionalConstructorArg(), (p, c) -> ProfileResult.fromXContent(p), SearchProfileDfsPhaseResult.STATISTICS); + parser.declareObjectArray(optionalConstructorArg(), (p, c) -> parseQueryProfileShardResult(p), SearchProfileDfsPhaseResult.KNN); + PROFILE_DFS_PHASE_RESULT_PARSER = parser.build(); + } + + public static SearchProfileDfsPhaseResult parseProfileDfsPhaseResult(XContentParser parser) throws IOException { + return PROFILE_DFS_PHASE_RESULT_PARSER.parse(parser, null); + } + + public static QueryProfileShardResult parseQueryProfileShardResult(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + String currentFieldName = null; + List queryProfileResults = new ArrayList<>(); + long rewriteTime = 0; + Long vectorOperationsCount = null; + CollectorResult collector = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (QueryProfileShardResult.REWRITE_TIME.equals(currentFieldName)) { + rewriteTime = parser.longValue(); + } else if (QueryProfileShardResult.VECTOR_OPERATIONS_COUNT.equals(currentFieldName)) { + vectorOperationsCount = parser.longValue(); + } else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (QueryProfileShardResult.QUERY_ARRAY.equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + queryProfileResults.add(ProfileResult.fromXContent(parser)); + } + } else if (QueryProfileShardResult.COLLECTOR.equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + collector = CollectorResult.fromXContent(parser); + } + } else { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + return new QueryProfileShardResult(queryProfileResults, rewriteTime, collector, vectorOperationsCount); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBroadcastResponseTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBroadcastResponseTestCase.java index 5f720eededf02..751eed222ee7a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBroadcastResponseTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBroadcastResponseTestCase.java @@ -10,12 +10,15 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; @@ -24,12 +27,38 @@ import java.util.ArrayList; import java.util.List; +import static 
org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.hamcrest.CoreMatchers.anyOf; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; public abstract class AbstractBroadcastResponseTestCase extends AbstractXContentTestCase { + private static final ParseField _SHARDS_FIELD = new ParseField("_shards"); + private static final ParseField TOTAL_FIELD = new ParseField("total"); + private static final ParseField SUCCESSFUL_FIELD = new ParseField("successful"); + private static final ParseField FAILED_FIELD = new ParseField("failed"); + private static final ParseField FAILURES_FIELD = new ParseField("failures"); + + @SuppressWarnings("unchecked") + public static void declareBroadcastFields(ConstructingObjectParser PARSER) { + ConstructingObjectParser shardsParser = new ConstructingObjectParser<>( + "_shards", + true, + arg -> new BaseBroadcastResponse((int) arg[0], (int) arg[1], (int) arg[2], (List) arg[3]) + ); + shardsParser.declareInt(constructorArg(), TOTAL_FIELD); + shardsParser.declareInt(constructorArg(), SUCCESSFUL_FIELD); + shardsParser.declareInt(constructorArg(), FAILED_FIELD); + shardsParser.declareObjectArray( + optionalConstructorArg(), + (p, c) -> DefaultShardOperationFailedException.fromXContent(p), + FAILURES_FIELD + ); + PARSER.declareObject(constructorArg(), shardsParser, _SHARDS_FIELD); + } + @Override protected T createTestInstance() { int totalShards = randomIntBetween(1, 10); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 307daddd17c37..6905ee391a6eb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -72,6 +72,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.AbstractBroadcastResponseTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.DeprecationHandler; @@ -1312,7 +1313,7 @@ protected static BroadcastResponse refresh(String index) throws IOException { ); static { - BaseBroadcastResponse.declareBroadcastFields(BROADCAST_RESPONSE_PARSER); + AbstractBroadcastResponseTestCase.declareBroadcastFields(BROADCAST_RESPONSE_PARSER); } protected static BroadcastResponse refresh(RestClient client, String index) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumResponse.java index 43dc92857551a..d89732cb3b177 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumResponse.java @@ -7,22 +7,15 @@ package org.elasticsearch.xpack.core.termsenum.action; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Arrays; import java.util.Collections; import java.util.List; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - /** * The response of the _terms_enum action. */ @@ -31,28 +24,6 @@ public class TermsEnumResponse extends BroadcastResponse { public static final String TERMS_FIELD = "terms"; public static final String COMPLETE_FIELD = "complete"; - @SuppressWarnings("unchecked") - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "term_enum_results", - true, - arg -> { - BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; - return new TermsEnumResponse( - (List) arg[1], - response.getTotalShards(), - response.getSuccessfulShards(), - response.getFailedShards(), - Arrays.asList(response.getShardFailures()), - (Boolean) arg[2] - ); - } - ); - static { - declareBroadcastFields(PARSER); - PARSER.declareStringArray(optionalConstructorArg(), new ParseField(TERMS_FIELD)); - PARSER.declareBoolean(optionalConstructorArg(), new ParseField(COMPLETE_FIELD)); - } - private final List terms; private boolean complete; @@ -106,7 +77,4 @@ protected void addCustomXContentFields(XContentBuilder builder, Params params) t builder.field(COMPLETE_FIELD, complete); } - public static TermsEnumResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumResponseTests.java index a31c44a165cdf..1804de134c8fb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumResponseTests.java @@ -8,19 +8,48 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.AbstractBroadcastResponseTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.termsenum.action.TermsEnumResponse; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Set; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + public class TermsEnumResponseTests extends AbstractBroadcastResponseTestCase { + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "term_enum_results", + true, + arg -> { + BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; + return new TermsEnumResponse( + (List) arg[1], + response.getTotalShards(), + response.getSuccessfulShards(), + response.getFailedShards(), + Arrays.asList(response.getShardFailures()), + (Boolean) arg[2] + ); + } + ); + + static { 
+ AbstractBroadcastResponseTestCase.declareBroadcastFields(PARSER); + PARSER.declareStringArray(optionalConstructorArg(), new ParseField(TermsEnumResponse.TERMS_FIELD)); + PARSER.declareBoolean(optionalConstructorArg(), new ParseField(TermsEnumResponse.COMPLETE_FIELD)); + } + protected static List getRandomTerms() { int termCount = randomIntBetween(0, 100); Set uniqueTerms = Sets.newHashSetWithExpectedSize(termCount); @@ -48,7 +77,7 @@ private static TermsEnumResponse createRandomTermEnumResponse() { @Override protected TermsEnumResponse doParseInstance(XContentParser parser) throws IOException { - return TermsEnumResponse.fromXContent(parser); + return PARSER.apply(parser, null); } @Override From cb1b8fceaecc22d061eb6aabd7dd36dae20b778d Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 26 Mar 2024 12:17:16 -0400 Subject: [PATCH 66/79] Fix field caps and field level security (#106731) If you perform a `_field_caps` request on two indices with the same mapping but *different* field level security settings we were returning incorrect results. In particular, we'd return whatever fields were visible in one of the indices. It's random which one we'd return. --- docs/changelog/106731.yaml | 5 ++ .../search/fieldcaps/FieldCapabilitiesIT.java | 21 ++++- .../fieldcaps/FieldCapabilitiesFetcher.java | 8 +- .../cluster/metadata/Metadata.java | 7 +- .../index/mapper/MapperRegistry.java | 8 +- .../elasticsearch/indices/IndicesModule.java | 21 ++--- .../elasticsearch/indices/IndicesService.java | 4 +- .../elasticsearch/plugins/FieldPredicate.java | 90 +++++++++++++++++++ .../elasticsearch/plugins/MapperPlugin.java | 22 ++--- .../FieldCapabilitiesFilterTests.java | 31 +++++-- .../cluster/metadata/MetadataTests.java | 3 +- .../mapper/FieldFilterMapperPluginTests.java | 21 ++++- .../indices/IndicesModuleTests.java | 90 +++++++++++++++---- .../index/mapper/MockFieldFilterPlugin.java | 22 ++++- .../permission/AutomatonFieldPredicate.java | 78 ++++++++++++++++ .../authz/permission/FieldPermissions.java | 10 +++ .../core/LocalStateCompositeXPackPlugin.java | 5 +- .../AutomatonFieldPredicateTests.java | 36 ++++++++ .../xpack/esql/EsqlSecurityIT.java | 82 ++++++++++++++++- .../src/javaRestTest/resources/roles.yml | 13 +++ .../integration/FieldLevelSecurityTests.java | 66 +++++++++++++- .../xpack/security/Security.java | 13 +-- .../xpack/security/SecurityTests.java | 12 +-- 23 files changed, 581 insertions(+), 87 deletions(-) create mode 100644 docs/changelog/106731.yaml create mode 100644 server/src/main/java/org/elasticsearch/plugins/FieldPredicate.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/AutomatonFieldPredicate.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/AutomatonFieldPredicateTests.java diff --git a/docs/changelog/106731.yaml b/docs/changelog/106731.yaml new file mode 100644 index 0000000000000..0d8e16a8f9616 --- /dev/null +++ b/docs/changelog/106731.yaml @@ -0,0 +1,5 @@ +pr: 106731 +summary: Fix field caps and field level security +area: Security +type: bug +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 282e29866a699..64f04d46a9a90 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -48,6 +48,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; @@ -78,7 +79,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Function; -import java.util.function.Predicate; import java.util.stream.IntStream; import static java.util.Collections.singletonList; @@ -809,8 +809,23 @@ public Map getMetadataMappers() { } @Override - public Function> getFieldFilter() { - return index -> field -> field.equals("playlist") == false; + public Function getFieldFilter() { + return index -> new FieldPredicate() { + @Override + public boolean test(String field) { + return field.equals("playlist") == false; + } + + @Override + public String modifyHash(String hash) { + return "not-playlist:" + hash; + } + + @Override + public long ramBytesUsed() { + return 0; + } + }; } } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java index 6028a6e21ecff..51cb05f981177 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java @@ -21,6 +21,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; @@ -123,14 +124,15 @@ private FieldCapabilitiesIndexResponse doFetch( final String shardUuid = indexService.getShard(shardId.getId()).getShardUuid(); indexMappingHash = mapping == null ? 
shardUuid : shardUuid + mapping.getSha256(); } + FieldPredicate fieldPredicate = indicesService.getFieldFilter().apply(shardId.getIndexName()); if (indexMappingHash != null) { + indexMappingHash = fieldPredicate.modifyHash(indexMappingHash); final Map existing = indexMappingHashToResponses.get(indexMappingHash); if (existing != null) { return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), indexMappingHash, existing, true); } } task.ensureNotCancelled(); - Predicate fieldPredicate = indicesService.getFieldFilter().apply(shardId.getIndexName()); final Map responseMap = retrieveFieldCaps( searchExecutionContext, fieldNameFilter, @@ -151,7 +153,7 @@ static Map retrieveFieldCaps( Predicate fieldNameFilter, String[] filters, String[] types, - Predicate indexFieldfilter, + FieldPredicate fieldPredicate, IndexShard indexShard, boolean includeEmptyFields ) { @@ -169,7 +171,7 @@ static Map retrieveFieldCaps( } MappedFieldType ft = entry.getValue(); if ((includeEmptyFields || ft.fieldHasValue(fieldInfos)) - && (indexFieldfilter.test(ft.name()) || context.isMetadataField(ft.name())) + && (fieldPredicate.test(ft.name()) || context.isMetadataField(ft.name())) && (filter == null || filter.test(ft))) { IndexFieldCapabilities fieldCap = new IndexFieldCapabilities( field, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index b450251ff7e3f..f424861c5b7ff 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -51,6 +51,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.transport.Transports; @@ -921,7 +922,7 @@ private void findAliasInfo(final String[] aliases, final String[] possibleMatche */ public Map findMappings( String[] concreteIndices, - Function> fieldFilter, + Function> fieldFilter, Runnable onNextIndex ) { assert Transports.assertNotTransportThread("decompressing mappings is too expensive for a transport thread"); @@ -974,7 +975,7 @@ private static MappingMetadata filterFields(MappingMetadata mappingMetadata, Pre if (mappingMetadata == null) { return MappingMetadata.EMPTY_MAPPINGS; } - if (fieldPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) { + if (fieldPredicate == FieldPredicate.ACCEPT_ALL) { return mappingMetadata; } Map sourceAsMap = XContentHelper.convertToMap(mappingMetadata.source().compressedReference(), true).v2(); @@ -997,7 +998,7 @@ private static MappingMetadata filterFields(MappingMetadata mappingMetadata, Pre @SuppressWarnings("unchecked") private static boolean filterFields(String currentPath, Map fields, Predicate fieldPredicate) { - assert fieldPredicate != MapperPlugin.NOOP_FIELD_PREDICATE; + assert fieldPredicate != FieldPredicate.ACCEPT_ALL; Iterator> entryIterator = fields.entrySet().iterator(); while (entryIterator.hasNext()) { Map.Entry entry = entryIterator.next(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java index dcf24c9a61bbd..aa2a7ce2f3996 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java +++ 
b/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java @@ -10,13 +10,13 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import java.util.Collections; import java.util.LinkedHashMap; import java.util.Map; import java.util.function.Function; -import java.util.function.Predicate; /** * A registry for all field mappers. @@ -29,13 +29,13 @@ public final class MapperRegistry { private final Map metadataMapperParsers7x; private final Map metadataMapperParsers6x; private final Map metadataMapperParsers5x; - private final Function> fieldFilter; + private final Function fieldFilter; public MapperRegistry( Map mapperParsers, Map runtimeFieldParsers, Map metadataMapperParsers, - Function> fieldFilter + Function fieldFilter ) { this.mapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(mapperParsers)); this.runtimeFieldParsers = runtimeFieldParsers; @@ -92,7 +92,7 @@ public Map getMetadataMapperParsers(Inde * {@link MapperPlugin#getFieldFilter()}, only fields that match all the registered filters will be returned by get mappings, * get index, get field mappings and field capabilities API. */ - public Function> getFieldFilter() { + public Function getFieldFilter() { return fieldFilter; } } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index 048d9adb8e7e3..b17777fc5a91e 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -72,6 +72,7 @@ import org.elasticsearch.index.shard.PrimaryReplicaSyncer; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.store.IndicesStore; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; @@ -83,7 +84,6 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; -import java.util.function.Predicate; /** * Configures classes and services that are shared by indices on each node. @@ -307,18 +307,15 @@ public static Set getBuiltInMetadataFields() { return builtInMetadataFields; } - private static Function> getFieldFilter(List mapperPlugins) { - Function> fieldFilter = MapperPlugin.NOOP_FIELD_FILTER; + private static Function getFieldFilter(List mapperPlugins) { + Function fieldFilter = MapperPlugin.NOOP_FIELD_FILTER; for (MapperPlugin mapperPlugin : mapperPlugins) { fieldFilter = and(fieldFilter, mapperPlugin.getFieldFilter()); } return fieldFilter; } - private static Function> and( - Function> first, - Function> second - ) { + private static Function and(Function first, Function second) { // the purpose of this method is to not chain no-op field predicates, so that we can easily find out when no plugins plug in // a field filter, hence skip the mappings filtering part as a whole, as it requires parsing mappings into a map. 
if (first == MapperPlugin.NOOP_FIELD_FILTER) { @@ -328,15 +325,15 @@ private static Function> and( return first; } return index -> { - Predicate firstPredicate = first.apply(index); - Predicate secondPredicate = second.apply(index); - if (firstPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) { + FieldPredicate firstPredicate = first.apply(index); + FieldPredicate secondPredicate = second.apply(index); + if (firstPredicate == FieldPredicate.ACCEPT_ALL) { return secondPredicate; } - if (secondPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) { + if (secondPredicate == FieldPredicate.ACCEPT_ALL) { return firstPredicate; } - return firstPredicate.and(secondPredicate); + return new FieldPredicate.And(firstPredicate, secondPredicate); }; } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 3319b29df6dfa..026a20415aa91 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -128,6 +128,7 @@ import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.store.CompositeIndexFoldersDeletionListener; import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesService; @@ -168,7 +169,6 @@ import java.util.function.Consumer; import java.util.function.Function; import java.util.function.LongSupplier; -import java.util.function.Predicate; import java.util.stream.Collectors; import static java.util.Collections.emptyList; @@ -1756,7 +1756,7 @@ public void clearIndexShardCache(ShardId shardId, boolean queryCache, boolean fi * {@link org.elasticsearch.plugins.MapperPlugin#getFieldFilter()}, only fields that match all the registered filters will be * returned by get mappings, get index, get field mappings and field capabilities API. */ - public Function> getFieldFilter() { + public Function getFieldFilter() { return mapperRegistry.getFieldFilter(); } diff --git a/server/src/main/java/org/elasticsearch/plugins/FieldPredicate.java b/server/src/main/java/org/elasticsearch/plugins/FieldPredicate.java new file mode 100644 index 0000000000000..32692b9740f91 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/plugins/FieldPredicate.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugins; + +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.cluster.metadata.MappingMetadata; + +import java.util.function.Predicate; + +/** + * Filter for visible fields. + */ +public interface FieldPredicate extends Accountable, Predicate { + /** + * The default field predicate applied, which doesn't filter anything. That means that by default get mappings, get index + * get field mappings and field capabilities API will return every field that's present in the mappings. 
+ */ + FieldPredicate ACCEPT_ALL = new FieldPredicate() { + @Override + public boolean test(String field) { + return true; + } + + @Override + public String modifyHash(String hash) { + return hash; + } + + @Override + public long ramBytesUsed() { + return 0; // Shared + } + + @Override + public String toString() { + return "accept all"; + } + }; + + /** + * Should this field be included? + */ + @Override + boolean test(String field); + + /** + * Modify the {@link MappingMetadata#getSha256} to track any filtering this predicate + * has performed on the list of fields. + */ + String modifyHash(String hash); + + class And implements FieldPredicate { + private static final long SHALLOW_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(And.class); + + private final FieldPredicate first; + private final FieldPredicate second; + + public And(FieldPredicate first, FieldPredicate second) { + this.first = first; + this.second = second; + } + + @Override + public boolean test(String field) { + return first.test(field) && second.test(field); + } + + @Override + public String modifyHash(String hash) { + return second.modifyHash(first.modifyHash(hash)); + } + + @Override + public long ramBytesUsed() { + return SHALLOW_RAM_BYTES_USED + first.ramBytesUsed() + second.ramBytesUsed(); + } + + @Override + public String toString() { + return first + " then " + second; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/plugins/MapperPlugin.java b/server/src/main/java/org/elasticsearch/plugins/MapperPlugin.java index 401c014488f88..45f04487886d3 100644 --- a/server/src/main/java/org/elasticsearch/plugins/MapperPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/MapperPlugin.java @@ -8,7 +8,6 @@ package org.elasticsearch.plugins; -import org.elasticsearch.core.Predicates; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.RuntimeField; @@ -16,7 +15,6 @@ import java.util.Collections; import java.util.Map; import java.util.function.Function; -import java.util.function.Predicate; /** * An extension point for {@link Plugin} implementations to add custom mappers @@ -62,19 +60,23 @@ default Map getMetadataMappers() { * get index, get field mappings and field capabilities API. Useful to filter the fields that such API return. The predicate receives * the field name as input argument and should return true to show the field and false to hide it. */ - default Function> getFieldFilter() { + default Function getFieldFilter() { return NOOP_FIELD_FILTER; } - /** - * The default field predicate applied, which doesn't filter anything. That means that by default get mappings, get index - * get field mappings and field capabilities API will return every field that's present in the mappings. - */ - Predicate NOOP_FIELD_PREDICATE = Predicates.always(); - /** * The default field filter applied, which doesn't filter anything. That means that by default get mappings, get index * get field mappings and field capabilities API will return every field that's present in the mappings. 
*/ - Function> NOOP_FIELD_FILTER = index -> NOOP_FIELD_PREDICATE; + Function NOOP_FIELD_FILTER = new Function<>() { + @Override + public FieldPredicate apply(String index) { + return FieldPredicate.ACCEPT_ALL; + } + + @Override + public String toString() { + return "accept all"; + } + }; } diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFilterTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFilterTests.java index ffdc7b9ca7652..478012567c1ae 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFilterTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFilterTests.java @@ -14,10 +14,10 @@ import org.elasticsearch.index.mapper.MapperServiceTestCase; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.plugins.FieldPredicate; import java.io.IOException; import java.util.Map; -import java.util.function.Predicate; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -46,7 +46,7 @@ public void testExcludeNestedFields() throws IOException { s -> true, new String[] { "-nested" }, Strings.EMPTY_ARRAY, - f -> true, + FieldPredicate.ACCEPT_ALL, getMockIndexShard(), true ); @@ -74,7 +74,7 @@ public void testMetadataFilters() throws IOException { s -> true, new String[] { "+metadata" }, Strings.EMPTY_ARRAY, - f -> true, + FieldPredicate.ACCEPT_ALL, getMockIndexShard(), true ); @@ -87,7 +87,7 @@ public void testMetadataFilters() throws IOException { s -> true, new String[] { "-metadata" }, Strings.EMPTY_ARRAY, - f -> true, + FieldPredicate.ACCEPT_ALL, getMockIndexShard(), true ); @@ -120,7 +120,7 @@ public void testExcludeMultifields() throws IOException { s -> true, new String[] { "-multifield" }, Strings.EMPTY_ARRAY, - f -> true, + FieldPredicate.ACCEPT_ALL, getMockIndexShard(), true ); @@ -151,7 +151,7 @@ public void testDontIncludeParentInfo() throws IOException { s -> true, new String[] { "-parent" }, Strings.EMPTY_ARRAY, - f -> true, + FieldPredicate.ACCEPT_ALL, getMockIndexShard(), true ); @@ -171,7 +171,22 @@ public void testSecurityFilter() throws IOException { } } """); SearchExecutionContext sec = createSearchExecutionContext(mapperService); - Predicate securityFilter = f -> f.startsWith("permitted"); + FieldPredicate securityFilter = new FieldPredicate() { + @Override + public boolean test(String field) { + return field.startsWith("permitted"); + } + + @Override + public String modifyHash(String hash) { + return "only-permitted:" + hash; + } + + @Override + public long ramBytesUsed() { + return 0; + } + }; { Map response = FieldCapabilitiesFetcher.retrieveFieldCaps( @@ -223,7 +238,7 @@ public void testFieldTypeFiltering() throws IOException { s -> true, Strings.EMPTY_ARRAY, new String[] { "text", "keyword" }, - f -> true, + FieldPredicate.ACCEPT_ALL, getMockIndexShard(), true ); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java index 1e35a40dedc17..955d7d2de6882 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java @@ -42,6 +42,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import 
org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; import org.elasticsearch.test.ESTestCase; @@ -786,7 +787,7 @@ public void testFindMappingsWithFilters() throws IOException { if (index.equals("index2")) { return Predicates.never(); } - return MapperPlugin.NOOP_FIELD_PREDICATE; + return FieldPredicate.ACCEPT_ALL; }, Metadata.ON_NEXT_INDEX_FIND_MAPPINGS_NOOP); assertIndexMappingsNoFields(mappings, "index2"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java index 2b8be2882c409..ce406b604ba62 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -32,7 +33,6 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; -import java.util.function.Predicate; import static org.elasticsearch.cluster.metadata.MetadataTests.assertLeafs; import static org.elasticsearch.cluster.metadata.MetadataTests.assertMultiField; @@ -246,8 +246,23 @@ private static void assertNotFiltered(MappingMetadata mappingMetadata) { public static class FieldFilterPlugin extends Plugin implements MapperPlugin { @Override - public Function> getFieldFilter() { - return index -> index.equals("filtered") ? field -> field.endsWith("visible") : MapperPlugin.NOOP_FIELD_PREDICATE; + public Function getFieldFilter() { + return index -> false == index.equals("filtered") ? 
FieldPredicate.ACCEPT_ALL : new FieldPredicate() { + @Override + public boolean test(String field) { + return field.endsWith("visible"); + } + + @Override + public String modifyHash(String hash) { + return "only-visible:" + hash; + } + + @Override + public long ramBytesUsed() { + return 0; + } + }; } } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index cade1e66c7fc7..0216bad7cf7a3 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.index.IndexVersionUtils; @@ -44,11 +45,11 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; -import java.util.function.Predicate; import static org.elasticsearch.test.LambdaMatchers.falseWith; import static org.elasticsearch.test.LambdaMatchers.trueWith; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; @@ -246,24 +247,24 @@ public void testGetFieldFilter() { List mapperPlugins = List.of(new MapperPlugin() { }, new MapperPlugin() { @Override - public Function> getFieldFilter() { - return index -> index.equals("hidden_index") ? field -> false : MapperPlugin.NOOP_FIELD_PREDICATE; + public Function getFieldFilter() { + return index -> index.equals("hidden_index") ? HIDDEN_INDEX : FieldPredicate.ACCEPT_ALL; } }, new MapperPlugin() { @Override - public Function> getFieldFilter() { - return index -> field -> field.equals("hidden_field") == false; + public Function getFieldFilter() { + return index -> HIDDEN_FIELD; } }, new MapperPlugin() { @Override - public Function> getFieldFilter() { - return index -> index.equals("filtered") ? field -> field.equals("visible") : MapperPlugin.NOOP_FIELD_PREDICATE; + public Function getFieldFilter() { + return index -> index.equals("filtered") ? 
ONLY_VISIBLE : FieldPredicate.ACCEPT_ALL; } }); IndicesModule indicesModule = new IndicesModule(mapperPlugins); MapperRegistry mapperRegistry = indicesModule.getMapperRegistry(); - Function> fieldFilter = mapperRegistry.getFieldFilter(); + Function fieldFilter = mapperRegistry.getFieldFilter(); assertNotSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter); assertThat(fieldFilter.apply("hidden_index"), falseWith(randomAlphaOfLengthBetween(3, 5))); @@ -276,6 +277,10 @@ public Function> getFieldFilter() { assertThat(fieldFilter.apply("hidden_index"), falseWith("visible")); assertThat(fieldFilter.apply(randomAlphaOfLengthBetween(3, 5)), trueWith("visible")); assertThat(fieldFilter.apply("hidden_index"), falseWith("hidden_field")); + + assertThat(fieldFilter.apply("filtered").modifyHash("hash"), equalTo("only-visible:hide-field:hash")); + assertThat(fieldFilter.apply(randomAlphaOfLengthBetween(3, 5)).modifyHash("hash"), equalTo("hide-field:hash")); + assertThat(fieldFilter.apply("hidden_index").modifyHash("hash"), equalTo("hide-field:hidden:hash")); } public void testDefaultFieldFilterIsNoOp() { @@ -286,7 +291,7 @@ public void testDefaultFieldFilterIsNoOp() { }); } IndicesModule indicesModule = new IndicesModule(mapperPlugins); - Function> fieldFilter = indicesModule.getMapperRegistry().getFieldFilter(); + Function fieldFilter = indicesModule.getMapperRegistry().getFieldFilter(); assertSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter); } @@ -294,21 +299,72 @@ public void testNoOpFieldPredicate() { List mapperPlugins = Arrays.asList(new MapperPlugin() { }, new MapperPlugin() { @Override - public Function> getFieldFilter() { - return index -> index.equals("hidden_index") ? field -> false : MapperPlugin.NOOP_FIELD_PREDICATE; + public Function getFieldFilter() { + return index -> index.equals("hidden_index") ? HIDDEN_INDEX : FieldPredicate.ACCEPT_ALL; } }, new MapperPlugin() { @Override - public Function> getFieldFilter() { - return index -> index.equals("filtered") ? field -> field.equals("visible") : MapperPlugin.NOOP_FIELD_PREDICATE; + public Function getFieldFilter() { + return index -> index.equals("filtered") ? 
ONLY_VISIBLE : FieldPredicate.ACCEPT_ALL; } }); IndicesModule indicesModule = new IndicesModule(mapperPlugins); MapperRegistry mapperRegistry = indicesModule.getMapperRegistry(); - Function> fieldFilter = mapperRegistry.getFieldFilter(); - assertSame(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply(randomAlphaOfLengthBetween(3, 7))); - assertNotSame(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply("hidden_index")); - assertNotSame(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply("filtered")); + Function fieldFilter = mapperRegistry.getFieldFilter(); + assertSame(FieldPredicate.ACCEPT_ALL, fieldFilter.apply(randomAlphaOfLengthBetween(3, 7))); + assertNotSame(FieldPredicate.ACCEPT_ALL, fieldFilter.apply("hidden_index")); + assertNotSame(FieldPredicate.ACCEPT_ALL, fieldFilter.apply("filtered")); } + + private static final FieldPredicate HIDDEN_INDEX = new FieldPredicate() { + @Override + public boolean test(String field) { + return false; + } + + @Override + public String modifyHash(String hash) { + return "hidden:" + hash; + } + + @Override + public long ramBytesUsed() { + return 0; + } + }; + + private static final FieldPredicate HIDDEN_FIELD = new FieldPredicate() { + @Override + public boolean test(String field) { + return false == field.equals("hidden_field"); + } + + @Override + public String modifyHash(String hash) { + return "hide-field:" + hash; + } + + @Override + public long ramBytesUsed() { + return 0; + } + }; + + private static final FieldPredicate ONLY_VISIBLE = new FieldPredicate() { + @Override + public boolean test(String field) { + return field.equals("visible"); + } + + @Override + public String modifyHash(String hash) { + return "only-visible:" + hash; + } + + @Override + public long ramBytesUsed() { + return 0; + } + }; } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldFilterPlugin.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldFilterPlugin.java index 16cb0b4656fcf..61fc190e4952d 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldFilterPlugin.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldFilterPlugin.java @@ -8,18 +8,32 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.core.Predicates; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import java.util.function.Function; -import java.util.function.Predicate; public class MockFieldFilterPlugin extends Plugin implements MapperPlugin { @Override - public Function> getFieldFilter() { + public Function getFieldFilter() { // this filter doesn't filter any field out, but it's used to exercise the code path executed when the filter is not no-op - return index -> Predicates.always(); + return index -> new FieldPredicate() { + @Override + public boolean test(String field) { + return true; + } + + @Override + public String modifyHash(String hash) { + return hash + ":includeall"; + } + + @Override + public long ramBytesUsed() { + return 0; + } + }; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/AutomatonFieldPredicate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/AutomatonFieldPredicate.java new file mode 100644 index 0000000000000..90ee353b46eaa --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/AutomatonFieldPredicate.java @@ 
-0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.authz.permission; + +import org.apache.lucene.util.RamUsageEstimator; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.apache.lucene.util.automaton.Transition; +import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.plugins.FieldPredicate; + +import java.io.IOException; +import java.security.DigestOutputStream; +import java.security.MessageDigest; +import java.util.Base64; + +/** + * An implementation of {@link FieldPredicate} which matches fields + * against an {@link Automaton}. + */ +class AutomatonFieldPredicate implements FieldPredicate { + private final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(AutomatonFieldPredicate.class); + + private final String automatonHash; + private final CharacterRunAutomaton automaton; + + AutomatonFieldPredicate(Automaton originalAutomaton, CharacterRunAutomaton automaton) { + this.automatonHash = sha256(originalAutomaton); + this.automaton = automaton; + } + + @Override + public boolean test(String field) { + return automaton.run(field); + } + + @Override + public String modifyHash(String hash) { + return hash + ":" + automatonHash; + } + + @Override + public long ramBytesUsed() { + return SHALLOW_SIZE + RamUsageEstimator.sizeOf(automatonHash); // automaton itself is a shallow copy so not counted here + } + + private static String sha256(Automaton automaton) { + MessageDigest messageDigest = MessageDigests.sha256(); + try { + StreamOutput out = new OutputStreamStreamOutput(new DigestOutputStream(Streams.NULL_OUTPUT_STREAM, messageDigest)); + Transition t = new Transition(); + for (int state = 0; state < automaton.getNumStates(); state++) { + out.writeInt(state); + out.writeBoolean(automaton.isAccept(state)); + + int numTransitions = automaton.initTransition(state, t); + for (int i = 0; i < numTransitions; ++i) { + automaton.getNextTransition(t); + out.writeInt(t.dest); + out.writeInt(t.min); + out.writeInt(t.max); + } + } + } catch (IOException bogus) { + // cannot happen + throw new Error(bogus); + } + return Base64.getEncoder().encodeToString(messageDigest.digest()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java index 8f2088f55ade6..f3c2d9f62e40f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.xpack.core.security.authz.accesscontrol.FieldSubsetReader; import 
org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition.FieldGrantExcludeGroup; import org.elasticsearch.xpack.core.security.authz.support.SecurityQueryTemplateEvaluator.DlsQueryEvaluationContext; @@ -67,6 +68,7 @@ public final class FieldPermissions implements Accountable, CacheKey { private final CharacterRunAutomaton permittedFieldsAutomaton; private final boolean permittedFieldsAutomatonIsTotal; private final Automaton originalAutomaton; + private final FieldPredicate fieldPredicate; private final long ramBytesUsed; @@ -106,6 +108,9 @@ private FieldPermissions(List fieldPermissionsDefini this.permittedFieldsAutomaton = new CharacterRunAutomaton(permittedFieldsAutomaton); // we cache the result of isTotal since this might be a costly operation this.permittedFieldsAutomatonIsTotal = Operations.isTotal(permittedFieldsAutomaton); + this.fieldPredicate = permittedFieldsAutomatonIsTotal + ? FieldPredicate.ACCEPT_ALL + : new AutomatonFieldPredicate(originalAutomaton, this.permittedFieldsAutomaton); long ramBytesUsed = BASE_FIELD_PERM_DEF_BYTES; ramBytesUsed += this.fieldPermissionsDefinitions.stream() @@ -113,6 +118,7 @@ private FieldPermissions(List fieldPermissionsDefini .sum(); ramBytesUsed += permittedFieldsAutomaton.ramBytesUsed(); ramBytesUsed += runAutomatonRamBytesUsed(permittedFieldsAutomaton); + ramBytesUsed += fieldPredicate.ramBytesUsed(); this.ramBytesUsed = ramBytesUsed; } @@ -220,6 +226,10 @@ public boolean grantsAccessTo(String fieldName) { return permittedFieldsAutomatonIsTotal || permittedFieldsAutomaton.run(fieldName); } + public FieldPredicate fieldPredicate() { + return fieldPredicate; + } + public List getFieldPermissionsDefinitions() { return fieldPermissionsDefinitions; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index bd267d19398b0..918976c0d3db8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -67,6 +67,7 @@ import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.MapperPlugin; @@ -454,8 +455,8 @@ public void onIndexModule(IndexModule indexModule) { } @Override - public Function> getFieldFilter() { - List>> items = filterPlugins(MapperPlugin.class).stream() + public Function getFieldFilter() { + List> items = filterPlugins(MapperPlugin.class).stream() .map(p -> p.getFieldFilter()) .filter(p -> p.equals(NOOP_FIELD_FILTER) == false) .toList(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/AutomatonFieldPredicateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/AutomatonFieldPredicateTests.java new file mode 100644 index 0000000000000..d62cbb7dbab6b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/AutomatonFieldPredicateTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.authz.permission; + +import org.apache.lucene.util.automaton.Automata; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class AutomatonFieldPredicateTests extends ESTestCase { + public void testMatching() { + String str = randomAlphaOfLength(10); + Automaton a = Automata.makeString(str); + AutomatonFieldPredicate pred = new AutomatonFieldPredicate(a, new CharacterRunAutomaton(a)); + assertTrue(pred.test(str)); + assertFalse(pred.test(str + randomAlphaOfLength(1))); + } + + public void testHash() { + Automaton a = Automata.makeString("a"); + AutomatonFieldPredicate predA = new AutomatonFieldPredicate(a, new CharacterRunAutomaton(a)); + + Automaton b = Automata.makeString("b"); + AutomatonFieldPredicate predB = new AutomatonFieldPredicate(b, new CharacterRunAutomaton(b)); + + assertThat(predA.modifyHash("a"), not(equalTo(predB.modifyHash("a")))); + } +} diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java index bb8163915c1c4..2dd64cf02446b 100644 --- a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql; import org.apache.http.HttpStatus; +import org.apache.http.util.EntityUtils; import org.elasticsearch.Build; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -31,6 +32,9 @@ import java.util.Locale; import java.util.Map; +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class EsqlSecurityIT extends ESRestTestCase { @@ -47,6 +51,7 @@ public class EsqlSecurityIT extends ESRestTestCase { .user("user3", "x-pack-test-password", "user3", false) .user("user4", "x-pack-test-password", "user4", false) .user("user5", "x-pack-test-password", "user5", false) + .user("fls_user", "x-pack-test-password", "fls_user", false) .build(); @Override @@ -62,7 +67,11 @@ protected Settings restClientSettings() { private void indexDocument(String index, int id, double value, String org) throws IOException { Request indexDoc = new Request("PUT", index + "/_doc/" + id); - indexDoc.setJsonEntity("{\"value\":" + value + ",\"org\":\"" + org + "\"}"); + XContentBuilder builder = JsonXContent.contentBuilder().startObject(); + builder.field("value", value); + builder.field("org", org); + builder.field("partial", org + value); + indexDoc.setJsonEntity(Strings.toString(builder.endObject())); client().performRequest(indexDoc); } @@ -85,6 +94,11 @@ public void indexDocuments() throws IOException { indexDocument("index-user2", 1, 32.0, "marketing"); indexDocument("index-user2", 2, 40.0, "sales"); refresh("index-user2"); + + createIndex("indexpartial", Settings.EMPTY, mapping); + indexDocument("indexpartial", 1, 32.0, "marketing"); + indexDocument("indexpartial", 2, 
40.0, "sales"); + refresh("indexpartial"); } public void testAllowedIndices() throws Exception { @@ -122,7 +136,7 @@ public void testUnauthorizedIndices() throws IOException { assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(400)); } - public void testDLS() throws Exception { + public void testDocumentLevelSecurity() throws Exception { Response resp = runESQLCommand("user3", "from index | stats sum=sum(value)"); assertOK(resp); Map respMap = entityAsMap(resp); @@ -130,6 +144,69 @@ public void testDLS() throws Exception { assertThat(respMap.get("values"), equalTo(List.of(List.of(10.0)))); } + public void testFieldLevelSecurityAllow() throws Exception { + Response resp = runESQLCommand("fls_user", "FROM index* | SORT value | LIMIT 1"); + assertOK(resp); + assertMap( + entityAsMap(resp), + matchesMap().extraOk() + .entry( + "columns", + List.of( + matchesMap().entry("name", "partial").entry("type", "text"), + matchesMap().entry("name", "value").entry("type", "double") + ) + ) + .entry("values", List.of(List.of("sales10.0", 10.0))) + ); + } + + public void testFieldLevelSecurityAllowPartial() throws Exception { + Request request = new Request("GET", "/index*/_field_caps"); + request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", "fls_user")); + request.addParameter("error_trace", "true"); + request.addParameter("pretty", "true"); + request.addParameter("fields", "*"); + + request = new Request("GET", "/index*/_search"); + request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", "fls_user")); + request.addParameter("error_trace", "true"); + request.addParameter("pretty", "true"); + + Response resp = runESQLCommand("fls_user", "FROM index* | SORT partial | LIMIT 1"); + assertOK(resp); + assertMap( + entityAsMap(resp), + matchesMap().extraOk() + .entry( + "columns", + List.of( + matchesMap().entry("name", "partial").entry("type", "text"), + matchesMap().entry("name", "value").entry("type", "double") + ) + ) + .entry("values", List.of(List.of("engineering20.0", 20.0))) + ); + } + + public void testFieldLevelSecuritySpellingMistake() throws Exception { + ResponseException e = expectThrows( + ResponseException.class, + () -> runESQLCommand("fls_user", "FROM index* | SORT parial | LIMIT 1") + ); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat(EntityUtils.toString(e.getResponse().getEntity()), containsString("Unknown column [parial]")); + } + + public void testFieldLevelSecurityNotAllowed() throws Exception { + ResponseException e = expectThrows( + ResponseException.class, + () -> runESQLCommand("fls_user", "FROM index* | SORT org DESC | LIMIT 1") + ); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat(EntityUtils.toString(e.getResponse().getEntity()), containsString("Unknown column [org]")); + } + public void testRowCommand() throws Exception { String user = randomFrom("test-admin", "user1", "user2"); Response resp = runESQLCommand(user, "row a = 5, b = 2 | stats count=sum(b) by a"); @@ -283,6 +360,7 @@ protected Response runESQLCommand(String user, String command) throws IOExceptio Request request = new Request("POST", "_query"); request.setJsonEntity(Strings.toString(json)); request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", user)); + request.addParameter("error_trace", "true"); return client().performRequest(request); } diff --git 
a/x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml b/x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml index 7a89fa57f7102..7d134103afd28 100644 --- a/x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml @@ -51,9 +51,22 @@ user4: - names: ['index-user1', 'index', "test-enrich" ] privileges: - read + user5: cluster: [] indices: - names: ['index-user1', 'index', "test-enrich" ] privileges: - read + +fls_user: + cluster: [] + indices: + - names: [ 'index' ] + privileges: [ 'read' ] + field_security: + grant: [ value, partial ] + - names: [ 'indexpartial' ] + privileges: [ 'read' ] + field_security: + grant: [ value ] diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java index 83be62beab4ec..591b20bd82f47 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.search.ClosePointInTimeRequest; @@ -48,6 +49,7 @@ import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.search.vectors.KnnVectorQueryBuilder; import org.elasticsearch.test.InternalSettingsPlugin; @@ -68,6 +70,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; @@ -77,6 +80,8 @@ import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.join.query.JoinQueryBuilders.hasChildQuery; +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; @@ -137,6 +142,9 @@ protected String configUsers() { + "\n" + "user9:" + usersPasswHashed + + "\n" + + "user_different_fields:" + + usersPasswHashed + "\n"; } @@ -150,7 +158,8 @@ protected String configUsersRoles() { role5:user4,user7 role6:user5,user7 role7:user6 - role8:user9"""; + role8:user9 + role_different_fields:user_different_fields"""; } @Override @@ -213,6 +222,16 @@ protected String configRoles() { privileges: [ ALL ] field_security: grant: [ 'field*', 'query' ] + role_different_fields: + indices: + - names: [ 'partial1*' ] + privileges: [ 'read' ] + field_security: + grant: [ value, partial ] + - names: [ 'partial2*' ] + privileges: [ 
'read' ] + field_security: + grant: [ value ] """; } @@ -2336,4 +2355,49 @@ public void testLookupRuntimeFields() throws Exception { ); } + public void testSearchDifferentFieldsVisible() { + String firstName = "partial1" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + String secondName = "partial2" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + indexPartial(firstName, secondName); + SearchResponse response = client().filterWithHeader( + Map.of(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_different_fields", USERS_PASSWD)) + ).prepareSearch("partial*").addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC)).get(); + try { + assertMap(response.getHits().getAt(0).getSourceAsMap(), matchesMap().entry("value", 1).entry("partial", 2)); + assertMap(response.getHits().getAt(1).getSourceAsMap(), matchesMap().entry("value", 2)); + } finally { + response.decRef(); + } + } + + /** + * The fields {@code partial} is only visible in one of the two backing indices and field caps should show it. + */ + public void testFieldCapsDifferentFieldsVisible() { + String firstName = "partial1_" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + String secondName = "partial2_" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + indexPartial(firstName, secondName); + FieldCapabilitiesResponse response = client().filterWithHeader( + Map.of(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_different_fields", USERS_PASSWD)) + ).prepareFieldCaps("partial*").setFields("value", "partial").get(); + try { + assertThat(response.get().keySet(), equalTo(Set.of("value", "partial"))); + assertThat(response.getField("value").keySet(), equalTo(Set.of("long"))); + assertThat(response.getField("partial").keySet(), equalTo(Set.of("long"))); + } finally { + response.decRef(); + } + } + + private void indexPartial(String firstName, String secondName) { + BulkResponse bulkResponse = client().prepareBulk() + .add(client().prepareIndex(firstName).setSource("value", 1, "partial", 2)) + .add(client().prepareIndex(secondName).setSource("value", 2, "partial", 3)) + .setRefreshPolicy(IMMEDIATE) + .get(); + for (var i : bulkResponse.getItems()) { + assertThat(i.getFailure(), nullValue()); + assertThat(i.status(), equalTo(RestStatus.CREATED)); + } + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index ae6df838b4eac..5736d3e550f01 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -74,6 +74,7 @@ import org.elasticsearch.plugins.ClusterCoordinationPlugin; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.ExtensiblePlugin; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.NetworkPlugin; @@ -1947,29 +1948,29 @@ public UnaryOperator> getIndexTemplateMetadat } @Override - public Function> getFieldFilter() { + public Function getFieldFilter() { if (enabled) { return index -> { XPackLicenseState licenseState = getLicenseState(); IndicesAccessControl indicesAccessControl = threadContext.get() .getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); if (indicesAccessControl == null) { - return MapperPlugin.NOOP_FIELD_PREDICATE; + return FieldPredicate.ACCEPT_ALL; } assert 
indicesAccessControl.isGranted(); IndicesAccessControl.IndexAccessControl indexPermissions = indicesAccessControl.getIndexPermissions(index); if (indexPermissions == null) { - return MapperPlugin.NOOP_FIELD_PREDICATE; + return FieldPredicate.ACCEPT_ALL; } FieldPermissions fieldPermissions = indexPermissions.getFieldPermissions(); if (fieldPermissions.hasFieldLevelSecurity() == false) { - return MapperPlugin.NOOP_FIELD_PREDICATE; + return FieldPredicate.ACCEPT_ALL; } if (FIELD_LEVEL_SECURITY_FEATURE.checkWithoutTracking(licenseState) == false) { // check license last, once we know FLS is actually used - return MapperPlugin.NOOP_FIELD_PREDICATE; + return FieldPredicate.ACCEPT_ALL; } - return fieldPermissions::grantsAccessTo; + return fieldPermissions.fieldPredicate(); }; } return MapperPlugin.super.getFieldFilter(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 6a869377d7b07..f575bb6adc50e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -52,6 +52,7 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.license.internal.XPackLicenseStatus; import org.elasticsearch.plugins.ExtensiblePlugin; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.internal.RestExtension; import org.elasticsearch.rest.RestHandler; @@ -120,7 +121,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.function.Function; -import java.util.function.Predicate; import java.util.stream.Collectors; import static java.util.Collections.emptyMap; @@ -469,7 +469,7 @@ public void testJoinValidatorForFIPSOnForbiddenLicense() throws Exception { public void testGetFieldFilterSecurityEnabled() throws Exception { createComponents(Settings.EMPTY); - Function> fieldFilter = security.getFieldFilter(); + Function fieldFilter = security.getFieldFilter(); assertNotSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter); Map permissionsMap = new HashMap<>(); @@ -491,9 +491,9 @@ public void testGetFieldFilterSecurityEnabled() throws Exception { assertThat(fieldFilter.apply("index_granted"), trueWith("field_granted")); assertThat(fieldFilter.apply("index_granted"), falseWith(randomAlphaOfLengthBetween(3, 10))); - assertEquals(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply("index_granted_all_permissions")); + assertEquals(FieldPredicate.ACCEPT_ALL, fieldFilter.apply("index_granted_all_permissions")); assertThat(fieldFilter.apply("index_granted_all_permissions"), trueWith(randomAlphaOfLengthBetween(3, 10))); - assertEquals(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply("index_other")); + assertEquals(FieldPredicate.ACCEPT_ALL, fieldFilter.apply("index_other")); } public void testGetFieldFilterSecurityDisabled() throws Exception { @@ -503,7 +503,7 @@ public void testGetFieldFilterSecurityDisabled() throws Exception { public void testGetFieldFilterSecurityEnabledLicenseNoFLS() throws Exception { createComponents(Settings.EMPTY); - Function> fieldFilter = security.getFieldFilter(); + Function fieldFilter = security.getFieldFilter(); assertNotSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter); licenseState.update( new XPackLicenseStatus( @@ -513,7 +513,7 @@ public void 
testGetFieldFilterSecurityEnabledLicenseNoFLS() throws Exception { ) ); assertNotSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter); - assertSame(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply(randomAlphaOfLengthBetween(3, 6))); + assertSame(FieldPredicate.ACCEPT_ALL, fieldFilter.apply(randomAlphaOfLengthBetween(3, 6))); } public void testValidateRealmsWhenSettingsAreInvalid() { From d09dce277eb271c68d1afedd76f44e498dc04ba7 Mon Sep 17 00:00:00 2001 From: Tim Grein Date: Tue, 26 Mar 2024 17:20:42 +0100 Subject: [PATCH 67/79] [Inference API] Move organization constant to OpenAiServiceFields and use constants. (#106772) --- .../services/openai/OpenAiServiceFields.java | 2 ++ .../OpenAiChatCompletionServiceSettings.java | 3 +-- .../OpenAiChatCompletionTaskSettings.java | 3 +-- .../OpenAiEmbeddingsRequestTaskSettings.java | 2 +- .../OpenAiEmbeddingsServiceSettings.java | 2 +- .../OpenAiEmbeddingsTaskSettings.java | 2 +- ...ChatCompletionRequestTaskSettingsTests.java | 7 +++---- ...enAiChatCompletionServiceSettingsTests.java | 9 +++++---- .../OpenAiChatCompletionTaskSettingsTests.java | 13 ++++++------- ...enAiEmbeddingsRequestTaskSettingsTests.java | 5 +++-- .../OpenAiEmbeddingsServiceSettingsTests.java | 17 +++++++++-------- .../OpenAiEmbeddingsTaskSettingsTests.java | 18 +++++++----------- 12 files changed, 40 insertions(+), 43 deletions(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java index 1e2353f901705..bafe1b031b028 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java @@ -11,4 +11,6 @@ public class OpenAiServiceFields { public static final String USER = "user"; + public static final String ORGANIZATION = "organization_id"; + } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java index 0150d75b7037e..16b0ed5d47039 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java @@ -31,6 +31,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeAsType; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.ORGANIZATION; /** * Defines the service settings for interacting with OpenAI's chat completion models. 
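For reference, a minimal, self-contained sketch of the shared-constant pattern this patch introduces. The class and method names below are hypothetical; only the "organization_id" key and the optional, non-empty-string semantics are taken from the diff itself, so treat this as an illustration rather than the production parsing code:

    import java.util.Map;

    class OrganizationFieldExample {
        // mirrors the shared OpenAiServiceFields.ORGANIZATION key
        static final String ORGANIZATION = "organization_id";

        // optional field: absent or empty string means "no organization configured"
        static String readOrganization(Map<String, Object> settings) {
            Object value = settings.get(ORGANIZATION);
            if (value instanceof String s && s.isEmpty() == false) {
                return s;
            }
            return null;
        }
    }
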
@@ -39,8 +40,6 @@ public class OpenAiChatCompletionServiceSettings implements ServiceSettings { public static final String NAME = "openai_completion_service_settings"; - static final String ORGANIZATION = "organization_id"; - public static OpenAiChatCompletionServiceSettings fromMap(Map map) { ValidationException validationException = new ValidationException(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettings.java index fb10d959087de..2d5a407f3c1a6 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettings.java @@ -22,13 +22,12 @@ import java.util.Objects; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.USER; public class OpenAiChatCompletionTaskSettings implements TaskSettings { public static final String NAME = "openai_completion_task_settings"; - public static final String USER = "user"; - public static OpenAiChatCompletionTaskSettings fromMap(Map map) { ValidationException validationException = new ValidationException(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettings.java index 5bdb0d7542a83..373704af37fcd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettings.java @@ -16,7 +16,7 @@ import java.util.Map; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; -import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsTaskSettings.USER; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.USER; /** * This class handles extracting OpenAI task settings from a request. 
The difference between this class and diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java index 34713ff2b7208..01aa4f51799fb 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java @@ -37,6 +37,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractSimilarity; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeAsType; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.ORGANIZATION; /** * Defines the service settings for interacting with OpenAI's text embedding models. @@ -45,7 +46,6 @@ public class OpenAiEmbeddingsServiceSettings implements ServiceSettings { public static final String NAME = "openai_service_settings"; - static final String ORGANIZATION = "organization_id"; static final String DIMENSIONS_SET_BY_USER = "dimensions_set_by_user"; public static OpenAiEmbeddingsServiceSettings fromMap(Map map, ConfigurationParseContext context) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java index a7b11487ca72f..e306f2d3d2928 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java @@ -23,6 +23,7 @@ import java.util.Objects; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.USER; /** * Defines the task settings for the openai service. 
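The task-settings tests further down in this patch exercise the same shared "user" key through the of(original, requestOverride) merge. A hypothetical, self-contained sketch of that merge behaviour follows; the record and method names are illustrative, and only the "a request-supplied user replaces the persisted one, otherwise the persisted value is kept" semantics come from the tests themselves:

    record UserTaskSettingsExample(String user) {
        static UserTaskSettingsExample of(UserTaskSettingsExample persisted, UserTaskSettingsExample requestOverride) {
            // keep the persisted user unless the request explicitly supplies one
            String effectiveUser = requestOverride.user() != null ? requestOverride.user() : persisted.user();
            return new UserTaskSettingsExample(effectiveUser);
        }
    }
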
@@ -33,7 +34,6 @@ public class OpenAiEmbeddingsTaskSettings implements TaskSettings { public static final String NAME = "openai_embeddings_task_settings"; - public static final String USER = "user"; public static OpenAiEmbeddingsTaskSettings fromMap(Map map, ConfigurationParseContext context) { ValidationException validationException = new ValidationException(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettingsTests.java index 24632e120f94b..6fbdd3bf622d3 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettingsTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields; import java.util.HashMap; import java.util.Map; @@ -28,9 +29,7 @@ public void testFromMap_ReturnsEmptySettings_WhenTheMapDoesNotContainTheFields() } public void testFromMap_ReturnsUser() { - var settings = OpenAiChatCompletionRequestTaskSettings.fromMap( - new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user")) - ); + var settings = OpenAiChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user"))); assertThat(settings.user(), is("user")); } @@ -38,7 +37,7 @@ public static Map getChatCompletionRequestTaskSettingsMap(@Nulla var map = new HashMap(); if (user != null) { - map.put(OpenAiChatCompletionTaskSettings.USER, user); + map.put(OpenAiServiceFields.USER, user); } return map; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettingsTests.java index 8778b2f13e746..ba2460f7bc09a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettingsTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.ServiceUtils; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields; import java.io.IOException; import java.net.URI; @@ -40,7 +41,7 @@ public void testFromMap_Request_CreatesSettingsCorrectly() { modelId, ServiceFields.URL, url, - OpenAiChatCompletionServiceSettings.ORGANIZATION, + OpenAiServiceFields.ORGANIZATION, org, ServiceFields.MAX_INPUT_TOKENS, maxInputTokens @@ -61,7 +62,7 @@ public void testFromMap_MissingUrl_DoesNotThrowException() { Map.of( ServiceFields.MODEL_ID, modelId, - OpenAiChatCompletionServiceSettings.ORGANIZATION, + OpenAiServiceFields.ORGANIZATION, organization, ServiceFields.MAX_INPUT_TOKENS, maxInputTokens @@ -109,7 +110,7 @@ public void testFromMap_EmptyOrganization_ThrowsError() { var thrownException = expectThrows( 
ValidationException.class, () -> OpenAiChatCompletionServiceSettings.fromMap( - new HashMap<>(Map.of(OpenAiChatCompletionServiceSettings.ORGANIZATION, "", ServiceFields.MODEL_ID, "model")) + new HashMap<>(Map.of(OpenAiServiceFields.ORGANIZATION, "", ServiceFields.MODEL_ID, "model")) ) ); @@ -118,7 +119,7 @@ public void testFromMap_EmptyOrganization_ThrowsError() { containsString( org.elasticsearch.common.Strings.format( "Validation Failed: 1: [service_settings] Invalid value empty string. [%s] must be a non-empty string;", - OpenAiChatCompletionServiceSettings.ORGANIZATION + OpenAiServiceFields.ORGANIZATION ) ) ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettingsTests.java index 66a9ec371eb93..f2bd26a4e6432 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettingsTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields; import java.io.IOException; import java.util.HashMap; @@ -27,14 +28,14 @@ public static OpenAiChatCompletionTaskSettings createRandomWithUser() { public void testFromMap_WithUser() { assertEquals( new OpenAiChatCompletionTaskSettings("user"), - OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user"))) + OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user"))) ); } public void testFromMap_UserIsEmptyString() { var thrownException = expectThrows( ValidationException.class, - () -> OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, ""))) + () -> OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, ""))) ); assertThat( @@ -49,7 +50,7 @@ public void testFromMap_MissingUser_DoesNotThrowException() { } public void testOverrideWith_KeepsOriginalValuesWithOverridesAreNull() { - var taskSettings = OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user"))); + var taskSettings = OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user"))); var overriddenTaskSettings = OpenAiChatCompletionTaskSettings.of( taskSettings, @@ -59,11 +60,9 @@ public void testOverrideWith_KeepsOriginalValuesWithOverridesAreNull() { } public void testOverrideWith_UsesOverriddenSettings() { - var taskSettings = OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user"))); + var taskSettings = OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user"))); - var requestTaskSettings = OpenAiChatCompletionRequestTaskSettings.fromMap( - new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user2")) - ); + var requestTaskSettings = OpenAiChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user2"))); var overriddenTaskSettings = 
OpenAiChatCompletionTaskSettings.of(taskSettings, requestTaskSettings); assertThat(overriddenTaskSettings, is(new OpenAiChatCompletionTaskSettings("user2"))); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettingsTests.java index 5a39fcb61ff0a..c95853e2d0128 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettingsTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields; import java.util.HashMap; import java.util.Map; @@ -27,7 +28,7 @@ public void testFromMap_ReturnsEmptySettings_WhenTheMapDoesNotContainTheFields() } public void testFromMap_ReturnsUser() { - var settings = OpenAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user"))); + var settings = OpenAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user"))); assertThat(settings.user(), is("user")); } @@ -35,7 +36,7 @@ public static Map getRequestTaskSettingsMap(@Nullable String use var map = new HashMap(); if (user != null) { - map.put(OpenAiEmbeddingsTaskSettings.USER, user); + map.put(OpenAiServiceFields.USER, user); } return map; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java index 00cea6dc6ed21..e37318a0c96d4 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.ServiceUtils; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields; import org.hamcrest.CoreMatchers; import java.io.IOException; @@ -79,7 +80,7 @@ public void testFromMap_Request_CreatesSettingsCorrectly() { modelId, ServiceFields.URL, url, - OpenAiEmbeddingsServiceSettings.ORGANIZATION, + OpenAiServiceFields.ORGANIZATION, org, ServiceFields.SIMILARITY, similarity, @@ -121,7 +122,7 @@ public void testFromMap_Request_DimensionsSetByUser_IsFalse_WhenDimensionsAreNot modelId, ServiceFields.URL, url, - OpenAiEmbeddingsServiceSettings.ORGANIZATION, + OpenAiServiceFields.ORGANIZATION, org, ServiceFields.SIMILARITY, similarity, @@ -162,7 +163,7 @@ public void testFromMap_Persistent_CreatesSettingsCorrectly() { modelId, ServiceFields.URL, url, - OpenAiEmbeddingsServiceSettings.ORGANIZATION, + OpenAiServiceFields.ORGANIZATION, org, ServiceFields.SIMILARITY, similarity, @@ -219,7 +220,7 @@ public void testFromMap_PersistentContext_ThrowsException_WhenDimensionsSetByUse public 
void testFromMap_MissingUrl_DoesNotThrowException() { var serviceSettings = OpenAiEmbeddingsServiceSettings.fromMap( - new HashMap<>(Map.of(ServiceFields.MODEL_ID, "m", OpenAiEmbeddingsServiceSettings.ORGANIZATION, "org")), + new HashMap<>(Map.of(ServiceFields.MODEL_ID, "m", OpenAiServiceFields.ORGANIZATION, "org")), ConfigurationParseContext.REQUEST ); assertNull(serviceSettings.uri()); @@ -260,7 +261,7 @@ public void testFromMap_EmptyOrganization_ThrowsError() { var thrownException = expectThrows( ValidationException.class, () -> OpenAiEmbeddingsServiceSettings.fromMap( - new HashMap<>(Map.of(OpenAiEmbeddingsServiceSettings.ORGANIZATION, "", ServiceFields.MODEL_ID, "m")), + new HashMap<>(Map.of(OpenAiServiceFields.ORGANIZATION, "", ServiceFields.MODEL_ID, "m")), ConfigurationParseContext.REQUEST ) ); @@ -270,7 +271,7 @@ public void testFromMap_EmptyOrganization_ThrowsError() { containsString( Strings.format( "Validation Failed: 1: [service_settings] Invalid value empty string. [%s] must be a non-empty string;", - OpenAiEmbeddingsServiceSettings.ORGANIZATION + OpenAiServiceFields.ORGANIZATION ) ) ); @@ -375,7 +376,7 @@ public static Map getServiceSettingsMap(String modelId, @Nullabl } if (org != null) { - map.put(OpenAiEmbeddingsServiceSettings.ORGANIZATION, org); + map.put(OpenAiServiceFields.ORGANIZATION, org); } return map; } @@ -395,7 +396,7 @@ public static Map getServiceSettingsMap( } if (org != null) { - map.put(OpenAiEmbeddingsServiceSettings.ORGANIZATION, org); + map.put(OpenAiServiceFields.ORGANIZATION, org); } if (dimensions != null) { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java index 6448b66d11cf3..c5a510ef9de0c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields; import org.hamcrest.MatcherAssert; import java.io.IOException; @@ -38,10 +39,7 @@ public static OpenAiEmbeddingsTaskSettings createRandom() { public void testFromMap_WithUser() { assertEquals( new OpenAiEmbeddingsTaskSettings("user"), - OpenAiEmbeddingsTaskSettings.fromMap( - new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user")), - ConfigurationParseContext.REQUEST - ) + OpenAiEmbeddingsTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user")), ConfigurationParseContext.REQUEST) ); } @@ -49,7 +47,7 @@ public void testFromMap_UserIsEmptyString() { var thrownException = expectThrows( ValidationException.class, () -> OpenAiEmbeddingsTaskSettings.fromMap( - new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "")), + new HashMap<>(Map.of(OpenAiServiceFields.USER, "")), ConfigurationParseContext.REQUEST ) ); @@ -67,7 +65,7 @@ public void testFromMap_MissingUser_DoesNotThrowException() { public void testOverrideWith_KeepsOriginalValuesWithOverridesAreNull() { var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap( - new 
HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user")), + new HashMap<>(Map.of(OpenAiServiceFields.USER, "user")), ConfigurationParseContext.PERSISTENT ); @@ -77,13 +75,11 @@ public void testOverrideWith_KeepsOriginalValuesWithOverridesAreNull() { public void testOverrideWith_UsesOverriddenSettings() { var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap( - new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user")), + new HashMap<>(Map.of(OpenAiServiceFields.USER, "user")), ConfigurationParseContext.PERSISTENT ); - var requestTaskSettings = OpenAiEmbeddingsRequestTaskSettings.fromMap( - new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user2")) - ); + var requestTaskSettings = OpenAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user2"))); var overriddenTaskSettings = OpenAiEmbeddingsTaskSettings.of(taskSettings, requestTaskSettings); MatcherAssert.assertThat(overriddenTaskSettings, is(new OpenAiEmbeddingsTaskSettings("user2"))); @@ -108,7 +104,7 @@ public static Map getTaskSettingsMap(@Nullable String user) { var map = new HashMap(); if (user != null) { - map.put(OpenAiEmbeddingsTaskSettings.USER, user); + map.put(OpenAiServiceFields.USER, user); } return map; From 59354e35e1606b2fdd25c42077923382de5b2510 Mon Sep 17 00:00:00 2001 From: Tim Grein Date: Tue, 26 Mar 2024 17:39:23 +0100 Subject: [PATCH 68/79] [Inference API] Add XContentUtilsTests and java docs to XContentUtils (#106770) --- .../external/response/XContentUtils.java | 7 ++ .../external/response/XContentUtilsTests.java | 86 +++++++++++++++++++ 2 files changed, 93 insertions(+) create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/XContentUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/XContentUtils.java index 4f4091873fba9..3511cbda1841b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/XContentUtils.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/XContentUtils.java @@ -15,6 +15,13 @@ public class XContentUtils { + /** + * Moves to the first valid token, which is non-null. + * Does not move, if the parser is already positioned at a valid token. + * + * @param parser parser to move + * @throws IOException if underlying parser methods throw + */ public static void moveToFirstToken(XContentParser parser) throws IOException { if (parser.currentToken() == null) { parser.nextToken(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java new file mode 100644 index 0000000000000..c8de0371ab196 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.Locale; + +public class XContentUtilsTests extends ESTestCase { + + public void testMoveToFirstToken() throws IOException { + var json = """ + { + "key": "value" + } + """; + + try (XContentParser parser = createParser(XContentType.JSON.xContent(), json)) { + assertNull(parser.currentToken()); + + XContentUtils.moveToFirstToken(parser); + + assertEquals(XContentParser.Token.START_OBJECT, parser.currentToken()); + } + } + + public void testMoveToFirstToken_DoesNotMoveIfAlreadyAtAToken() throws IOException { + var json = """ + { + "key": "value" + } + """; + + try (XContentParser parser = createParser(XContentType.JSON.xContent(), json)) { + // position at a valid token + parser.nextToken(); + assertEquals(XContentParser.Token.START_OBJECT, parser.currentToken()); + + XContentUtils.moveToFirstToken(parser); + + // still at the beginning of the object + assertEquals(XContentParser.Token.START_OBJECT, parser.currentToken()); + } + } + + public void testPositionParserAtTokenAfterField() throws IOException { + var json = """ + { + "key": "value" + } + """; + + try (XContentParser parser = createParser(XContentType.JSON.xContent(), json)) { + XContentUtils.positionParserAtTokenAfterField(parser, "key", "some error"); + + assertEquals("value", parser.text()); + } + } + + public void testPositionParserAtTokenAfterField_ThrowsIfFieldIsMissing() throws IOException { + var json = """ + { + "key": "value" + } + """; + var errorFormat = "Error: %s"; + var missingField = "missing field"; + + try (XContentParser parser = createParser(XContentType.JSON.xContent(), json)) { + var exception = expectThrows( + IllegalStateException.class, + () -> XContentUtils.positionParserAtTokenAfterField(parser, missingField, errorFormat) + ); + + assertEquals(String.format(Locale.ROOT, errorFormat, missingField), exception.getMessage()); + } + } +} From cdb2e586403f0c2d84ad7931f49cb78f999ae3e8 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 26 Mar 2024 09:45:19 -0700 Subject: [PATCH 69/79] Track memory in rate aggregation function (#106730) We should track the memory usage of the individual state in the rate aggregation function. 
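For context, the accounting pattern this change applies can be summarized in a small, self-contained sketch: estimate the size of a state's backing arrays, charge the circuit breaker before allocating, and release the old estimate once a state has been replaced. The class name, breaker label, and grow() helper below are illustrative only and do not appear in the diff; the real code wires the breaker in from DriverContext.

    import org.apache.lucene.util.RamUsageEstimator;
    import org.elasticsearch.common.breaker.CircuitBreaker;

    /**
     * Illustrative sketch of the breaker-accounting pattern; names here are examples,
     * not part of the change itself.
     */
    class RateStateMemorySketch {
        private final CircuitBreaker breaker; // in the real code this comes from DriverContext.breaker()
        private long stateBytes;              // running estimate for all per-group states

        RateStateMemorySketch(CircuitBreaker breaker) {
            this.breaker = breaker;
        }

        // Size of one state holding `entries` timestamp/value pairs backed by a long[] and a double[].
        static long bytesUsed(int entries) {
            long ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries);
            long vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Double.BYTES * entries);
            return ts + vs;
        }

        // Charge the breaker (negative bytes release a previous charge) and keep a local total.
        void adjustBreaker(long bytes) {
            breaker.addEstimateBytesAndMaybeBreak(bytes, "rate aggregation state");
            stateBytes += bytes;
            assert stateBytes >= 0 : stateBytes;
        }

        // Growing a state: charge the new size first, copy, then release the old size.
        void grow(int oldEntries, int newEntries) {
            adjustBreaker(bytesUsed(newEntries));
            // ... allocate the larger arrays and copy the old contents ...
            adjustBreaker(-bytesUsed(oldEntries));
        }
    }

Charging before allocation means a request that would exceed the breaker limit fails with a CircuitBreakingException before the arrays are created, rather than being discovered after the memory has already been allocated.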
Relates #106703 --- .../gen/GroupingAggregatorImplementer.java | 2 +- .../aggregation/RateDoubleAggregator.java | 37 ++++++++++++++----- .../aggregation/RateIntAggregator.java | 37 ++++++++++++++----- .../aggregation/RateLongAggregator.java | 37 ++++++++++++++----- .../RateDoubleGroupingAggregatorFunction.java | 2 +- .../RateIntGroupingAggregatorFunction.java | 2 +- .../RateLongGroupingAggregatorFunction.java | 2 +- .../aggregation/X-RateAggregator.java.st | 37 ++++++++++++++----- 8 files changed, 112 insertions(+), 44 deletions(-) diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java index 1be01f445691d..cb65d2337d588 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java @@ -100,7 +100,7 @@ public GroupingAggregatorImplementer( this.createParameters = init.getParameters() .stream() .map(Parameter::from) - .filter(f -> false == f.type().equals(BIG_ARRAYS)) + .filter(f -> false == f.type().equals(BIG_ARRAYS) && false == f.type().equals(DRIVER_CONTEXT)) .collect(Collectors.toList()); this.implementation = ClassName.get( diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java index 016bf9387ca4b..a560eee4555e2 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.compute.ann.GroupingAggregator; @@ -35,9 +36,9 @@ @IntermediateState(name = "resets", type = "DOUBLE") } ) public class RateDoubleAggregator { - public static DoubleRateGroupingState initGrouping(BigArrays bigArrays, long unitInMillis) { - // TODO: pass BlockFactory instead bigArrays so we can use the breaker - return new DoubleRateGroupingState(bigArrays, unitInMillis); + + public static DoubleRateGroupingState initGrouping(DriverContext driverContext, long unitInMillis) { + return new DoubleRateGroupingState(driverContext.bigArrays(), driverContext.breaker(), unitInMillis); } public static void combine(DoubleRateGroupingState current, int groupId, long timestamp, double value) { @@ -68,7 +69,7 @@ public static Block evaluateFinal(DoubleRateGroupingState state, IntVector selec return state.evaluateFinal(selected, driverContext.blockFactory()); } - private static class DoubleRateState implements Accountable { + private static class DoubleRateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(DoubleRateState.class); final long[] timestamps; // descending order final double[] values; @@ -101,9 +102,10 @@ int entries() { return timestamps.length; } - @Override - public long ramBytesUsed() { - return BASE_RAM_USAGE; + static long bytesUsed(int entries) { + var ts = 
RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); + var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Double.BYTES * entries); + return BASE_RAM_USAGE + ts + vs; } } @@ -111,9 +113,12 @@ public static final class DoubleRateGroupingState implements Releasable, Account private ObjectArray states; private final long unitInMillis; private final BigArrays bigArrays; + private final CircuitBreaker breaker; + private long stateBytes; // for individual states - DoubleRateGroupingState(BigArrays bigArrays, long unitInMillis) { + DoubleRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker, long unitInMillis) { this.bigArrays = bigArrays; + this.breaker = breaker; this.states = bigArrays.newObjectArray(1); this.unitInMillis = unitInMillis; } @@ -122,16 +127,25 @@ void ensureCapacity(int groupId) { states = bigArrays.grow(states, groupId + 1); } + void adjustBreaker(long bytes) { + breaker.addEstimateBytesAndMaybeBreak(bytes, "<>"); + stateBytes += bytes; + assert stateBytes >= 0 : stateBytes; + } + void append(int groupId, long timestamp, double value) { ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker(DoubleRateState.bytesUsed(1)); state = new DoubleRateState(new long[] { timestamp }, new double[] { value }); states.set(groupId, state); } else { if (state.entries() == 1) { + adjustBreaker(DoubleRateState.bytesUsed(2)); state = new DoubleRateState(new long[] { state.timestamps[0], timestamp }, new double[] { state.values[0], value }); states.set(groupId, state); + adjustBreaker(-DoubleRateState.bytesUsed(1)); // old state } else { state.append(timestamp, value); } @@ -147,6 +161,7 @@ void combine(int groupId, LongBlock timestamps, DoubleBlock values, double reset ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker(DoubleRateState.bytesUsed(valueCount)); state = new DoubleRateState(valueCount); states.set(groupId, state); // TODO: add bulk_copy to Block @@ -155,9 +170,11 @@ void combine(int groupId, LongBlock timestamps, DoubleBlock values, double reset state.values[i] = values.getDouble(firstIndex + i); } } else { + adjustBreaker(DoubleRateState.bytesUsed(state.entries() + valueCount)); var newState = new DoubleRateState(state.entries() + valueCount); states.set(groupId, newState); merge(state, newState, firstIndex, valueCount, timestamps, values); + adjustBreaker(-DoubleRateState.bytesUsed(state.entries())); // old state } state.reset += reset; } @@ -193,12 +210,12 @@ void merge(DoubleRateState curr, DoubleRateState dst, int firstIndex, int rightC @Override public long ramBytesUsed() { - return states.ramBytesUsed(); + return states.ramBytesUsed() + stateBytes; } @Override public void close() { - Releasables.close(states); + Releasables.close(states, () -> adjustBreaker(-stateBytes)); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java index fbf43f7d72c46..8a536a42a2dbe 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; 
+import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.compute.ann.GroupingAggregator; @@ -36,9 +37,9 @@ @IntermediateState(name = "resets", type = "DOUBLE") } ) public class RateIntAggregator { - public static IntRateGroupingState initGrouping(BigArrays bigArrays, long unitInMillis) { - // TODO: pass BlockFactory instead bigArrays so we can use the breaker - return new IntRateGroupingState(bigArrays, unitInMillis); + + public static IntRateGroupingState initGrouping(DriverContext driverContext, long unitInMillis) { + return new IntRateGroupingState(driverContext.bigArrays(), driverContext.breaker(), unitInMillis); } public static void combine(IntRateGroupingState current, int groupId, long timestamp, int value) { @@ -69,7 +70,7 @@ public static Block evaluateFinal(IntRateGroupingState state, IntVector selected return state.evaluateFinal(selected, driverContext.blockFactory()); } - private static class IntRateState implements Accountable { + private static class IntRateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(IntRateState.class); final long[] timestamps; // descending order final int[] values; @@ -102,9 +103,10 @@ int entries() { return timestamps.length; } - @Override - public long ramBytesUsed() { - return BASE_RAM_USAGE; + static long bytesUsed(int entries) { + var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); + var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Integer.BYTES * entries); + return BASE_RAM_USAGE + ts + vs; } } @@ -112,9 +114,12 @@ public static final class IntRateGroupingState implements Releasable, Accountabl private ObjectArray states; private final long unitInMillis; private final BigArrays bigArrays; + private final CircuitBreaker breaker; + private long stateBytes; // for individual states - IntRateGroupingState(BigArrays bigArrays, long unitInMillis) { + IntRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker, long unitInMillis) { this.bigArrays = bigArrays; + this.breaker = breaker; this.states = bigArrays.newObjectArray(1); this.unitInMillis = unitInMillis; } @@ -123,16 +128,25 @@ void ensureCapacity(int groupId) { states = bigArrays.grow(states, groupId + 1); } + void adjustBreaker(long bytes) { + breaker.addEstimateBytesAndMaybeBreak(bytes, "<>"); + stateBytes += bytes; + assert stateBytes >= 0 : stateBytes; + } + void append(int groupId, long timestamp, int value) { ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker(IntRateState.bytesUsed(1)); state = new IntRateState(new long[] { timestamp }, new int[] { value }); states.set(groupId, state); } else { if (state.entries() == 1) { + adjustBreaker(IntRateState.bytesUsed(2)); state = new IntRateState(new long[] { state.timestamps[0], timestamp }, new int[] { state.values[0], value }); states.set(groupId, state); + adjustBreaker(-IntRateState.bytesUsed(1)); // old state } else { state.append(timestamp, value); } @@ -148,6 +162,7 @@ void combine(int groupId, LongBlock timestamps, IntBlock values, double reset, i ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker(IntRateState.bytesUsed(valueCount)); state = new IntRateState(valueCount); states.set(groupId, state); // TODO: add bulk_copy to Block @@ -156,9 +171,11 @@ void combine(int groupId, LongBlock 
timestamps, IntBlock values, double reset, i state.values[i] = values.getInt(firstIndex + i); } } else { + adjustBreaker(IntRateState.bytesUsed(state.entries() + valueCount)); var newState = new IntRateState(state.entries() + valueCount); states.set(groupId, newState); merge(state, newState, firstIndex, valueCount, timestamps, values); + adjustBreaker(-IntRateState.bytesUsed(state.entries())); // old state } state.reset += reset; } @@ -194,12 +211,12 @@ void merge(IntRateState curr, IntRateState dst, int firstIndex, int rightCount, @Override public long ramBytesUsed() { - return states.ramBytesUsed(); + return states.ramBytesUsed() + stateBytes; } @Override public void close() { - Releasables.close(states); + Releasables.close(states, () -> adjustBreaker(-stateBytes)); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java index b5d0dfc8aabdb..eed95ab602db8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.compute.ann.GroupingAggregator; @@ -35,9 +36,9 @@ @IntermediateState(name = "resets", type = "DOUBLE") } ) public class RateLongAggregator { - public static LongRateGroupingState initGrouping(BigArrays bigArrays, long unitInMillis) { - // TODO: pass BlockFactory instead bigArrays so we can use the breaker - return new LongRateGroupingState(bigArrays, unitInMillis); + + public static LongRateGroupingState initGrouping(DriverContext driverContext, long unitInMillis) { + return new LongRateGroupingState(driverContext.bigArrays(), driverContext.breaker(), unitInMillis); } public static void combine(LongRateGroupingState current, int groupId, long timestamp, long value) { @@ -68,7 +69,7 @@ public static Block evaluateFinal(LongRateGroupingState state, IntVector selecte return state.evaluateFinal(selected, driverContext.blockFactory()); } - private static class LongRateState implements Accountable { + private static class LongRateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(LongRateState.class); final long[] timestamps; // descending order final long[] values; @@ -101,9 +102,10 @@ int entries() { return timestamps.length; } - @Override - public long ramBytesUsed() { - return BASE_RAM_USAGE; + static long bytesUsed(int entries) { + var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); + var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); + return BASE_RAM_USAGE + ts + vs; } } @@ -111,9 +113,12 @@ public static final class LongRateGroupingState implements Releasable, Accountab private ObjectArray states; private final long unitInMillis; private final BigArrays bigArrays; + private final CircuitBreaker breaker; + private long stateBytes; // for individual states - LongRateGroupingState(BigArrays bigArrays, long unitInMillis) { + LongRateGroupingState(BigArrays bigArrays, 
CircuitBreaker breaker, long unitInMillis) { this.bigArrays = bigArrays; + this.breaker = breaker; this.states = bigArrays.newObjectArray(1); this.unitInMillis = unitInMillis; } @@ -122,16 +127,25 @@ void ensureCapacity(int groupId) { states = bigArrays.grow(states, groupId + 1); } + void adjustBreaker(long bytes) { + breaker.addEstimateBytesAndMaybeBreak(bytes, "<>"); + stateBytes += bytes; + assert stateBytes >= 0 : stateBytes; + } + void append(int groupId, long timestamp, long value) { ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker(LongRateState.bytesUsed(1)); state = new LongRateState(new long[] { timestamp }, new long[] { value }); states.set(groupId, state); } else { if (state.entries() == 1) { + adjustBreaker(LongRateState.bytesUsed(2)); state = new LongRateState(new long[] { state.timestamps[0], timestamp }, new long[] { state.values[0], value }); states.set(groupId, state); + adjustBreaker(-LongRateState.bytesUsed(1)); // old state } else { state.append(timestamp, value); } @@ -147,6 +161,7 @@ void combine(int groupId, LongBlock timestamps, LongBlock values, double reset, ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker(LongRateState.bytesUsed(valueCount)); state = new LongRateState(valueCount); states.set(groupId, state); // TODO: add bulk_copy to Block @@ -155,9 +170,11 @@ void combine(int groupId, LongBlock timestamps, LongBlock values, double reset, state.values[i] = values.getLong(firstIndex + i); } } else { + adjustBreaker(LongRateState.bytesUsed(state.entries() + valueCount)); var newState = new LongRateState(state.entries() + valueCount); states.set(groupId, newState); merge(state, newState, firstIndex, valueCount, timestamps, values); + adjustBreaker(-LongRateState.bytesUsed(state.entries())); // old state } state.reset += reset; } @@ -193,12 +210,12 @@ void merge(LongRateState curr, LongRateState dst, int firstIndex, int rightCount @Override public long ramBytesUsed() { - return states.ramBytesUsed(); + return states.ramBytesUsed() + stateBytes; } @Override public void close() { - Releasables.close(states); + Releasables.close(states, () -> adjustBreaker(-stateBytes)); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java index 608221614c483..8d9e011891e95 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java @@ -49,7 +49,7 @@ public RateDoubleGroupingAggregatorFunction(List channels, public static RateDoubleGroupingAggregatorFunction create(List channels, DriverContext driverContext, long unitInMillis) { - return new RateDoubleGroupingAggregatorFunction(channels, RateDoubleAggregator.initGrouping(driverContext.bigArrays(), unitInMillis), driverContext, unitInMillis); + return new RateDoubleGroupingAggregatorFunction(channels, RateDoubleAggregator.initGrouping(driverContext, unitInMillis), driverContext, unitInMillis); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java index df954d92a6d2a..6bd4b833dc9e6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java @@ -49,7 +49,7 @@ public RateIntGroupingAggregatorFunction(List channels, public static RateIntGroupingAggregatorFunction create(List channels, DriverContext driverContext, long unitInMillis) { - return new RateIntGroupingAggregatorFunction(channels, RateIntAggregator.initGrouping(driverContext.bigArrays(), unitInMillis), driverContext, unitInMillis); + return new RateIntGroupingAggregatorFunction(channels, RateIntAggregator.initGrouping(driverContext, unitInMillis), driverContext, unitInMillis); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java index fb536465ed973..27318d6496737 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java @@ -49,7 +49,7 @@ public RateLongGroupingAggregatorFunction(List channels, public static RateLongGroupingAggregatorFunction create(List channels, DriverContext driverContext, long unitInMillis) { - return new RateLongGroupingAggregatorFunction(channels, RateLongAggregator.initGrouping(driverContext.bigArrays(), unitInMillis), driverContext, unitInMillis); + return new RateLongGroupingAggregatorFunction(channels, RateLongAggregator.initGrouping(driverContext, unitInMillis), driverContext, unitInMillis); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st index 9ace663fec990..86f5e058bd19c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st @@ -9,6 +9,7 @@ package org.elasticsearch.compute.aggregation; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.compute.ann.GroupingAggregator; @@ -38,9 +39,9 @@ import org.elasticsearch.core.Releasables; @IntermediateState(name = "resets", type = "DOUBLE") } ) public class Rate$Type$Aggregator { - public static $Type$RateGroupingState initGrouping(BigArrays bigArrays, long unitInMillis) { - // TODO: pass BlockFactory instead bigArrays so we can use the breaker - return new $Type$RateGroupingState(bigArrays, unitInMillis); + + public static $Type$RateGroupingState initGrouping(DriverContext driverContext, long unitInMillis) { + return new $Type$RateGroupingState(driverContext.bigArrays(), driverContext.breaker(), unitInMillis); } public 
static void combine($Type$RateGroupingState current, int groupId, long timestamp, $type$ value) { @@ -71,7 +72,7 @@ public class Rate$Type$Aggregator { return state.evaluateFinal(selected, driverContext.blockFactory()); } - private static class $Type$RateState implements Accountable { + private static class $Type$RateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject($Type$RateState.class); final long[] timestamps; // descending order final $type$[] values; @@ -104,9 +105,10 @@ public class Rate$Type$Aggregator { return timestamps.length; } - @Override - public long ramBytesUsed() { - return BASE_RAM_USAGE; + static long bytesUsed(int entries) { + var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); + var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) $BYTES$ * entries); + return BASE_RAM_USAGE + ts + vs; } } @@ -114,9 +116,12 @@ public class Rate$Type$Aggregator { private ObjectArray<$Type$RateState> states; private final long unitInMillis; private final BigArrays bigArrays; + private final CircuitBreaker breaker; + private long stateBytes; // for individual states - $Type$RateGroupingState(BigArrays bigArrays, long unitInMillis) { + $Type$RateGroupingState(BigArrays bigArrays, CircuitBreaker breaker, long unitInMillis) { this.bigArrays = bigArrays; + this.breaker = breaker; this.states = bigArrays.newObjectArray(1); this.unitInMillis = unitInMillis; } @@ -125,16 +130,25 @@ public class Rate$Type$Aggregator { states = bigArrays.grow(states, groupId + 1); } + void adjustBreaker(long bytes) { + breaker.addEstimateBytesAndMaybeBreak(bytes, "<>"); + stateBytes += bytes; + assert stateBytes >= 0 : stateBytes; + } + void append(int groupId, long timestamp, $type$ value) { ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker($Type$RateState.bytesUsed(1)); state = new $Type$RateState(new long[] { timestamp }, new $type$[] { value }); states.set(groupId, state); } else { if (state.entries() == 1) { + adjustBreaker($Type$RateState.bytesUsed(2)); state = new $Type$RateState(new long[] { state.timestamps[0], timestamp }, new $type$[] { state.values[0], value }); states.set(groupId, state); + adjustBreaker(-$Type$RateState.bytesUsed(1)); // old state } else { state.append(timestamp, value); } @@ -150,6 +164,7 @@ public class Rate$Type$Aggregator { ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker($Type$RateState.bytesUsed(valueCount)); state = new $Type$RateState(valueCount); states.set(groupId, state); // TODO: add bulk_copy to Block @@ -158,9 +173,11 @@ public class Rate$Type$Aggregator { state.values[i] = values.get$Type$(firstIndex + i); } } else { + adjustBreaker($Type$RateState.bytesUsed(state.entries() + valueCount)); var newState = new $Type$RateState(state.entries() + valueCount); states.set(groupId, newState); merge(state, newState, firstIndex, valueCount, timestamps, values); + adjustBreaker(-$Type$RateState.bytesUsed(state.entries())); // old state } state.reset += reset; } @@ -196,12 +213,12 @@ public class Rate$Type$Aggregator { @Override public long ramBytesUsed() { - return states.ramBytesUsed(); + return states.ramBytesUsed() + stateBytes; } @Override public void close() { - Releasables.close(states); + Releasables.close(states, () -> adjustBreaker(-stateBytes)); } @Override From 69bf2be9f3251a5896e8c57f16cc3931cce93d72 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine 
<58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 26 Mar 2024 13:03:24 -0400 Subject: [PATCH 70/79] Remove 8.12 from branches.json --- branches.json | 3 --- 1 file changed, 3 deletions(-) diff --git a/branches.json b/branches.json index dc72956c13f80..772693505b9e0 100644 --- a/branches.json +++ b/branches.json @@ -7,9 +7,6 @@ { "branch": "8.13" }, - { - "branch": "8.12" - }, { "branch": "7.17" } From 0b3382cd240e0afd01d27a8740b57c3d2771a697 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 26 Mar 2024 11:00:38 -0700 Subject: [PATCH 71/79] Support ordinals grouping for rate aggregation (#106735) Add support for ordinal grouping in the rate aggregation function. Relates #106703 --- .../aggregation/RateDoubleAggregator.java | 54 ++++++++++++- .../aggregation/RateIntAggregator.java | 54 ++++++++++++- .../aggregation/RateLongAggregator.java | 54 ++++++++++++- .../aggregation/X-RateAggregator.java.st | 54 ++++++++++++- ...TimeSeriesSortedSourceOperatorFactory.java | 6 +- .../TimeSeriesSortedSourceOperatorTests.java | 80 +++++++++++++------ 6 files changed, 258 insertions(+), 44 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java index a560eee4555e2..2dc5b441ca00d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java @@ -24,6 +24,8 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import java.util.Arrays; + /** * A rate grouping aggregation definition for double. * This class is generated. Edit `X-RateAggregator.java.st` instead. 
@@ -59,10 +61,10 @@ public static void combineIntermediate( public static void combineStates( DoubleRateGroupingState current, int currentGroupId, // make the stylecheck happy - DoubleRateGroupingState state, - int statePosition + DoubleRateGroupingState otherState, + int otherGroupId ) { - throw new UnsupportedOperationException("ordinals grouping is not supported yet"); + current.combineState(currentGroupId, otherState, otherGroupId); } public static Block evaluateFinal(DoubleRateGroupingState state, IntVector selected, DriverContext driverContext) { @@ -163,6 +165,7 @@ void combine(int groupId, LongBlock timestamps, DoubleBlock values, double reset if (state == null) { adjustBreaker(DoubleRateState.bytesUsed(valueCount)); state = new DoubleRateState(valueCount); + state.reset = reset; states.set(groupId, state); // TODO: add bulk_copy to Block for (int i = 0; i < valueCount; i++) { @@ -172,11 +175,11 @@ void combine(int groupId, LongBlock timestamps, DoubleBlock values, double reset } else { adjustBreaker(DoubleRateState.bytesUsed(state.entries() + valueCount)); var newState = new DoubleRateState(state.entries() + valueCount); + newState.reset = state.reset + reset; states.set(groupId, newState); merge(state, newState, firstIndex, valueCount, timestamps, values); adjustBreaker(-DoubleRateState.bytesUsed(state.entries())); // old state } - state.reset += reset; } void merge(DoubleRateState curr, DoubleRateState dst, int firstIndex, int rightCount, LongBlock timestamps, DoubleBlock values) { @@ -208,6 +211,49 @@ void merge(DoubleRateState curr, DoubleRateState dst, int firstIndex, int rightC } } + void combineState(int groupId, DoubleRateGroupingState otherState, int otherGroupId) { + var other = otherGroupId < otherState.states.size() ? otherState.states.get(otherGroupId) : null; + if (other == null) { + return; + } + ensureCapacity(groupId); + var curr = states.get(groupId); + if (curr == null) { + var len = other.entries(); + adjustBreaker(DoubleRateState.bytesUsed(len)); + curr = new DoubleRateState(Arrays.copyOf(other.timestamps, len), Arrays.copyOf(other.values, len)); + curr.reset = other.reset; + states.set(groupId, curr); + } else { + states.set(groupId, mergeState(curr, other)); + } + } + + DoubleRateState mergeState(DoubleRateState s1, DoubleRateState s2) { + var newLen = s1.entries() + s2.entries(); + adjustBreaker(DoubleRateState.bytesUsed(newLen)); + var dst = new DoubleRateState(newLen); + dst.reset = s1.reset + s2.reset; + int i = 0, j = 0, k = 0; + while (i < s1.entries() && j < s2.entries()) { + if (s1.timestamps[i] > s2.timestamps[j]) { + dst.timestamps[k] = s1.timestamps[i]; + dst.values[k] = s1.values[i]; + ++i; + } else { + dst.timestamps[k] = s2.timestamps[j]; + dst.values[k] = s2.values[j]; + ++j; + } + ++k; + } + System.arraycopy(s1.timestamps, i, dst.timestamps, k, s1.entries() - i); + System.arraycopy(s1.values, i, dst.values, k, s1.entries() - i); + System.arraycopy(s2.timestamps, j, dst.timestamps, k, s2.entries() - j); + System.arraycopy(s2.values, j, dst.values, k, s2.entries() - j); + return dst; + } + @Override public long ramBytesUsed() { return states.ramBytesUsed() + stateBytes; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java index 8a536a42a2dbe..1ba8b9264c24a 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java @@ -25,6 +25,8 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import java.util.Arrays; + /** * A rate grouping aggregation definition for int. * This class is generated. Edit `X-RateAggregator.java.st` instead. @@ -60,10 +62,10 @@ public static void combineIntermediate( public static void combineStates( IntRateGroupingState current, int currentGroupId, // make the stylecheck happy - IntRateGroupingState state, - int statePosition + IntRateGroupingState otherState, + int otherGroupId ) { - throw new UnsupportedOperationException("ordinals grouping is not supported yet"); + current.combineState(currentGroupId, otherState, otherGroupId); } public static Block evaluateFinal(IntRateGroupingState state, IntVector selected, DriverContext driverContext) { @@ -164,6 +166,7 @@ void combine(int groupId, LongBlock timestamps, IntBlock values, double reset, i if (state == null) { adjustBreaker(IntRateState.bytesUsed(valueCount)); state = new IntRateState(valueCount); + state.reset = reset; states.set(groupId, state); // TODO: add bulk_copy to Block for (int i = 0; i < valueCount; i++) { @@ -173,11 +176,11 @@ void combine(int groupId, LongBlock timestamps, IntBlock values, double reset, i } else { adjustBreaker(IntRateState.bytesUsed(state.entries() + valueCount)); var newState = new IntRateState(state.entries() + valueCount); + newState.reset = state.reset + reset; states.set(groupId, newState); merge(state, newState, firstIndex, valueCount, timestamps, values); adjustBreaker(-IntRateState.bytesUsed(state.entries())); // old state } - state.reset += reset; } void merge(IntRateState curr, IntRateState dst, int firstIndex, int rightCount, LongBlock timestamps, IntBlock values) { @@ -209,6 +212,49 @@ void merge(IntRateState curr, IntRateState dst, int firstIndex, int rightCount, } } + void combineState(int groupId, IntRateGroupingState otherState, int otherGroupId) { + var other = otherGroupId < otherState.states.size() ? 
otherState.states.get(otherGroupId) : null; + if (other == null) { + return; + } + ensureCapacity(groupId); + var curr = states.get(groupId); + if (curr == null) { + var len = other.entries(); + adjustBreaker(IntRateState.bytesUsed(len)); + curr = new IntRateState(Arrays.copyOf(other.timestamps, len), Arrays.copyOf(other.values, len)); + curr.reset = other.reset; + states.set(groupId, curr); + } else { + states.set(groupId, mergeState(curr, other)); + } + } + + IntRateState mergeState(IntRateState s1, IntRateState s2) { + var newLen = s1.entries() + s2.entries(); + adjustBreaker(IntRateState.bytesUsed(newLen)); + var dst = new IntRateState(newLen); + dst.reset = s1.reset + s2.reset; + int i = 0, j = 0, k = 0; + while (i < s1.entries() && j < s2.entries()) { + if (s1.timestamps[i] > s2.timestamps[j]) { + dst.timestamps[k] = s1.timestamps[i]; + dst.values[k] = s1.values[i]; + ++i; + } else { + dst.timestamps[k] = s2.timestamps[j]; + dst.values[k] = s2.values[j]; + ++j; + } + ++k; + } + System.arraycopy(s1.timestamps, i, dst.timestamps, k, s1.entries() - i); + System.arraycopy(s1.values, i, dst.values, k, s1.entries() - i); + System.arraycopy(s2.timestamps, j, dst.timestamps, k, s2.entries() - j); + System.arraycopy(s2.values, j, dst.values, k, s2.entries() - j); + return dst; + } + @Override public long ramBytesUsed() { return states.ramBytesUsed() + stateBytes; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java index eed95ab602db8..846c6f0cc2730 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java @@ -24,6 +24,8 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import java.util.Arrays; + /** * A rate grouping aggregation definition for long. * This class is generated. Edit `X-RateAggregator.java.st` instead. 
@@ -59,10 +61,10 @@ public static void combineIntermediate( public static void combineStates( LongRateGroupingState current, int currentGroupId, // make the stylecheck happy - LongRateGroupingState state, - int statePosition + LongRateGroupingState otherState, + int otherGroupId ) { - throw new UnsupportedOperationException("ordinals grouping is not supported yet"); + current.combineState(currentGroupId, otherState, otherGroupId); } public static Block evaluateFinal(LongRateGroupingState state, IntVector selected, DriverContext driverContext) { @@ -163,6 +165,7 @@ void combine(int groupId, LongBlock timestamps, LongBlock values, double reset, if (state == null) { adjustBreaker(LongRateState.bytesUsed(valueCount)); state = new LongRateState(valueCount); + state.reset = reset; states.set(groupId, state); // TODO: add bulk_copy to Block for (int i = 0; i < valueCount; i++) { @@ -172,11 +175,11 @@ void combine(int groupId, LongBlock timestamps, LongBlock values, double reset, } else { adjustBreaker(LongRateState.bytesUsed(state.entries() + valueCount)); var newState = new LongRateState(state.entries() + valueCount); + newState.reset = state.reset + reset; states.set(groupId, newState); merge(state, newState, firstIndex, valueCount, timestamps, values); adjustBreaker(-LongRateState.bytesUsed(state.entries())); // old state } - state.reset += reset; } void merge(LongRateState curr, LongRateState dst, int firstIndex, int rightCount, LongBlock timestamps, LongBlock values) { @@ -208,6 +211,49 @@ void merge(LongRateState curr, LongRateState dst, int firstIndex, int rightCount } } + void combineState(int groupId, LongRateGroupingState otherState, int otherGroupId) { + var other = otherGroupId < otherState.states.size() ? otherState.states.get(otherGroupId) : null; + if (other == null) { + return; + } + ensureCapacity(groupId); + var curr = states.get(groupId); + if (curr == null) { + var len = other.entries(); + adjustBreaker(LongRateState.bytesUsed(len)); + curr = new LongRateState(Arrays.copyOf(other.timestamps, len), Arrays.copyOf(other.values, len)); + curr.reset = other.reset; + states.set(groupId, curr); + } else { + states.set(groupId, mergeState(curr, other)); + } + } + + LongRateState mergeState(LongRateState s1, LongRateState s2) { + var newLen = s1.entries() + s2.entries(); + adjustBreaker(LongRateState.bytesUsed(newLen)); + var dst = new LongRateState(newLen); + dst.reset = s1.reset + s2.reset; + int i = 0, j = 0, k = 0; + while (i < s1.entries() && j < s2.entries()) { + if (s1.timestamps[i] > s2.timestamps[j]) { + dst.timestamps[k] = s1.timestamps[i]; + dst.values[k] = s1.values[i]; + ++i; + } else { + dst.timestamps[k] = s2.timestamps[j]; + dst.values[k] = s2.values[j]; + ++j; + } + ++k; + } + System.arraycopy(s1.timestamps, i, dst.timestamps, k, s1.entries() - i); + System.arraycopy(s1.values, i, dst.values, k, s1.entries() - i); + System.arraycopy(s2.timestamps, j, dst.timestamps, k, s2.entries() - j); + System.arraycopy(s2.values, j, dst.values, k, s2.entries() - j); + return dst; + } + @Override public long ramBytesUsed() { return states.ramBytesUsed() + stateBytes; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st index 86f5e058bd19c..ad305809c6651 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st +++ 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st @@ -27,6 +27,8 @@ import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import java.util.Arrays; + /** * A rate grouping aggregation definition for $type$. * This class is generated. Edit `X-RateAggregator.java.st` instead. @@ -62,10 +64,10 @@ public class Rate$Type$Aggregator { public static void combineStates( $Type$RateGroupingState current, int currentGroupId, // make the stylecheck happy - $Type$RateGroupingState state, - int statePosition + $Type$RateGroupingState otherState, + int otherGroupId ) { - throw new UnsupportedOperationException("ordinals grouping is not supported yet"); + current.combineState(currentGroupId, otherState, otherGroupId); } public static Block evaluateFinal($Type$RateGroupingState state, IntVector selected, DriverContext driverContext) { @@ -166,6 +168,7 @@ public class Rate$Type$Aggregator { if (state == null) { adjustBreaker($Type$RateState.bytesUsed(valueCount)); state = new $Type$RateState(valueCount); + state.reset = reset; states.set(groupId, state); // TODO: add bulk_copy to Block for (int i = 0; i < valueCount; i++) { @@ -175,11 +178,11 @@ public class Rate$Type$Aggregator { } else { adjustBreaker($Type$RateState.bytesUsed(state.entries() + valueCount)); var newState = new $Type$RateState(state.entries() + valueCount); + newState.reset = state.reset + reset; states.set(groupId, newState); merge(state, newState, firstIndex, valueCount, timestamps, values); adjustBreaker(-$Type$RateState.bytesUsed(state.entries())); // old state } - state.reset += reset; } void merge($Type$RateState curr, $Type$RateState dst, int firstIndex, int rightCount, LongBlock timestamps, $Type$Block values) { @@ -211,6 +214,49 @@ public class Rate$Type$Aggregator { } } + void combineState(int groupId, $Type$RateGroupingState otherState, int otherGroupId) { + var other = otherGroupId < otherState.states.size() ? 
otherState.states.get(otherGroupId) : null; + if (other == null) { + return; + } + ensureCapacity(groupId); + var curr = states.get(groupId); + if (curr == null) { + var len = other.entries(); + adjustBreaker($Type$RateState.bytesUsed(len)); + curr = new $Type$RateState(Arrays.copyOf(other.timestamps, len), Arrays.copyOf(other.values, len)); + curr.reset = other.reset; + states.set(groupId, curr); + } else { + states.set(groupId, mergeState(curr, other)); + } + } + + $Type$RateState mergeState($Type$RateState s1, $Type$RateState s2) { + var newLen = s1.entries() + s2.entries(); + adjustBreaker($Type$RateState.bytesUsed(newLen)); + var dst = new $Type$RateState(newLen); + dst.reset = s1.reset + s2.reset; + int i = 0, j = 0, k = 0; + while (i < s1.entries() && j < s2.entries()) { + if (s1.timestamps[i] > s2.timestamps[j]) { + dst.timestamps[k] = s1.timestamps[i]; + dst.values[k] = s1.values[i]; + ++i; + } else { + dst.timestamps[k] = s2.timestamps[j]; + dst.values[k] = s2.values[j]; + ++j; + } + ++k; + } + System.arraycopy(s1.timestamps, i, dst.timestamps, k, s1.entries() - i); + System.arraycopy(s1.values, i, dst.values, k, s1.entries() - i); + System.arraycopy(s2.timestamps, j, dst.timestamps, k, s2.entries() - j); + System.arraycopy(s2.values, j, dst.values, k, s2.entries() - j); + return dst; + } + @Override public long ramBytesUsed() { return states.ramBytesUsed() + stateBytes; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java index ad884538ac85f..855066fcb9da5 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java @@ -143,14 +143,11 @@ public Page getOutput() { } iterator.consume(); shard = blockFactory.newConstantIntBlockWith(iterator.slice.shardContext().index(), currentPagePos); - boolean singleSegmentNonDecreasing; if (iterator.slice.numLeaves() == 1) { - singleSegmentNonDecreasing = true; int segmentOrd = iterator.slice.getLeaf(0).leafReaderContext().ord; leaf = blockFactory.newConstantIntBlockWith(segmentOrd, currentPagePos).asVector(); } else { // Due to the multi segment nature of time series source operator singleSegmentNonDecreasing must be false - singleSegmentNonDecreasing = false; leaf = segmentsBuilder.build(); segmentsBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); } @@ -161,10 +158,9 @@ public Page getOutput() { timestampIntervalBuilder = blockFactory.newLongVectorBuilder(Math.min(remainingDocs, maxPageSize)); tsids = tsOrdBuilder.build(); tsOrdBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); - page = new Page( currentPagePos, - new DocVector(shard.asVector(), leaf, docs, singleSegmentNonDecreasing).asBlock(), + new DocVector(shard.asVector(), leaf, docs, leaf.isConstant()).asBlock(), tsids.asBlock(), timestampIntervals.asBlock() ); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java index 16340909a4fd3..b397d36837d01 100644 --- 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java @@ -43,14 +43,17 @@ import org.elasticsearch.compute.operator.HashAggregationOperator; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OperatorTestCase; +import org.elasticsearch.compute.operator.OrdinalsGroupingOperator; import org.elasticsearch.compute.operator.TestResultPageSinkOperator; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.mapper.BlockDocValuesReader; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.junit.After; @@ -285,17 +288,6 @@ record Doc(String pod, long timestamp, long requests) { return docs.size(); }); var ctx = driverContext(); - HashAggregationOperator initialHash = new HashAggregationOperator( - List.of(new RateLongAggregatorFunctionSupplier(List.of(4, 2), unitInMillis).groupingAggregatorFactory(AggregatorMode.INITIAL)), - () -> BlockHash.build( - List.of(new HashAggregationOperator.GroupSpec(3, ElementType.BYTES_REF)), - ctx.blockFactory(), - randomIntBetween(1, 1000), - randomBoolean() - ), - ctx - ); - HashAggregationOperator finalHash = new HashAggregationOperator( List.of(new RateLongAggregatorFunctionSupplier(List.of(1, 2, 3), unitInMillis).groupingAggregatorFactory(AggregatorMode.FINAL)), () -> BlockHash.build( @@ -309,20 +301,62 @@ record Doc(String pod, long timestamp, long requests) { List results = new ArrayList<>(); var requestsField = new NumberFieldMapper.NumberFieldType("requests", NumberFieldMapper.NumberType.LONG); var podField = new KeywordFieldMapper.KeywordFieldType("pod"); - OperatorTestCase.runDriver( - new Driver( - ctx, - sourceOperatorFactory.get(ctx), + if (randomBoolean()) { + HashAggregationOperator initialHash = new HashAggregationOperator( List.of( - ValuesSourceReaderOperatorTests.factory(reader, podField, ElementType.BYTES_REF).get(ctx), - ValuesSourceReaderOperatorTests.factory(reader, requestsField, ElementType.LONG).get(ctx), - initialHash, - finalHash + new RateLongAggregatorFunctionSupplier(List.of(4, 2), unitInMillis).groupingAggregatorFactory(AggregatorMode.INITIAL) ), - new TestResultPageSinkOperator(results::add), - () -> {} - ) - ); + () -> BlockHash.build( + List.of(new HashAggregationOperator.GroupSpec(3, ElementType.BYTES_REF)), + ctx.blockFactory(), + randomIntBetween(1, 1000), + randomBoolean() + ), + ctx + ); + OperatorTestCase.runDriver( + new Driver( + ctx, + sourceOperatorFactory.get(ctx), + List.of( + ValuesSourceReaderOperatorTests.factory(reader, podField, ElementType.BYTES_REF).get(ctx), + ValuesSourceReaderOperatorTests.factory(reader, requestsField, ElementType.LONG).get(ctx), + initialHash, + finalHash + ), + new TestResultPageSinkOperator(results::add), + () -> {} + ) + ); + } else { + var blockLoader = new BlockDocValuesReader.BytesRefsFromOrdsBlockLoader("pod"); + var shardContext = new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE); + var ordinalGrouping = new 
OrdinalsGroupingOperator( + shardIdx -> blockLoader, + List.of(shardContext), + ElementType.BYTES_REF, + 0, + "pod", + List.of( + new RateLongAggregatorFunctionSupplier(List.of(3, 2), unitInMillis).groupingAggregatorFactory(AggregatorMode.INITIAL) + ), + randomIntBetween(1, 1000), + ctx + ); + OperatorTestCase.runDriver( + new Driver( + ctx, + sourceOperatorFactory.get(ctx), + List.of( + ValuesSourceReaderOperatorTests.factory(reader, requestsField, ElementType.LONG).get(ctx), + ordinalGrouping, + finalHash + ), + new TestResultPageSinkOperator(results::add), + () -> {} + ) + ); + } Map rates = new HashMap<>(); for (Page result : results) { BytesRefBlock keysBlock = result.getBlock(0); From 1c60a8dd8f0db70770642cb60cfb2bf1b573a980 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 26 Mar 2024 15:21:17 -0400 Subject: [PATCH 72/79] Forward port release notes for v8.13.0 (#106783) --- .../reference/migration/migrate_8_13.asciidoc | 116 ++++- docs/reference/release-notes/8.13.0.asciidoc | 439 +++++++++++++++++- .../release-notes/highlights.asciidoc | 10 + 3 files changed, 562 insertions(+), 3 deletions(-) diff --git a/docs/reference/migration/migrate_8_13.asciidoc b/docs/reference/migration/migrate_8_13.asciidoc index c2f431da388f1..c9e726d940b1d 100644 --- a/docs/reference/migration/migrate_8_13.asciidoc +++ b/docs/reference/migration/migrate_8_13.asciidoc @@ -16,5 +16,119 @@ coming::[8.13.0] [[breaking-changes-8.13]] === Breaking changes -There are no breaking changes in {es} 8.13. +The following changes in {es} 8.13 might affect your applications +and prevent them from operating normally. +Before upgrading to 8.13, review these changes and take the described steps +to mitigate the impact. + + +There are no notable breaking changes in {es} 8.13. +But there are some less critical breaking changes. + +[discrete] +[[breaking_813_index_setting_changes]] +==== Index setting changes + +[[change_index_look_ahead_time_index_settings_default_value_from_2_hours_to_30_minutes]] +.Change `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes. +[%collapsible] +==== +*Details* + +Lower the `index.look_ahead_time` index setting's max value from 2 hours to 30 minutes. + +*Impact* + +Documents with @timestamp of 30 minutes or more in the future will be rejected. Before documents with @timestamp of 2 hours or more in the future were rejected. If the previous behaviour should be kept, then update the `index.look_ahead_time` setting to two hours before performing the upgrade. +==== + +[[lower_look_ahead_time_index_settings_max_value]] +.Lower the `look_ahead_time` index setting's max value +[%collapsible] +==== +*Details* + +Lower the `look_ahead_time` index setting's max value from 7 days to 2 hours. + +*Impact* + +Any value between 2 hours and 7 days will be as a look ahead time of 2 hours is defined +==== + +[discrete] +[[breaking_813_rest_api_changes]] +==== REST API changes + +[[esql_grammar_from_metadata_no_longer_requires]] +.ESQL: Grammar - FROM METADATA no longer requires [] +[%collapsible] +==== +*Details* + +Remove [ ] for METADATA option inside FROM command statements + +*Impact* + +Previously to return metadata fields, one had to use square brackets: (eg. 'FROM index [METADATA _index]'). This is no longer needed: the [ ] are dropped and do not have to be specified, thus simplifying the command above to:'FROM index METADATA _index'. 
+==== + +[[es_ql_remove_project_keyword_from_grammar]] +.ES|QL: remove PROJECT keyword from the grammar +[%collapsible] +==== +*Details* + +Removes the PROJECT keyword (an alias for KEEP) from ES|QL grammar + +*Impact* + +Before this change, users could use PROJECT as an alias for KEEP in ESQL queries, (eg. 'FROM idx | PROJECT name, surname') the parser replaced PROJECT with KEEP, emitted a warning: 'PROJECT command is no longer supported, please use KEEP instead' and the query was executed normally. With this change, PROJECT command is no longer recognized by the query parser; queries using PROJECT command now return a parsing exception. +==== + +[[esql_remove_nan_finite_infinite]] +.[ESQL] Remove is_nan, is_finite, and `is_infinite` +[%collapsible] +==== +*Details* + +Removes the functions `is_nan`, `is_finite`, and `is_infinite`. + +*Impact* + +Attempting to use the above functions will now be a planner time error. These functions are no longer supported. +==== + + +[discrete] +[[deprecated-8.13]] +=== Deprecations + +The following functionality has been deprecated in {es} 8.13 +and will be removed in a future version. +While this won't have an immediate impact on your applications, +we strongly encourage you to take the described steps to update your code +after upgrading to 8.13. + +To find out if you are using any deprecated functionality, +enable <>. + +[discrete] +[[deprecations_813_cluster_and_node_setting]] +==== Cluster and node setting deprecations + +[[deprecate_client_type]] +.Deprecate `client.type` +[%collapsible] +==== +*Details* + +The node setting `client.type` has been ignored since the node client was removed in 8.0. The setting is now deprecated and will be removed in a future release. + +*Impact* + +Remove the `client.type` setting from `elasticsearch.yml` +==== + +[discrete] +[[deprecations_813_rest_api]] +==== REST API deprecations + +[[desirednode_deprecate_node_version_field_make_it_optional_for_current_version]] +.`DesiredNode:` deprecate `node_version` field and make it optional for the current version +[%collapsible] +==== +*Details* + +The desired_node API includes a `node_version` field to perform validation on the new node version required. This kind of check is too broad, and it's better done by external logic, so it has been removed, making the `node_version` field not necessary. The field will be removed in a later version. + +*Impact* + +Users should update their usages of `desired_node` to not include the `node_version` field anymore. +==== diff --git a/docs/reference/release-notes/8.13.0.asciidoc b/docs/reference/release-notes/8.13.0.asciidoc index 5b7d4f90f98de..2ef183374f167 100644 --- a/docs/reference/release-notes/8.13.0.asciidoc +++ b/docs/reference/release-notes/8.13.0.asciidoc @@ -1,8 +1,443 @@ [[release-notes-8.13.0]] == {es} version 8.13.0 -coming[8.13.0] - Also see <>. +[[breaking-8.13.0]] +[float] +=== Breaking changes + +ES|QL:: +* ESQL: Grammar - FROM METADATA no longer requires [] {es-pull}105221[#105221] +* ES|QL: remove PROJECT keyword from the grammar {es-pull}105064[#105064] +* [ESQL] Remove is_nan, is_finite, and `is_infinite` {es-pull}104091[#104091] + +TSDB:: +* Change `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes. {es-pull}103898[#103898] +* Lower the `look_ahead_time` index setting's max value from 7 days to 2 hours. 
{es-pull}103434[#103434] + +[[bug-8.13.0]] +[float] +=== Bug fixes + +Aggregations:: +* Disable parallel collection for terms aggregation with `min_doc_count` equals to 0 {es-pull}106156[#106156] +* `GlobalOrdCardinalityAggregator` should use `HyperLogLogPlusPlus` instead of `HyperLogLogPlusPlusSparse` {es-pull}105546[#105546] + +Allocation:: +* Fix disk computation when initializing new shards {es-pull}102879[#102879] +* Fix disk computation when initializing unassigned shards in desired balance computation {es-pull}102207[#102207] + +Application:: +* Fix Search Applications bug where deleting an alias before deleting an application intermittently caused errors {es-pull}106329[#106329] +* Use search to determine if cluster contains data {es-pull}103920[#103920] +* [Connector API] Bugfix: support list type in filtering advenced snippet value {es-pull}105633[#105633] +* [Connector API] Fix default ordering in `SyncJob` list endpoint {es-pull}105945[#105945] +* [Connector API] Fix serialisation of script params in connector index service {es-pull}106060[#106060] + +Authentication:: +* Execute SAML authentication on the generic threadpool {es-pull}105232[#105232] (issue: {es-issue}104962[#104962]) + +Authorization:: +* Adjust interception of requests for specific shard IDs {es-pull}101656[#101656] + +Client:: +* Validate settings in `ReloadSecureSettings` API {es-pull}103176[#103176] + +Data streams:: +* Apm-data: fix `@custom` component templates {es-pull}104182[#104182] +* Avoid false-positive matches on intermediate objects in `ecs@mappings` {es-pull}105440[#105440] (issue: {es-issue}102794[#102794]) +* Execute lazy rollover with an internal dedicated user #104732 {es-pull}104905[#104905] (issue: {es-issue}104732[#104732]) +* Fix write index resolution when an alias is pointing to a TSDS {es-pull}104440[#104440] (issue: {es-issue}104189[#104189]) +* x-pack/plugin/core: add `match_mapping_type` to `ecs@mappings` dynamic templates {es-pull}103035[#103035] + +Distributed:: +* Fix logger Strings.format calls {es-pull}104573[#104573] +* Request indexing memory pressure in APM node metrics publisher {es-pull}103520[#103520] + +ES|QL:: +* ESQL: Add single value checks on LIKE/RLIKE pushdown {es-pull}103807[#103807] (issue: {es-issue}103806[#103806]) +* ESQL: Correct out-of-range filter pushdowns {es-pull}99961[#99961] (issue: {es-issue}99960[#99960]) +* ESQL: Fix Analyzer to not interpret escaped * as a pattern {es-pull}105325[#105325] (issue: {es-issue}104955[#104955]) +* ESQL: Fix a bug loading unindexed text fields {es-pull}104553[#104553] +* ESQL: Fix bug in grammar that allowed spaces inside id pattern {es-pull}105476[#105476] (issue: {es-issue}105441[#105441]) +* ESQL: Fix replacement of nested expressions in aggs with multiple parameters {es-pull}104718[#104718] (issue: {es-issue}104706[#104706]) +* ESQL: Fix wrong attribute shadowing in pushdown rules {es-pull}105650[#105650] (issue: {es-issue}105434[#105434]) +* ESQL: Improve pushdown of certain filters {es-pull}103538[#103538] (issue: {es-issue}103536[#103536]) +* ESQL: allow `null` in date math {es-pull}103610[#103610] (issue: {es-issue}103085[#103085]) +* ESQL: make `cidr_match` foldable {es-pull}105403[#105403] (issue: {es-issue}105376[#105376]) +* ES|QL: Disable optimizations that rely on Expression.nullable() {es-pull}105691[#105691] +* ES|QL: Improve type validation in aggs for UNSIGNED_LONG better support for VERSION {es-pull}104911[#104911] (issue: {es-issue}102961[#102961]) +* ES|QL: better management of exact subfields for 
TEXT fields {es-pull}103510[#103510] (issue: {es-issue}99899[#99899]) +* Fix error on sorting unsortable `geo_point` and `cartesian_point` {es-pull}106351[#106351] (issue: {es-issue}106007[#106007]) +* For empty mappings use a `LocalRelation` {es-pull}105081[#105081] (issue: {es-issue}104809[#104809]) +* Resume driver when failing to fetch pages {es-pull}106392[#106392] (issue: {es-issue}106262[#106262]) +* Review KEEP logic to prevent duplicate column names {es-pull}103316[#103316] +* `ProjectOperator` should not retain references to released blocks {es-pull}105848[#105848] + +Engine:: +* Consider currently refreshing data in the memory usage of refresh {es-pull}104122[#104122] +* Release `TranslogSnapshot` buffer after iteration {es-pull}106398[#106398] (issue: {es-issue}106390[#106390]) + +Health:: +* Make Health API more resilient to multi-version clusters {es-pull}105789[#105789] (issue: {es-issue}90183[#90183]) +* Stop the periodic health logger when es is stopping {es-pull}105272[#105272] + +ILM+SLM:: +* Remove `hashCode` and `equals` from `OperationModeUpdateTask` {es-pull}104265[#104265] (issue: {es-issue}100871[#100871]) +* [ILM] Delete step deletes data stream with only one index {es-pull}105772[#105772] + +Indices APIs:: +* Fix `require_alias` implicit true value on presence {es-pull}104099[#104099] (issue: {es-issue}103945[#103945]) + +Infra/CLI:: +* Fix server cli to always pass through exit code {es-pull}104943[#104943] + +Infra/Core:: +* Do not enable APM agent 'instrument', it's not required for manual tracing {es-pull}105055[#105055] +* Fix bogus assertion tripped by force-executed tasks {es-pull}104581[#104581] (issue: {es-issue}104580[#104580]) +* Metrics: Allow `AsyncCounters` to switch providers {es-pull}103025[#103025] +* Metrics: Handle null observations in observers {es-pull}103091[#103091] + +Infra/Node Lifecycle:: +* Close rather than stop `HttpServerTransport` on shutdown {es-pull}102759[#102759] (issue: {es-issue}102501[#102501]) + +Ingest Node:: +* Add stable `ThreadPool` constructor to `LogstashInternalBridge` {es-pull}105163[#105163] +* Adding `executedPipelines` to the `IngestDocument` copy constructor {es-pull}105427[#105427] +* Revert "x-pack/plugin/apm-data: download geoip DB on pipeline creation" {es-pull}104505[#104505] +* X-pack/plugin/apm-data: fix `@custom` pipeline support {es-pull}104113[#104113] + +Machine Learning:: +* Allow GET inference models by user a with read only permission {es-pull}105346[#105346] +* Avoid computing `currentInferenceProcessors` on every cluster state {es-pull}106057[#106057] +* Catch all the potential exceptions in the ingest processor code {es-pull}105391[#105391] +* Changed system auditor to use levels {es-pull}105429[#105429] +* During ML maintenance, reset jobs in the reset state without a corresponding task {es-pull}106062[#106062] +* Fix `categorize_text` aggregation nested under empty buckets {es-pull}105987[#105987] (issue: {es-issue}105836[#105836]) +* Fix resetting a job if the original reset task no longer exists. {es-pull}106020[#106020] +* Retry updates to model snapshot ID on job config {es-pull}104077[#104077] +* The OpenAI model parameter should be in service settings not task settings. 
Move the configuration field to service settings {es-pull}105458[#105458] +* Undeploy elser when inference model deleted {es-pull}104230[#104230] + +Mapping:: +* Fix parsing of flattened fields within subobjects: false {es-pull}105373[#105373] + +Network:: +* Fix use-after-free at event-loop shutdown {es-pull}105486[#105486] + +Search:: +* Correct profiled rewrite time for knn with a pre-filter {es-pull}104150[#104150] +* Force execution of `SearchService.Reaper` {es-pull}106544[#106544] (issue: {es-issue}106543[#106543]) +* Move `TransportTermsEnumAction` coordination off transport threads {es-pull}104408[#104408] +* Remove `SearchException` usages without a proper status code {es-pull}105150[#105150] +* Require the name field for `inner_hits` for collapse {es-pull}104666[#104666] +* add validation on _id field when upsert new doc {es-pull}103399[#103399] (issue: {es-issue}102981[#102981]) + +Security:: +* Revert "Validate settings in `ReloadSecureSettings` API" {es-pull}103310[#103310] + +Snapshot/Restore:: +* Do not record s3 http request time when it is not available {es-pull}105103[#105103] +* `URLRepository` should not block shutdown {es-pull}105588[#105588] + +TLS:: +* Respect --pass option in certutil csr mode {es-pull}106105[#106105] + +Transform:: +* Fix `_reset` API when called with `force=true` on a failed transform {es-pull}106574[#106574] (issue: {es-issue}106573[#106573]) +* Fix a bug where destination index aliases are not set up for an unattended transform {es-pull}105499[#105499] +* Remove duplicate checkpoint audits {es-pull}105164[#105164] (issue: {es-issue}105106[#105106]) +* Return results in order {es-pull}105089[#105089] (issue: {es-issue}104847[#104847]) +* Use deduced mappings for determining proper fields' format even if `deduce_mappings==false` {es-pull}103682[#103682] (issue: {es-issue}103115[#103115]) + +Vector Search:: +* Fix bug when nested knn pre-filter might match nested docs {es-pull}105994[#105994] + +Watcher:: +* Handling exceptions on watcher reload {es-pull}105442[#105442] (issue: {es-issue}69842[#69842]) + +[[deprecation-8.13.0]] +[float] +=== Deprecations + +Distributed:: +* `DesiredNode:` deprecate `node_version` field and make it optional (unused) in current parser {es-pull}104209[#104209] + +Infra/Core:: +* Deprecate `client.type` {es-pull}104574[#104574] + +[[enhancement-8.13.0]] +[float] +=== Enhancements + +Aggregations:: +* Add index mapping parameter for `counted_keyword` {es-pull}103646[#103646] +* Introduce an `AggregatorReducer` to reduce the footprint of aggregations in the coordinating node {es-pull}105207[#105207] +* Release resources in `BestBucketsDeferringCollector` earlier {es-pull}104893[#104893] +* Support sampling in `counted_terms` aggregation {es-pull}103846[#103846] + +Allocation:: +* Account for reserved disk size {es-pull}103903[#103903] +* Derive expected replica size from primary {es-pull}102078[#102078] + +Application:: +* Add serverless scopes for Connector APIs {es-pull}104063[#104063] +* [Connector API] Change required privileges to indices:data/read(write) {es-pull}105289[#105289] +* [Connector API] Implement update `index_name` action {es-pull}104648[#104648] +* [Connector API] Support filtering by name, index name in list action {es-pull}105131[#105131] +* [Connector API] Support filtering connectors by service type and a query {es-pull}105178[#105178] +* [Connector API] Support updating configuration values only {es-pull}105249[#105249] +* [Connectors API] Add new field `api_key_secret_id` to Connector 
{es-pull}104982[#104982] +* [Connectors API] Implement connector status update action {es-pull}104750[#104750] +* [Connectors API] Implement update native action endpoint {es-pull}104654[#104654] +* [Connectors API] Implement update service type action {es-pull}104643[#104643] +* [Connectors API] Relax strict response parsing for get/list operations {es-pull}104909[#104909] +* [Profiling] Extract properties faster from source {es-pull}104356[#104356] +* [Profiling] Mark all templates as managed {es-pull}103783[#103783] +* [Profiling] Speed up processing of stacktraces {es-pull}104674[#104674] +* [Profiling] Support downsampling of generic events {es-pull}104730[#104730] +* [Profiling] Use shard request cache consistently {es-pull}103643[#103643] + +Authentication:: +* Expose API key authentication metrics {es-pull}103178[#103178] +* Expose realms authentication metrics {es-pull}104200[#104200] +* Expose service account authentication metrics {es-pull}104043[#104043] +* Expose token authentication metrics {es-pull}104142[#104142] +* Hot-reloadable LDAP bind password {es-pull}104320[#104320] +* Support of `match` for the Query API Key API {es-pull}104594[#104594] + +Authorization:: +* [Security Solution] Allow write permission for `kibana_system` role on endpoint response index {es-pull}103555[#103555] + +CRUD:: +* Avoid wrapping searchers multiple times in mget {es-pull}104227[#104227] (issue: {es-issue}85069[#85069]) + +Client:: +* Add rest spec for Query User API {es-pull}104529[#104529] + +Cluster Coordination:: +* Add troubleshooting docs link to `PeerFinder` logs {es-pull}104787[#104787] +* Report current master in `PeerFinder` {es-pull}104396[#104396] + +Data streams:: +* Introduce lazy rollover for mapping updates in data streams {es-pull}103309[#103309] (issue: {es-issue}89346[#89346]) +* Use new `ignore_dynamic_beyond_limit` in logs and metric data streams {es-pull}105180[#105180] +* X-pack/plugin/apm-data: add dynamic setting for enabling template registry {es-pull}104386[#104386] (issue: {es-issue}104385[#104385]) +* X-pack/plugin/core: rename `double_metrics` template {es-pull}103033[#103033] +* x-pack/plugin/apm-data: Add a new field transaction.profiler_stack_trace_ids to traces-apm@mappings.yaml {es-pull}105223[#105223] +* x-pack/plugin/apm-data: Map some APM fields as flattened and fix error.grouping_name script {es-pull}103032[#103032] +* x-pack/plugin/core: make automatic rollovers lazy {es-pull}105273[#105273] (issue: {es-issue}104083[#104083]) + +Discovery-Plugins:: +* Set read timeout for fetching IMDSv2 token {es-pull}104407[#104407] (issue: {es-issue}104244[#104244]) + +Downsampling:: +* Support patch transport version from 8.12 {es-pull}104406[#104406] + +ES|QL:: +* Add ES|QL async delete API {es-pull}103628[#103628] +* Avoid humongous blocks {es-pull}103340[#103340] +* ESQL: Add TO_UPPER and TO_LOWER functions {es-pull}104309[#104309] +* ESQL: Add option to drop null fields {es-pull}102428[#102428] +* ESQL: Add plan consistency verification after each optimizer {es-pull}105371[#105371] +* ESQL: Check field exists before load from `_source` {es-pull}103632[#103632] +* ESQL: Delay finding field load infrastructure {es-pull}103821[#103821] +* ESQL: Expand shallow copy with vecs {es-pull}103681[#103681] (issue: {es-issue}100528[#100528]) +* ESQL: Extend STATS command to support aggregate expressions {es-pull}104958[#104958] +* ESQL: Infer not null for aggregated fields {es-pull}103673[#103673] (issue: {es-issue}102787[#102787]) +* ESQL: Nested expressions inside stats 
command {es-pull}104387[#104387] (issue: {es-issue}99828[#99828]) +* ESQL: Pre-allocate rows in TopNOperator {es-pull}104796[#104796] +* ESQL: Referencing expressions that contain backticks requires <>. {es-pull}100740[#100740] (issue: {es-issue}100312[#100312]) +* ESQL: Simpify IS NULL/IS NOT NULL evaluation {es-pull}103099[#103099] (issue: {es-issue}103097[#103097]) +* ESQL: Speed up reading many nulls {es-pull}105088[#105088] +* ESQL: Support loading shapes from source into WKB blocks {es-pull}104269[#104269] +* ESQL: Track the rest of `DocVector` {es-pull}103727[#103727] +* ESQL: `MV_FIRST` and `MV_LAST` {es-pull}103928[#103928] +* ESQL: add `date_diff` function {es-pull}104118[#104118] (issue: {es-issue}101942[#101942]) +* ESQL: push down "[text_field] is not null" {es-pull}105593[#105593] +* ES|QL Async Query API {es-pull}103398[#103398] +* Prepare enrich plan to support multi clusters {es-pull}104355[#104355] +* Reading points from source to reduce precision loss {es-pull}103698[#103698] +* Remove deprecated Block APIs {es-pull}103592[#103592] +* Reserve bytes before serializing page {es-pull}105269[#105269] +* Support ST_CENTROID over spatial points {es-pull}104218[#104218] (issue: {es-issue}104656[#104656]) +* Support cross clusters query in ESQL {es-pull}101640[#101640] +* Support enrich ANY mode in cross clusters query {es-pull}104840[#104840] +* Support enrich coordinator mode {es-pull}104936[#104936] +* Support enrich remote mode {es-pull}104993[#104993] + +Geo:: +* Add support for Well Known Binary (WKB) in the fields API for spatial fields {es-pull}103461[#103461] +* Add the possibility to transform WKT to WKB directly {es-pull}104030[#104030] + +Health:: +* Add APM metrics to `HealthPeriodicLogger` {es-pull}102765[#102765] +* Extend `repository_integrity` health indicator for unknown and invalid repos {es-pull}104614[#104614] (issue: {es-issue}103784[#103784]) + +ILM+SLM:: +* Add "step":"ERROR" to ILM explain response for missing policy {es-pull}103720[#103720] (issue: {es-issue}99030[#99030]) +* Add default rollover conditions to ILM explain API response {es-pull}104721[#104721] (issue: {es-issue}103395[#103395]) +* ILM/SLM history policies forcemerge in hot and dsl configuration {es-pull}103190[#103190] + +Infra/CLI:: +* Add replay diagnostic dir to system jvm options {es-pull}103535[#103535] + +Infra/Circuit Breakers:: +* Lower G1 minimum full GC interval {es-pull}105259[#105259] + +Infra/Core:: +* Adding threadpool metrics {es-pull}102371[#102371] +* ES - document observing with rejections {es-pull}104859[#104859] +* Thread pool metrics {es-pull}104500[#104500] + +Infra/Metrics:: +* Modify name of threadpool metric for rejected {es-pull}105015[#105015] + +Infra/Node Lifecycle:: +* Wait for async searches to finish when shutting down {es-pull}101487[#101487] + +Infra/Transport API:: +* Make `ParentTaskAssigningClient.getRemoteClusterClient` method also return `ParentTaskAssigningClient` {es-pull}100813[#100813] + +Ingest Node:: +* Adding `ActionRequestLazyBuilder` implementation of `RequestBuilder` {es-pull}104927[#104927] +* Adding a `RequestBuilder` interface {es-pull}104778[#104778] +* Adding a custom exception for problems with the graph of pipelines to be applied to a document {es-pull}105196[#105196] +* Improving the performance of the ingest simulate verbose API {es-pull}105265[#105265] +* Ingest geoip processor cache 'no results' from the database {es-pull}104092[#104092] +* Limiting the number of nested pipelines that can be executed {es-pull}105428[#105428] +* 
Modifying request builders {es-pull}104636[#104636] + +Java Low Level REST Client:: +* Set thread name used by REST client {es-pull}103160[#103160] + +Machine Learning:: +* Add optional pruning configuration (weighted terms scoring) to text expansion query {es-pull}102862[#102862] +* Add text_embedding inference service with multilingual-e5 and custom eland models {es-pull}104949[#104949] +* Add 3 automatic restarts for `pytorch_inference` processes that stop unexpectedly {es-pull}104433[#104433] +* Add support for Cohere inference service {es-pull}104559[#104559] +* Always test for spikes and dips as well as changes in the change point aggregation {es-pull}103922[#103922] +* Apply windowing and chunking to long documents {es-pull}104363[#104363] +* Automatically download the ELSER model when PUT in `_inference` {es-pull}104334[#104334] +* Better handling of number of allocations in pytorch_inference in the case that hardware_concurrency fails {ml-pull}2607[#2607] +* Change detection aggregation improvements {es-pull}102824[#102824] +* Conditionally send the dimensions field as part of the openai requests {es-pull}105299[#105299] (issue: {es-issue}105005[#105005]) +* Endpoint to find positions of Grok pattern matches {es-pull}104394[#104394] +* Ensure unique IDs between inference models and trained model deployments {es-pull}103996[#103996] +* Expose some ML metrics via APM {es-pull}102584[#102584] +* Make `task_type` optional in `_inference` APIs {es-pull}104483[#104483] +* Update `missingTrainedModel` message to include: you may need to create it {es-pull}104155[#104155] +* Upgrade MKL to version 2024.0 on Linux x86_64 {ml-pull}2619[#2619] +* Upgrade PyTorch to version 2.1.2. {ml-pull}2588[#2588] +* Upgrade zlib to version 1.2.13 on Windows {ml-pull}2588[#2588] +* Use Boost.JSON for JSON processing {ml-pull}2614[#2614] +* Validate inference model ids {es-pull}103669[#103669] + + +Mapping:: +* Add `index.mapping.total_fields.ignore_dynamic_beyond_limit` setting to ignore dynamic fields when field limit is reached {es-pull}96235[#96235] +* Make field limit more predictable {es-pull}102885[#102885] + +Network:: +* Prune unnecessary information from TransportNodesStatsAction.NodeStatsRequest {es-pull}102559[#102559] (issue: {es-issue}100878[#100878]) + +Percolator:: +* Return `matched_queries` in Percolator {es-pull}103084[#103084] (issue: {es-issue}10163[#10163]) + +Query Languages:: +* Introduce Alias.unwrap method {es-pull}104575[#104575] + +Search:: +* Dyamically adjust node metrics cache expire {es-pull}104460[#104460] +* Enhancement: Metrics for Search Took Times using Action Listeners {es-pull}104996[#104996] +* Field caps performance pt2 {es-pull}105941[#105941] +* Field-caps field has value lookup use map instead of looping array {es-pull}105770[#105770] +* Flag in `_field_caps` to return only fields with values in index {es-pull}103651[#103651] +* Include better output in profiling & `toString` for automaton based queries {es-pull}105468[#105468] +* Metrics for search latencies {es-pull}102557[#102557] +* Ref count search response bytes {es-pull}103763[#103763] (issue: {es-issue}102657[#102657]) +* Remove leniency in msearch parsing {es-pull}103232[#103232] +* Resolve Cluster API {es-pull}102726[#102726] +* Reuse number field mapper tests in other modules {es-pull}99142[#99142] (issue: {es-issue}92947[#92947]) +* S3 first byte latency metric {es-pull}102435[#102435] +* Update s3 latency metric to use micros {es-pull}103633[#103633] +* Upgrade to Lucene 9.10.0 
{es-pull}105578[#105578] + +Security:: +* Add Query Users API {es-pull}104033[#104033] +* Add `ApiKey` expiration time to audit log {es-pull}103959[#103959] +* Add expiration time to update api key api {es-pull}103453[#103453] +* Add stricter validation for api key expiration time {es-pull}103973[#103973] +* Add support for the `simple_query_string` to the Query API Key API {es-pull}104132[#104132] +* Add support for the `type` parameter, for sorting, to the Query API Key API {es-pull}104625[#104625] +* Aggs support for Query API Key Information API {es-pull}104895[#104895] +* Hot-reloadable remote cluster credentials {es-pull}102798[#102798] + +Snapshot/Restore:: +* Add s3 `HeadObject` request to request stats {es-pull}105105[#105105] +* Expose `OperationPurpose` via `CustomQueryParameter` to s3 logs {es-pull}105044[#105044] +* Fix blob cache race, decay, time dependency {es-pull}104784[#104784] +* Pause shard snapshots on graceful shutdown {es-pull}101717[#101717] +* Retry indefinitely for s3 indices blob read errors {es-pull}103300[#103300] + +Store:: +* List hidden shard stores by default {es-pull}103710[#103710] + +TLS:: +* 'elasticsearch-certutil cert' now verifies the issuing chain of the generated certificate {es-pull}103948[#103948] + +TSDB:: +* Improve storage efficiency for non-metric fields in TSDB {es-pull}99747[#99747] +* Introduce experimental pass-through field type {es-pull}103648[#103648] +* Nest pass-through objects within objects {es-pull}105062[#105062] +* Restrict usage of certain aggregations when in sort order execution is required {es-pull}104665[#104665] +* Small time series agg improvement {es-pull}106288[#106288] + +Transform:: +* Allow transforms to use PIT with remote clusters again {es-pull}105192[#105192] (issue: {es-issue}104518[#104518]) +* Transforms: Adding basic stats API param {es-pull}104878[#104878] + +Vector Search:: +* Add new int8_flat and flat vector index types {es-pull}104872[#104872] +* Add support for more than one `inner_hit` when searching nested vectors {es-pull}104006[#104006] +* Making `k` and `num_candidates` optional for knn search {es-pull}101209[#101209] (issue: {es-issue}97533[#97533]) + +[[feature-8.13.0]] +[float] +=== New features + +Data streams:: +* Add `require_data_stream` parameter to indexing requests to enforce indexing operations target a data stream {es-pull}101872[#101872] (issue: {es-issue}97032[#97032]) +* Redirect failed ingest node operations to a failure store when available {es-pull}103481[#103481] + +ES|QL:: +* ESQL: Introduce mode setting for ENRICH {es-pull}103949[#103949] +* ESQL: add =~ operator (case insensitive equality) {es-pull}103656[#103656] + +Health:: +* Create a DSL health indicator as part of the health API {es-pull}103130[#103130] + +Infra/Core:: +* Add gradle tasks and code to modify and access mappings between version ids and release versions {es-pull}103627[#103627] + +Mapping:: +* Add `unmatch_mapping_type`, and support array of types {es-pull}103171[#103171] (issues: {es-issue}102807[#102807], {es-issue}102795[#102795]) + +Search:: +* Added Duplicate Word Check Feature to Analysis Nori {es-pull}103325[#103325] (issue: {es-issue}103321[#103321]) +* [Synonyms] Mark Synonyms as GA {es-pull}103223[#103223] + +[[upgrade-8.13.0]] +[float] +=== Upgrades + +Query Languages:: +* Upgrade ANTLR4 to 4.13.1 {es-pull}105334[#105334] (issue: {es-issue}102953[#102953]) + +Search:: +* Upgrade to Lucene 9.9.0 {es-pull}102782[#102782] +* Upgrade to Lucene 9.9.1 {es-pull}103387[#103387] +* Upgrade to Lucene 9.9.2 
{es-pull}104753[#104753] + diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index 92cd447a48deb..25096779521e4 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -62,6 +62,16 @@ fields that don't have a value. This can be done through the newly added {es-pull}103651[#103651] +[discrete] +[[new_lucene_9_10_release]] +=== New Lucene 9.10 release +- https://github.com/apache/lucene/pull/13090: Prevent humongous allocations in ScalarQuantizer when building quantiles. +- https://github.com/apache/lucene/pull/12962: Speedup concurrent multi-segment HNSW graph search +- https://github.com/apache/lucene/pull/13033: Range queries on numeric/date/ip fields now exit earlier on segments whose values don't intersect with the query range. This should especially help when there are other required clauses in the `bool` query and when the range filter is narrow, e.g. filtering on the last 5 minutes. +- https://github.com/apache/lucene/pull/13026: `bool` queries that mix `filter` and `should` clauses will now propagate minimum competitive scores through the `should` clauses. This should yield speedups when sorting by descending score. + +{es-pull}105578[#105578] + // end::notable-highlights[] From ca5a7519a9f1d16c548a3eba1ba1021515fcc0cd Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Tue, 26 Mar 2024 14:36:13 -0500 Subject: [PATCH 73/79] Updating FullClusterRestartIT.testWatcher to account for watcher running (#106697) --- .../org/elasticsearch/xpack/restart/FullClusterRestartIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 254d12a05d936..d7760eb42a1db 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -731,7 +731,7 @@ private void assertBasicWatchInteractions() throws Exception { Map updateWatch = entityAsMap(client().performRequest(createWatchRequest)); assertThat(updateWatch.get("created"), equalTo(false)); - assertThat(updateWatch.get("_version"), equalTo(2)); + assertThat((int) updateWatch.get("_version"), greaterThanOrEqualTo(2)); Map get = entityAsMap(client().performRequest(new Request("GET", "_watcher/watch/new_watch"))); assertThat(get.get("found"), equalTo(true)); From 930654b496552cf3fc81408d171e151a0181666e Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Tue, 26 Mar 2024 19:56:29 +0000 Subject: [PATCH 74/79] Bump versions after 7.17.19 release --- .buildkite/pipelines/intake.yml | 2 +- .buildkite/pipelines/periodic-packaging.yml | 16 ++++++++++++++++ .buildkite/pipelines/periodic.yml | 14 ++++++++++++-- .ci/bwcVersions | 1 + .ci/snapshotBwcVersions | 2 +- .../src/main/java/org/elasticsearch/Version.java | 1 + .../org/elasticsearch/TransportVersions.csv | 1 + .../org/elasticsearch/index/IndexVersions.csv | 1 + 8 files changed, 34 insertions(+), 4 deletions(-) diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 3283e691f121c..f45caaf7fdfaf 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -48,7 +48,7 @@ steps: timeout_in_minutes: 300 matrix: 
setup: - BWC_VERSION: ["7.17.19", "8.12.3", "8.13.0", "8.14.0"] + BWC_VERSION: ["7.17.20", "8.12.3", "8.13.0", "8.14.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 5e7c1a0960789..c38e0e48cd070 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -1137,6 +1137,22 @@ steps: env: BWC_VERSION: 7.17.19 + - label: "{{matrix.image}} / 7.17.20 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.20 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.20 + - label: "{{matrix.image}} / 8.0.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.0 timeout_in_minutes: 300 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 8e1ff14eda792..23f0e7d4bbacf 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -692,6 +692,16 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.19 + - label: 7.17.20 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.20#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.20 - label: 8.0.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.0#bwcTest timeout_in_minutes: 300 @@ -1246,7 +1256,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk17 - BWC_VERSION: ["7.17.19", "8.12.3", "8.13.0", "8.14.0"] + BWC_VERSION: ["7.17.20", "8.12.3", "8.13.0", "8.14.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -1290,7 +1300,7 @@ steps: - openjdk17 - openjdk21 - openjdk22 - BWC_VERSION: ["7.17.19", "8.12.3", "8.13.0", "8.14.0"] + BWC_VERSION: ["7.17.20", "8.12.3", "8.13.0", "8.14.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 8b454fa92ab02..bc5c24cf0f365 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -68,6 +68,7 @@ BWC_VERSION: - "7.17.17" - "7.17.18" - "7.17.19" + - "7.17.20" - "8.0.0" - "8.0.1" - "8.1.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index d85a432684495..6d391a3fd72ae 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,5 @@ BWC_VERSION: - - "7.17.19" + - "7.17.20" - "8.12.3" - "8.13.0" - "8.14.0" diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 241af6e7b6c45..391ede4d2aa40 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -119,6 +119,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_7_17_17 = new Version(7_17_17_99); public static final Version V_7_17_18 = new Version(7_17_18_99); public static final Version V_7_17_19 = new Version(7_17_19_99); + public static final Version V_7_17_20 = new Version(7_17_20_99); public static final Version V_8_0_0 = new Version(8_00_00_99); public static final Version V_8_0_1 = new Version(8_00_01_99); diff --git 
a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index b392111557615..17f594ec992d1 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -66,6 +66,7 @@ 7.17.16,7171699 7.17.17,7171799 7.17.18,7171899 +7.17.19,7171999 8.0.0,8000099 8.0.1,8000199 8.1.0,8010099 diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index f2da9fcaf60ce..b29ae972c9b13 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -66,6 +66,7 @@ 7.17.16,7171699 7.17.17,7171799 7.17.18,7171899 +7.17.19,7171999 8.0.0,8000099 8.0.1,8000199 8.1.0,8010099 From cba9e5be9f21d85e91c1c90ae4e3f12ef8778d95 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Tue, 26 Mar 2024 20:02:39 +0000 Subject: [PATCH 75/79] Prune changelogs after 7.17.19 release --- docs/changelog/100740.yaml | 6 ------ docs/changelog/100813.yaml | 6 ------ docs/changelog/101209.yaml | 6 ------ docs/changelog/101487.yaml | 5 ----- docs/changelog/101640.yaml | 5 ----- docs/changelog/101656.yaml | 5 ----- docs/changelog/101717.yaml | 5 ----- docs/changelog/101872.yaml | 6 ------ docs/changelog/102078.yaml | 5 ----- docs/changelog/102207.yaml | 6 ------ docs/changelog/102371.yaml | 5 ----- docs/changelog/102428.yaml | 5 ----- docs/changelog/102435.yaml | 5 ----- docs/changelog/102557.yaml | 5 ----- docs/changelog/102559.yaml | 5 ----- docs/changelog/102584.yaml | 5 ----- docs/changelog/102726.yaml | 5 ----- docs/changelog/102759.yaml | 6 ------ docs/changelog/102765.yaml | 5 ----- docs/changelog/102782.yaml | 5 ----- docs/changelog/102798.yaml | 5 ----- docs/changelog/102824.yaml | 5 ----- docs/changelog/102862.yaml | 5 ----- docs/changelog/102879.yaml | 5 ----- docs/changelog/102885.yaml | 5 ----- docs/changelog/103025.yaml | 5 ----- docs/changelog/103032.yaml | 5 ----- docs/changelog/103033.yaml | 5 ----- docs/changelog/103035.yaml | 5 ----- docs/changelog/103084.yaml | 6 ------ docs/changelog/103091.yaml | 5 ----- docs/changelog/103099.yaml | 6 ------ docs/changelog/103130.yaml | 5 ----- docs/changelog/103160.yaml | 5 ----- docs/changelog/103171.yaml | 7 ------- docs/changelog/103176.yaml | 5 ----- docs/changelog/103178.yaml | 5 ----- docs/changelog/103190.yaml | 5 ----- docs/changelog/103223.yaml | 10 ---------- docs/changelog/103232.yaml | 5 ----- docs/changelog/103300.yaml | 5 ----- docs/changelog/103309.yaml | 6 ------ docs/changelog/103310.yaml | 5 ----- docs/changelog/103316.yaml | 5 ----- docs/changelog/103325.yaml | 6 ------ docs/changelog/103340.yaml | 5 ----- docs/changelog/103387.yaml | 5 ----- docs/changelog/103398.yaml | 5 ----- docs/changelog/103399.yaml | 6 ------ docs/changelog/103434.yaml | 11 ----------- docs/changelog/103453.yaml | 5 ----- docs/changelog/103461.yaml | 5 ----- docs/changelog/103481.yaml | 5 ----- docs/changelog/103510.yaml | 6 ------ docs/changelog/103520.yaml | 5 ----- docs/changelog/103535.yaml | 5 ----- docs/changelog/103538.yaml | 6 ------ docs/changelog/103555.yaml | 6 ------ docs/changelog/103592.yaml | 5 ----- docs/changelog/103610.yaml | 6 ------ docs/changelog/103627.yaml | 5 ----- docs/changelog/103628.yaml | 5 ----- docs/changelog/103632.yaml | 5 ----- docs/changelog/103633.yaml | 5 ----- docs/changelog/103643.yaml | 5 ----- 
docs/changelog/103646.yaml | 5 ----- docs/changelog/103648.yaml | 5 ----- docs/changelog/103651.yaml | 12 ------------ docs/changelog/103656.yaml | 5 ----- docs/changelog/103669.yaml | 5 ----- docs/changelog/103673.yaml | 6 ------ docs/changelog/103681.yaml | 6 ------ docs/changelog/103682.yaml | 6 ------ docs/changelog/103698.yaml | 5 ----- docs/changelog/103710.yaml | 5 ----- docs/changelog/103720.yaml | 6 ------ docs/changelog/103727.yaml | 5 ----- docs/changelog/103763.yaml | 6 ------ docs/changelog/103783.yaml | 5 ----- docs/changelog/103807.yaml | 6 ------ docs/changelog/103821.yaml | 5 ----- docs/changelog/103846.yaml | 5 ----- docs/changelog/103898.yaml | 14 -------------- docs/changelog/103903.yaml | 5 ----- docs/changelog/103920.yaml | 5 ----- docs/changelog/103922.yaml | 5 ----- docs/changelog/103928.yaml | 5 ----- docs/changelog/103948.yaml | 6 ------ docs/changelog/103949.yaml | 5 ----- docs/changelog/103959.yaml | 5 ----- docs/changelog/103973.yaml | 5 ----- docs/changelog/103996.yaml | 5 ----- docs/changelog/104006.yaml | 5 ----- docs/changelog/104030.yaml | 5 ----- docs/changelog/104033.yaml | 5 ----- docs/changelog/104043.yaml | 5 ----- docs/changelog/104063.yaml | 5 ----- docs/changelog/104077.yaml | 5 ----- docs/changelog/104091.yaml | 11 ----------- docs/changelog/104092.yaml | 5 ----- docs/changelog/104099.yaml | 6 ------ docs/changelog/104113.yaml | 5 ----- docs/changelog/104118.yaml | 6 ------ docs/changelog/104122.yaml | 5 ----- docs/changelog/104132.yaml | 5 ----- docs/changelog/104142.yaml | 5 ----- docs/changelog/104150.yaml | 5 ----- docs/changelog/104155.yaml | 6 ------ docs/changelog/104182.yaml | 5 ----- docs/changelog/104200.yaml | 5 ----- docs/changelog/104209.yaml | 13 ------------- docs/changelog/104218.yaml | 6 ------ docs/changelog/104227.yaml | 6 ------ docs/changelog/104230.yaml | 5 ----- docs/changelog/104265.yaml | 6 ------ docs/changelog/104269.yaml | 5 ----- docs/changelog/104309.yaml | 5 ----- docs/changelog/104320.yaml | 5 ----- docs/changelog/104334.yaml | 5 ----- docs/changelog/104355.yaml | 5 ----- docs/changelog/104356.yaml | 5 ----- docs/changelog/104363.yaml | 5 ----- docs/changelog/104386.yaml | 6 ------ docs/changelog/104387.yaml | 6 ------ docs/changelog/104394.yaml | 5 ----- docs/changelog/104396.yaml | 5 ----- docs/changelog/104406.yaml | 5 ----- docs/changelog/104407.yaml | 6 ------ docs/changelog/104408.yaml | 5 ----- docs/changelog/104433.yaml | 5 ----- docs/changelog/104440.yaml | 6 ------ docs/changelog/104460.yaml | 5 ----- docs/changelog/104483.yaml | 5 ----- docs/changelog/104500.yaml | 5 ----- docs/changelog/104505.yaml | 5 ----- docs/changelog/104529.yaml | 5 ----- docs/changelog/104553.yaml | 5 ----- docs/changelog/104559.yaml | 5 ----- docs/changelog/104573.yaml | 5 ----- docs/changelog/104574.yaml | 10 ---------- docs/changelog/104575.yaml | 5 ----- docs/changelog/104581.yaml | 6 ------ docs/changelog/104594.yaml | 5 ----- docs/changelog/104614.yaml | 6 ------ docs/changelog/104625.yaml | 6 ------ docs/changelog/104636.yaml | 5 ----- docs/changelog/104643.yaml | 5 ----- docs/changelog/104648.yaml | 5 ----- docs/changelog/104654.yaml | 5 ----- docs/changelog/104665.yaml | 5 ----- docs/changelog/104666.yaml | 5 ----- docs/changelog/104674.yaml | 5 ----- docs/changelog/104718.yaml | 6 ------ docs/changelog/104721.yaml | 6 ------ docs/changelog/104730.yaml | 5 ----- docs/changelog/104750.yaml | 5 ----- docs/changelog/104753.yaml | 5 ----- docs/changelog/104778.yaml | 5 ----- docs/changelog/104784.yaml | 5 ----- 
docs/changelog/104787.yaml | 5 ----- docs/changelog/104796.yaml | 5 ----- docs/changelog/104840.yaml | 5 ----- docs/changelog/104859.yaml | 5 ----- docs/changelog/104872.yaml | 5 ----- docs/changelog/104878.yaml | 5 ----- docs/changelog/104893.yaml | 5 ----- docs/changelog/104895.yaml | 5 ----- docs/changelog/104905.yaml | 6 ------ docs/changelog/104909.yaml | 5 ----- docs/changelog/104911.yaml | 7 ------- docs/changelog/104927.yaml | 5 ----- docs/changelog/104936.yaml | 5 ----- docs/changelog/104943.yaml | 5 ----- docs/changelog/104949.yaml | 5 ----- docs/changelog/104958.yaml | 5 ----- docs/changelog/104982.yaml | 5 ----- docs/changelog/104993.yaml | 5 ----- docs/changelog/104996.yaml | 5 ----- docs/changelog/105015.yaml | 5 ----- docs/changelog/105044.yaml | 5 ----- docs/changelog/105055.yaml | 5 ----- docs/changelog/105062.yaml | 5 ----- docs/changelog/105064.yaml | 17 ----------------- docs/changelog/105081.yaml | 6 ------ docs/changelog/105088.yaml | 5 ----- docs/changelog/105089.yaml | 6 ------ docs/changelog/105103.yaml | 5 ----- docs/changelog/105105.yaml | 5 ----- docs/changelog/105131.yaml | 5 ----- docs/changelog/105150.yaml | 5 ----- docs/changelog/105163.yaml | 5 ----- docs/changelog/105164.yaml | 6 ------ docs/changelog/105178.yaml | 5 ----- docs/changelog/105180.yaml | 5 ----- docs/changelog/105192.yaml | 6 ------ docs/changelog/105196.yaml | 6 ------ docs/changelog/105207.yaml | 6 ------ docs/changelog/105221.yaml | 14 -------------- docs/changelog/105223.yaml | 5 ----- docs/changelog/105232.yaml | 6 ------ docs/changelog/105249.yaml | 5 ----- docs/changelog/105259.yaml | 5 ----- docs/changelog/105265.yaml | 5 ----- docs/changelog/105269.yaml | 5 ----- docs/changelog/105272.yaml | 5 ----- docs/changelog/105273.yaml | 6 ------ docs/changelog/105289.yaml | 5 ----- docs/changelog/105299.yaml | 6 ------ docs/changelog/105325.yaml | 6 ------ docs/changelog/105334.yaml | 6 ------ docs/changelog/105346.yaml | 5 ----- docs/changelog/105371.yaml | 5 ----- docs/changelog/105373.yaml | 5 ----- docs/changelog/105391.yaml | 5 ----- docs/changelog/105403.yaml | 6 ------ docs/changelog/105427.yaml | 5 ----- docs/changelog/105428.yaml | 5 ----- docs/changelog/105429.yaml | 5 ----- docs/changelog/105440.yaml | 6 ------ docs/changelog/105442.yaml | 6 ------ docs/changelog/105458.yaml | 5 ----- docs/changelog/105468.yaml | 5 ----- docs/changelog/105476.yaml | 6 ------ docs/changelog/105486.yaml | 5 ----- docs/changelog/105499.yaml | 5 ----- docs/changelog/105546.yaml | 6 ------ docs/changelog/105578.yaml | 13 ------------- docs/changelog/105588.yaml | 5 ----- docs/changelog/105593.yaml | 5 ----- docs/changelog/105633.yaml | 6 ------ docs/changelog/105650.yaml | 6 ------ docs/changelog/105691.yaml | 5 ----- docs/changelog/105770.yaml | 5 ----- docs/changelog/105772.yaml | 5 ----- docs/changelog/105789.yaml | 6 ------ docs/changelog/105848.yaml | 5 ----- docs/changelog/105941.yaml | 5 ----- docs/changelog/105945.yaml | 5 ----- docs/changelog/105987.yaml | 6 ------ docs/changelog/105994.yaml | 5 ----- docs/changelog/106020.yaml | 5 ----- docs/changelog/106057.yaml | 5 ----- docs/changelog/106060.yaml | 5 ----- docs/changelog/106062.yaml | 6 ------ docs/changelog/106105.yaml | 5 ----- docs/changelog/106156.yaml | 6 ------ docs/changelog/106288.yaml | 5 ----- docs/changelog/106329.yaml | 5 ----- docs/changelog/106351.yaml | 6 ------ docs/changelog/106392.yaml | 6 ------ docs/changelog/106398.yaml | 6 ------ docs/changelog/106544.yaml | 6 ------ docs/changelog/106574.yaml | 6 ------ 
docs/changelog/96235.yaml | 5 ----- docs/changelog/99142.yaml | 6 ------ docs/changelog/99747.yaml | 19 ------------------- docs/changelog/99961.yaml | 6 ------ 257 files changed, 1445 deletions(-) delete mode 100644 docs/changelog/100740.yaml delete mode 100644 docs/changelog/100813.yaml delete mode 100644 docs/changelog/101209.yaml delete mode 100644 docs/changelog/101487.yaml delete mode 100644 docs/changelog/101640.yaml delete mode 100644 docs/changelog/101656.yaml delete mode 100644 docs/changelog/101717.yaml delete mode 100644 docs/changelog/101872.yaml delete mode 100644 docs/changelog/102078.yaml delete mode 100644 docs/changelog/102207.yaml delete mode 100644 docs/changelog/102371.yaml delete mode 100644 docs/changelog/102428.yaml delete mode 100644 docs/changelog/102435.yaml delete mode 100644 docs/changelog/102557.yaml delete mode 100644 docs/changelog/102559.yaml delete mode 100644 docs/changelog/102584.yaml delete mode 100644 docs/changelog/102726.yaml delete mode 100644 docs/changelog/102759.yaml delete mode 100644 docs/changelog/102765.yaml delete mode 100644 docs/changelog/102782.yaml delete mode 100644 docs/changelog/102798.yaml delete mode 100644 docs/changelog/102824.yaml delete mode 100644 docs/changelog/102862.yaml delete mode 100644 docs/changelog/102879.yaml delete mode 100644 docs/changelog/102885.yaml delete mode 100644 docs/changelog/103025.yaml delete mode 100644 docs/changelog/103032.yaml delete mode 100644 docs/changelog/103033.yaml delete mode 100644 docs/changelog/103035.yaml delete mode 100644 docs/changelog/103084.yaml delete mode 100644 docs/changelog/103091.yaml delete mode 100644 docs/changelog/103099.yaml delete mode 100644 docs/changelog/103130.yaml delete mode 100644 docs/changelog/103160.yaml delete mode 100644 docs/changelog/103171.yaml delete mode 100644 docs/changelog/103176.yaml delete mode 100644 docs/changelog/103178.yaml delete mode 100644 docs/changelog/103190.yaml delete mode 100644 docs/changelog/103223.yaml delete mode 100644 docs/changelog/103232.yaml delete mode 100644 docs/changelog/103300.yaml delete mode 100644 docs/changelog/103309.yaml delete mode 100644 docs/changelog/103310.yaml delete mode 100644 docs/changelog/103316.yaml delete mode 100644 docs/changelog/103325.yaml delete mode 100644 docs/changelog/103340.yaml delete mode 100644 docs/changelog/103387.yaml delete mode 100644 docs/changelog/103398.yaml delete mode 100644 docs/changelog/103399.yaml delete mode 100644 docs/changelog/103434.yaml delete mode 100644 docs/changelog/103453.yaml delete mode 100644 docs/changelog/103461.yaml delete mode 100644 docs/changelog/103481.yaml delete mode 100644 docs/changelog/103510.yaml delete mode 100644 docs/changelog/103520.yaml delete mode 100644 docs/changelog/103535.yaml delete mode 100644 docs/changelog/103538.yaml delete mode 100644 docs/changelog/103555.yaml delete mode 100644 docs/changelog/103592.yaml delete mode 100644 docs/changelog/103610.yaml delete mode 100644 docs/changelog/103627.yaml delete mode 100644 docs/changelog/103628.yaml delete mode 100644 docs/changelog/103632.yaml delete mode 100644 docs/changelog/103633.yaml delete mode 100644 docs/changelog/103643.yaml delete mode 100644 docs/changelog/103646.yaml delete mode 100644 docs/changelog/103648.yaml delete mode 100644 docs/changelog/103651.yaml delete mode 100644 docs/changelog/103656.yaml delete mode 100644 docs/changelog/103669.yaml delete mode 100644 docs/changelog/103673.yaml delete mode 100644 docs/changelog/103681.yaml delete mode 100644 
docs/changelog/103682.yaml delete mode 100644 docs/changelog/103698.yaml delete mode 100644 docs/changelog/103710.yaml delete mode 100644 docs/changelog/103720.yaml delete mode 100644 docs/changelog/103727.yaml delete mode 100644 docs/changelog/103763.yaml delete mode 100644 docs/changelog/103783.yaml delete mode 100644 docs/changelog/103807.yaml delete mode 100644 docs/changelog/103821.yaml delete mode 100644 docs/changelog/103846.yaml delete mode 100644 docs/changelog/103898.yaml delete mode 100644 docs/changelog/103903.yaml delete mode 100644 docs/changelog/103920.yaml delete mode 100644 docs/changelog/103922.yaml delete mode 100644 docs/changelog/103928.yaml delete mode 100644 docs/changelog/103948.yaml delete mode 100644 docs/changelog/103949.yaml delete mode 100644 docs/changelog/103959.yaml delete mode 100644 docs/changelog/103973.yaml delete mode 100644 docs/changelog/103996.yaml delete mode 100644 docs/changelog/104006.yaml delete mode 100644 docs/changelog/104030.yaml delete mode 100644 docs/changelog/104033.yaml delete mode 100644 docs/changelog/104043.yaml delete mode 100644 docs/changelog/104063.yaml delete mode 100644 docs/changelog/104077.yaml delete mode 100644 docs/changelog/104091.yaml delete mode 100644 docs/changelog/104092.yaml delete mode 100644 docs/changelog/104099.yaml delete mode 100644 docs/changelog/104113.yaml delete mode 100644 docs/changelog/104118.yaml delete mode 100644 docs/changelog/104122.yaml delete mode 100644 docs/changelog/104132.yaml delete mode 100644 docs/changelog/104142.yaml delete mode 100644 docs/changelog/104150.yaml delete mode 100644 docs/changelog/104155.yaml delete mode 100644 docs/changelog/104182.yaml delete mode 100644 docs/changelog/104200.yaml delete mode 100644 docs/changelog/104209.yaml delete mode 100644 docs/changelog/104218.yaml delete mode 100644 docs/changelog/104227.yaml delete mode 100644 docs/changelog/104230.yaml delete mode 100644 docs/changelog/104265.yaml delete mode 100644 docs/changelog/104269.yaml delete mode 100644 docs/changelog/104309.yaml delete mode 100644 docs/changelog/104320.yaml delete mode 100644 docs/changelog/104334.yaml delete mode 100644 docs/changelog/104355.yaml delete mode 100644 docs/changelog/104356.yaml delete mode 100644 docs/changelog/104363.yaml delete mode 100644 docs/changelog/104386.yaml delete mode 100644 docs/changelog/104387.yaml delete mode 100644 docs/changelog/104394.yaml delete mode 100644 docs/changelog/104396.yaml delete mode 100644 docs/changelog/104406.yaml delete mode 100644 docs/changelog/104407.yaml delete mode 100644 docs/changelog/104408.yaml delete mode 100644 docs/changelog/104433.yaml delete mode 100644 docs/changelog/104440.yaml delete mode 100644 docs/changelog/104460.yaml delete mode 100644 docs/changelog/104483.yaml delete mode 100644 docs/changelog/104500.yaml delete mode 100644 docs/changelog/104505.yaml delete mode 100644 docs/changelog/104529.yaml delete mode 100644 docs/changelog/104553.yaml delete mode 100644 docs/changelog/104559.yaml delete mode 100644 docs/changelog/104573.yaml delete mode 100644 docs/changelog/104574.yaml delete mode 100644 docs/changelog/104575.yaml delete mode 100644 docs/changelog/104581.yaml delete mode 100644 docs/changelog/104594.yaml delete mode 100644 docs/changelog/104614.yaml delete mode 100644 docs/changelog/104625.yaml delete mode 100644 docs/changelog/104636.yaml delete mode 100644 docs/changelog/104643.yaml delete mode 100644 docs/changelog/104648.yaml delete mode 100644 docs/changelog/104654.yaml delete mode 100644 
docs/changelog/104665.yaml delete mode 100644 docs/changelog/104666.yaml delete mode 100644 docs/changelog/104674.yaml delete mode 100644 docs/changelog/104718.yaml delete mode 100644 docs/changelog/104721.yaml delete mode 100644 docs/changelog/104730.yaml delete mode 100644 docs/changelog/104750.yaml delete mode 100644 docs/changelog/104753.yaml delete mode 100644 docs/changelog/104778.yaml delete mode 100644 docs/changelog/104784.yaml delete mode 100644 docs/changelog/104787.yaml delete mode 100644 docs/changelog/104796.yaml delete mode 100644 docs/changelog/104840.yaml delete mode 100644 docs/changelog/104859.yaml delete mode 100644 docs/changelog/104872.yaml delete mode 100644 docs/changelog/104878.yaml delete mode 100644 docs/changelog/104893.yaml delete mode 100644 docs/changelog/104895.yaml delete mode 100644 docs/changelog/104905.yaml delete mode 100644 docs/changelog/104909.yaml delete mode 100644 docs/changelog/104911.yaml delete mode 100644 docs/changelog/104927.yaml delete mode 100644 docs/changelog/104936.yaml delete mode 100644 docs/changelog/104943.yaml delete mode 100644 docs/changelog/104949.yaml delete mode 100644 docs/changelog/104958.yaml delete mode 100644 docs/changelog/104982.yaml delete mode 100644 docs/changelog/104993.yaml delete mode 100644 docs/changelog/104996.yaml delete mode 100644 docs/changelog/105015.yaml delete mode 100644 docs/changelog/105044.yaml delete mode 100644 docs/changelog/105055.yaml delete mode 100644 docs/changelog/105062.yaml delete mode 100644 docs/changelog/105064.yaml delete mode 100644 docs/changelog/105081.yaml delete mode 100644 docs/changelog/105088.yaml delete mode 100644 docs/changelog/105089.yaml delete mode 100644 docs/changelog/105103.yaml delete mode 100644 docs/changelog/105105.yaml delete mode 100644 docs/changelog/105131.yaml delete mode 100644 docs/changelog/105150.yaml delete mode 100644 docs/changelog/105163.yaml delete mode 100644 docs/changelog/105164.yaml delete mode 100644 docs/changelog/105178.yaml delete mode 100644 docs/changelog/105180.yaml delete mode 100644 docs/changelog/105192.yaml delete mode 100644 docs/changelog/105196.yaml delete mode 100644 docs/changelog/105207.yaml delete mode 100644 docs/changelog/105221.yaml delete mode 100644 docs/changelog/105223.yaml delete mode 100644 docs/changelog/105232.yaml delete mode 100644 docs/changelog/105249.yaml delete mode 100644 docs/changelog/105259.yaml delete mode 100644 docs/changelog/105265.yaml delete mode 100644 docs/changelog/105269.yaml delete mode 100644 docs/changelog/105272.yaml delete mode 100644 docs/changelog/105273.yaml delete mode 100644 docs/changelog/105289.yaml delete mode 100644 docs/changelog/105299.yaml delete mode 100644 docs/changelog/105325.yaml delete mode 100644 docs/changelog/105334.yaml delete mode 100644 docs/changelog/105346.yaml delete mode 100644 docs/changelog/105371.yaml delete mode 100644 docs/changelog/105373.yaml delete mode 100644 docs/changelog/105391.yaml delete mode 100644 docs/changelog/105403.yaml delete mode 100644 docs/changelog/105427.yaml delete mode 100644 docs/changelog/105428.yaml delete mode 100644 docs/changelog/105429.yaml delete mode 100644 docs/changelog/105440.yaml delete mode 100644 docs/changelog/105442.yaml delete mode 100644 docs/changelog/105458.yaml delete mode 100644 docs/changelog/105468.yaml delete mode 100644 docs/changelog/105476.yaml delete mode 100644 docs/changelog/105486.yaml delete mode 100644 docs/changelog/105499.yaml delete mode 100644 docs/changelog/105546.yaml delete mode 100644 
docs/changelog/105578.yaml delete mode 100644 docs/changelog/105588.yaml delete mode 100644 docs/changelog/105593.yaml delete mode 100644 docs/changelog/105633.yaml delete mode 100644 docs/changelog/105650.yaml delete mode 100644 docs/changelog/105691.yaml delete mode 100644 docs/changelog/105770.yaml delete mode 100644 docs/changelog/105772.yaml delete mode 100644 docs/changelog/105789.yaml delete mode 100644 docs/changelog/105848.yaml delete mode 100644 docs/changelog/105941.yaml delete mode 100644 docs/changelog/105945.yaml delete mode 100644 docs/changelog/105987.yaml delete mode 100644 docs/changelog/105994.yaml delete mode 100644 docs/changelog/106020.yaml delete mode 100644 docs/changelog/106057.yaml delete mode 100644 docs/changelog/106060.yaml delete mode 100644 docs/changelog/106062.yaml delete mode 100644 docs/changelog/106105.yaml delete mode 100644 docs/changelog/106156.yaml delete mode 100644 docs/changelog/106288.yaml delete mode 100644 docs/changelog/106329.yaml delete mode 100644 docs/changelog/106351.yaml delete mode 100644 docs/changelog/106392.yaml delete mode 100644 docs/changelog/106398.yaml delete mode 100644 docs/changelog/106544.yaml delete mode 100644 docs/changelog/106574.yaml delete mode 100644 docs/changelog/96235.yaml delete mode 100644 docs/changelog/99142.yaml delete mode 100644 docs/changelog/99747.yaml delete mode 100644 docs/changelog/99961.yaml diff --git a/docs/changelog/100740.yaml b/docs/changelog/100740.yaml deleted file mode 100644 index c93fbf676ef81..0000000000000 --- a/docs/changelog/100740.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100740 -summary: "ESQL: Referencing expressions that contain backticks requires <>." -area: ES|QL -type: enhancement -issues: - - 100312 diff --git a/docs/changelog/100813.yaml b/docs/changelog/100813.yaml deleted file mode 100644 index 476098b62c106..0000000000000 --- a/docs/changelog/100813.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100813 -summary: Make `ParentTaskAssigningClient.getRemoteClusterClient` method also return - `ParentTaskAssigningClient` -area: Infra/Transport API -type: enhancement -issues: [] diff --git a/docs/changelog/101209.yaml b/docs/changelog/101209.yaml deleted file mode 100644 index debec27e61307..0000000000000 --- a/docs/changelog/101209.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101209 -summary: "Making `k` and `num_candidates` optional for knn search" -area: Vector Search -type: enhancement -issues: - - 97533 diff --git a/docs/changelog/101487.yaml b/docs/changelog/101487.yaml deleted file mode 100644 index b4531f7fd6f75..0000000000000 --- a/docs/changelog/101487.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101487 -summary: Wait for async searches to finish when shutting down -area: Infra/Node Lifecycle -type: enhancement -issues: [] diff --git a/docs/changelog/101640.yaml b/docs/changelog/101640.yaml deleted file mode 100644 index 6f61a3a3ffd84..0000000000000 --- a/docs/changelog/101640.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101640 -summary: Support cross clusters query in ESQL -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/101656.yaml b/docs/changelog/101656.yaml deleted file mode 100644 index 7cd4f30cae849..0000000000000 --- a/docs/changelog/101656.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101656 -summary: Adjust interception of requests for specific shard IDs -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/101717.yaml b/docs/changelog/101717.yaml deleted file mode 100644 index 7e97ef1049f88..0000000000000 --- a/docs/changelog/101717.yaml +++ /dev/null 
@@ -1,5 +0,0 @@ -pr: 101717 -summary: Pause shard snapshots on graceful shutdown -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/101872.yaml b/docs/changelog/101872.yaml deleted file mode 100644 index 1c63c2d8b009a..0000000000000 --- a/docs/changelog/101872.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101872 -summary: "Add `require_data_stream` parameter to indexing requests to enforce indexing operations target a data stream" -area: Data streams -type: feature -issues: - - 97032 diff --git a/docs/changelog/102078.yaml b/docs/changelog/102078.yaml deleted file mode 100644 index d031aa0dbf6f7..0000000000000 --- a/docs/changelog/102078.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102078 -summary: Derive expected replica size from primary -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/102207.yaml b/docs/changelog/102207.yaml deleted file mode 100644 index 8b247828845f4..0000000000000 --- a/docs/changelog/102207.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102207 -summary: Fix disk computation when initializing unassigned shards in desired balance - computation -area: Allocation -type: bug -issues: [] diff --git a/docs/changelog/102371.yaml b/docs/changelog/102371.yaml deleted file mode 100644 index 5a698bc9d671a..0000000000000 --- a/docs/changelog/102371.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102371 -summary: Adding threadpool metrics -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/102428.yaml b/docs/changelog/102428.yaml deleted file mode 100644 index 275492fa6a888..0000000000000 --- a/docs/changelog/102428.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102428 -summary: "ESQL: Add option to drop null fields" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102435.yaml b/docs/changelog/102435.yaml deleted file mode 100644 index e8905b08f1adc..0000000000000 --- a/docs/changelog/102435.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102435 -summary: S3 first byte latency metric -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/102557.yaml b/docs/changelog/102557.yaml deleted file mode 100644 index dfca1763064d4..0000000000000 --- a/docs/changelog/102557.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102557 -summary: Metrics for search latencies -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/102559.yaml b/docs/changelog/102559.yaml deleted file mode 100644 index ad0867ab087b9..0000000000000 --- a/docs/changelog/102559.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102559 -summary: "Prune unnecessary information from TransportNodesStatsAction.NodeStatsRequest" -area: Network -type: enhancement -issues: [100878] diff --git a/docs/changelog/102584.yaml b/docs/changelog/102584.yaml deleted file mode 100644 index 44ff5dd9f7461..0000000000000 --- a/docs/changelog/102584.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102584 -summary: Expose some ML metrics via APM -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102726.yaml b/docs/changelog/102726.yaml deleted file mode 100644 index bc5b311481123..0000000000000 --- a/docs/changelog/102726.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102726 -summary: Resolve Cluster API -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/102759.yaml b/docs/changelog/102759.yaml deleted file mode 100644 index 1c002ef2b678e..0000000000000 --- a/docs/changelog/102759.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102759 -summary: Close rather than stop `HttpServerTransport` on shutdown -area: Infra/Node Lifecycle 
-type: bug -issues: - - 102501 diff --git a/docs/changelog/102765.yaml b/docs/changelog/102765.yaml deleted file mode 100644 index eb73da2650542..0000000000000 --- a/docs/changelog/102765.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102765 -summary: "Add APM metrics to `HealthPeriodicLogger`" -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/102782.yaml b/docs/changelog/102782.yaml deleted file mode 100644 index ed0a004765859..0000000000000 --- a/docs/changelog/102782.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102782 -summary: Upgrade to Lucene 9.9.0 -area: Search -type: upgrade -issues: [] diff --git a/docs/changelog/102798.yaml b/docs/changelog/102798.yaml deleted file mode 100644 index 986ad99f96a19..0000000000000 --- a/docs/changelog/102798.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102798 -summary: Hot-reloadable remote cluster credentials -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/102824.yaml b/docs/changelog/102824.yaml deleted file mode 100644 index 21b39a4c3999d..0000000000000 --- a/docs/changelog/102824.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102824 -summary: Change detection aggregation improvements -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102862.yaml b/docs/changelog/102862.yaml deleted file mode 100644 index bb453163009d5..0000000000000 --- a/docs/changelog/102862.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102862 -summary: Add optional pruning configuration (weighted terms scoring) to text expansion query -area: "Machine Learning" -type: enhancement -issues: [] diff --git a/docs/changelog/102879.yaml b/docs/changelog/102879.yaml deleted file mode 100644 index b35d36dd0a3a9..0000000000000 --- a/docs/changelog/102879.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102879 -summary: Fix disk computation when initializing new shards -area: Allocation -type: bug -issues: [] diff --git a/docs/changelog/102885.yaml b/docs/changelog/102885.yaml deleted file mode 100644 index 7a998c3eb1f66..0000000000000 --- a/docs/changelog/102885.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102885 -summary: Make field limit more predictable -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/103025.yaml b/docs/changelog/103025.yaml deleted file mode 100644 index 856a7c022d5dd..0000000000000 --- a/docs/changelog/103025.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103025 -summary: "Metrics: Allow `AsyncCounters` to switch providers" -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/103032.yaml b/docs/changelog/103032.yaml deleted file mode 100644 index 81d84fca0bdb0..0000000000000 --- a/docs/changelog/103032.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103032 -summary: "x-pack/plugin/apm-data: Map some APM fields as flattened and fix error.grouping_name script" -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/103033.yaml b/docs/changelog/103033.yaml deleted file mode 100644 index 30f8e182b9998..0000000000000 --- a/docs/changelog/103033.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103033 -summary: "X-pack/plugin/core: rename `double_metrics` template" -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/103035.yaml b/docs/changelog/103035.yaml deleted file mode 100644 index 5b1c9d6629767..0000000000000 --- a/docs/changelog/103035.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103035 -summary: "x-pack/plugin/core: add `match_mapping_type` to `ecs@mappings` dynamic templates" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/103084.yaml 
b/docs/changelog/103084.yaml deleted file mode 100644 index fb5a718a086de..0000000000000 --- a/docs/changelog/103084.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103084 -summary: Return `matched_queries` in Percolator -area: Percolator -type: enhancement -issues: - - 10163 diff --git a/docs/changelog/103091.yaml b/docs/changelog/103091.yaml deleted file mode 100644 index ae4ac11933d4e..0000000000000 --- a/docs/changelog/103091.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103091 -summary: "Metrics: Handle null observations in observers" -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/103099.yaml b/docs/changelog/103099.yaml deleted file mode 100644 index c3fd3f9d7b8e4..0000000000000 --- a/docs/changelog/103099.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103099 -summary: "ESQL: Simpify IS NULL/IS NOT NULL evaluation" -area: ES|QL -type: enhancement -issues: - - 103097 diff --git a/docs/changelog/103130.yaml b/docs/changelog/103130.yaml deleted file mode 100644 index 3ef56ae84d123..0000000000000 --- a/docs/changelog/103130.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103130 -summary: Create a DSL health indicator as part of the health API -area: Health -type: feature -issues: [] diff --git a/docs/changelog/103160.yaml b/docs/changelog/103160.yaml deleted file mode 100644 index 7701aa2b4a8d4..0000000000000 --- a/docs/changelog/103160.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103160 -summary: Set thread name used by REST client -area: Java Low Level REST Client -type: enhancement -issues: [] diff --git a/docs/changelog/103171.yaml b/docs/changelog/103171.yaml deleted file mode 100644 index 95ad6a1ea77c2..0000000000000 --- a/docs/changelog/103171.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 103171 -summary: "Add `unmatch_mapping_type`, and support array of types" -area: Mapping -type: feature -issues: - - 102807 - - 102795 diff --git a/docs/changelog/103176.yaml b/docs/changelog/103176.yaml deleted file mode 100644 index a0f46c1462f62..0000000000000 --- a/docs/changelog/103176.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103176 -summary: Validate settings in `ReloadSecureSettings` API -area: Client -type: bug -issues: [] diff --git a/docs/changelog/103178.yaml b/docs/changelog/103178.yaml deleted file mode 100644 index 5da0221a68984..0000000000000 --- a/docs/changelog/103178.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103178 -summary: Expose API key authentication metrics -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/103190.yaml b/docs/changelog/103190.yaml deleted file mode 100644 index 5e6927d3eadd7..0000000000000 --- a/docs/changelog/103190.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103190 -summary: ILM/SLM history policies forcemerge in hot and dsl configuration -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/103223.yaml b/docs/changelog/103223.yaml deleted file mode 100644 index c2f4c1b6a2cf4..0000000000000 --- a/docs/changelog/103223.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 103223 -summary: "[Synonyms] Mark Synonyms as GA" -area: "Search" -type: feature -issues: [] -highlight: - title: "GA Release of Synonyms API" - body: |- - Removes the beta label for the Synonyms API to make it GA. 
- notable: true diff --git a/docs/changelog/103232.yaml b/docs/changelog/103232.yaml deleted file mode 100644 index b955e7abb7683..0000000000000 --- a/docs/changelog/103232.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103232 -summary: "Remove leniency in msearch parsing" -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/103300.yaml b/docs/changelog/103300.yaml deleted file mode 100644 index a536a673b7827..0000000000000 --- a/docs/changelog/103300.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103300 -summary: Retry indefinitely for s3 indices blob read errors -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/103309.yaml b/docs/changelog/103309.yaml deleted file mode 100644 index 94b2a31127870..0000000000000 --- a/docs/changelog/103309.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103309 -summary: Introduce lazy rollover for mapping updates in data streams -area: Data streams -type: enhancement -issues: - - 89346 diff --git a/docs/changelog/103310.yaml b/docs/changelog/103310.yaml deleted file mode 100644 index a7a0746b6b8c4..0000000000000 --- a/docs/changelog/103310.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103310 -summary: Revert "Validate settings in `ReloadSecureSettings` API" -area: Security -type: bug -issues: [] diff --git a/docs/changelog/103316.yaml b/docs/changelog/103316.yaml deleted file mode 100644 index 47eddcc34d924..0000000000000 --- a/docs/changelog/103316.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103316 -summary: Review KEEP logic to prevent duplicate column names -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/103325.yaml b/docs/changelog/103325.yaml deleted file mode 100644 index 7de6c41986490..0000000000000 --- a/docs/changelog/103325.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103325 -summary: Added Duplicate Word Check Feature to Analysis Nori -area: Search -type: feature -issues: - - 103321 diff --git a/docs/changelog/103340.yaml b/docs/changelog/103340.yaml deleted file mode 100644 index 21280dbfc857d..0000000000000 --- a/docs/changelog/103340.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103340 -summary: Avoid humongous blocks -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/103387.yaml b/docs/changelog/103387.yaml deleted file mode 100644 index 77239fb9a3778..0000000000000 --- a/docs/changelog/103387.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103387 -summary: Upgrade to Lucene 9.9.1 -area: Search -type: upgrade -issues: [] diff --git a/docs/changelog/103398.yaml b/docs/changelog/103398.yaml deleted file mode 100644 index 69452616ddc99..0000000000000 --- a/docs/changelog/103398.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103398 -summary: ES|QL Async Query API -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/103399.yaml b/docs/changelog/103399.yaml deleted file mode 100644 index 440ac90b313f5..0000000000000 --- a/docs/changelog/103399.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103399 -summary: "add validation on _id field when upsert new doc" -area: Search -type: bug -issues: - - 102981 diff --git a/docs/changelog/103434.yaml b/docs/changelog/103434.yaml deleted file mode 100644 index 56af604fe08f7..0000000000000 --- a/docs/changelog/103434.yaml +++ /dev/null @@ -1,11 +0,0 @@ -pr: 103434 -summary: Lower the `look_ahead_time` index setting's max value from 7 days to 2 hours. 
-area: TSDB -type: breaking -issues: [] -breaking: - title: Lower the `look_ahead_time` index setting's max value - area: Index setting - details: "Lower the `look_ahead_time` index setting's max value from 7 days to 2 hours." - impact: "Any value between 2 hours and 7 days will be as a look ahead time of 2 hours is defined" - notable: false diff --git a/docs/changelog/103453.yaml b/docs/changelog/103453.yaml deleted file mode 100644 index 4b7dab77c8b23..0000000000000 --- a/docs/changelog/103453.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103453 -summary: Add expiration time to update api key api -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/103461.yaml b/docs/changelog/103461.yaml deleted file mode 100644 index 3a1bf30aa90c9..0000000000000 --- a/docs/changelog/103461.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103461 -summary: Add support for Well Known Binary (WKB) in the fields API for spatial fields -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/103481.yaml b/docs/changelog/103481.yaml deleted file mode 100644 index f7c7c0b6eecc9..0000000000000 --- a/docs/changelog/103481.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103481 -summary: Redirect failed ingest node operations to a failure store when available -area: Data streams -type: feature -issues: [] diff --git a/docs/changelog/103510.yaml b/docs/changelog/103510.yaml deleted file mode 100644 index 50ec8efd5c440..0000000000000 --- a/docs/changelog/103510.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103510 -summary: "ES|QL: better management of exact subfields for TEXT fields" -area: ES|QL -type: bug -issues: - - 99899 diff --git a/docs/changelog/103520.yaml b/docs/changelog/103520.yaml deleted file mode 100644 index 0ef7124eb1ed2..0000000000000 --- a/docs/changelog/103520.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103520 -summary: Request indexing memory pressure in APM node metrics publisher -area: Distributed -type: bug -issues: [] diff --git a/docs/changelog/103535.yaml b/docs/changelog/103535.yaml deleted file mode 100644 index 80cf6e1ea709a..0000000000000 --- a/docs/changelog/103535.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103535 -summary: Add replay diagnostic dir to system jvm options -area: Infra/CLI -type: enhancement -issues: [] diff --git a/docs/changelog/103538.yaml b/docs/changelog/103538.yaml deleted file mode 100644 index 5aaed771d5ee4..0000000000000 --- a/docs/changelog/103538.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103538 -summary: "ESQL: Improve pushdown of certain filters" -area: ES|QL -type: bug -issues: - - 103536 diff --git a/docs/changelog/103555.yaml b/docs/changelog/103555.yaml deleted file mode 100644 index 2b0dc2692e252..0000000000000 --- a/docs/changelog/103555.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103555 -summary: "[Security Solution] Allow write permission for `kibana_system` role on endpoint\ - \ response index" -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/103592.yaml b/docs/changelog/103592.yaml deleted file mode 100644 index 21e06f1f5a10d..0000000000000 --- a/docs/changelog/103592.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103592 -summary: Remove deprecated Block APIs -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/103610.yaml b/docs/changelog/103610.yaml deleted file mode 100644 index 1ed38cc2822bd..0000000000000 --- a/docs/changelog/103610.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103610 -summary: "ESQL: allow `null` in date math" -area: ES|QL -type: bug -issues: - - 103085 diff --git a/docs/changelog/103627.yaml 
b/docs/changelog/103627.yaml deleted file mode 100644 index 4b0d9e937542e..0000000000000 --- a/docs/changelog/103627.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103627 -summary: Add gradle tasks and code to modify and access mappings between version ids and release versions -area: Infra/Core -type: feature -issues: [] diff --git a/docs/changelog/103628.yaml b/docs/changelog/103628.yaml deleted file mode 100644 index 42259c7bcde46..0000000000000 --- a/docs/changelog/103628.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103628 -summary: Add ES|QL async delete API -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/103632.yaml b/docs/changelog/103632.yaml deleted file mode 100644 index 1d83c6528f371..0000000000000 --- a/docs/changelog/103632.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103632 -summary: "ESQL: Check field exists before load from `_source`" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/103633.yaml b/docs/changelog/103633.yaml deleted file mode 100644 index 9e36451caafd8..0000000000000 --- a/docs/changelog/103633.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103633 -summary: Update s3 latency metric to use micros -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/103643.yaml b/docs/changelog/103643.yaml deleted file mode 100644 index 966fb57acf566..0000000000000 --- a/docs/changelog/103643.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103643 -summary: "[Profiling] Use shard request cache consistently" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/103646.yaml b/docs/changelog/103646.yaml deleted file mode 100644 index b7a6fae025771..0000000000000 --- a/docs/changelog/103646.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103646 -summary: Add index mapping parameter for `counted_keyword` -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/103648.yaml b/docs/changelog/103648.yaml deleted file mode 100644 index d4fa489a6812c..0000000000000 --- a/docs/changelog/103648.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103648 -summary: Introduce experimental pass-through field type -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/103651.yaml b/docs/changelog/103651.yaml deleted file mode 100644 index 1106044b31fd2..0000000000000 --- a/docs/changelog/103651.yaml +++ /dev/null @@ -1,12 +0,0 @@ -pr: 103651 -summary: Flag in `_field_caps` to return only fields with values in index -area: Search -type: enhancement -issues: [] -highlight: - title: Flag in `_field_caps` to return only fields with values in index - body: |- - We added support for filtering the field capabilities API output by removing - fields that don't have a value. This can be done through the newly added - `include_empty_fields` parameter, which defaults to true. 
- notable: true diff --git a/docs/changelog/103656.yaml b/docs/changelog/103656.yaml deleted file mode 100644 index 24bd8814029ff..0000000000000 --- a/docs/changelog/103656.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103656 -summary: "ESQL: add =~ operator (case insensitive equality)" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/103669.yaml b/docs/changelog/103669.yaml deleted file mode 100644 index 57361b9d842e4..0000000000000 --- a/docs/changelog/103669.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103669 -summary: Validate inference model ids -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/103673.yaml b/docs/changelog/103673.yaml deleted file mode 100644 index f786b57eba411..0000000000000 --- a/docs/changelog/103673.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103673 -summary: "ESQL: Infer not null for aggregated fields" -area: ES|QL -type: enhancement -issues: - - 102787 diff --git a/docs/changelog/103681.yaml b/docs/changelog/103681.yaml deleted file mode 100644 index bba73c8e3a7d4..0000000000000 --- a/docs/changelog/103681.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103681 -summary: "ESQL: Expand shallow copy with vecs" -area: ES|QL -type: enhancement -issues: - - 100528 diff --git a/docs/changelog/103682.yaml b/docs/changelog/103682.yaml deleted file mode 100644 index 109e77dd053a5..0000000000000 --- a/docs/changelog/103682.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103682 -summary: Use deduced mappings for determining proper fields' format even if `deduce_mappings==false` -area: Transform -type: bug -issues: - - 103115 diff --git a/docs/changelog/103698.yaml b/docs/changelog/103698.yaml deleted file mode 100644 index d94b70b54e505..0000000000000 --- a/docs/changelog/103698.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103698 -summary: Reading points from source to reduce precision loss -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/103710.yaml b/docs/changelog/103710.yaml deleted file mode 100644 index 539b9f553ccc2..0000000000000 --- a/docs/changelog/103710.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103710 -summary: List hidden shard stores by default -area: Store -type: enhancement -issues: [] diff --git a/docs/changelog/103720.yaml b/docs/changelog/103720.yaml deleted file mode 100644 index e0ee879988fa7..0000000000000 --- a/docs/changelog/103720.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103720 -summary: Add "step":"ERROR" to ILM explain response for missing policy -area: ILM+SLM -type: enhancement -issues: - - 99030 diff --git a/docs/changelog/103727.yaml b/docs/changelog/103727.yaml deleted file mode 100644 index f943ee7906d58..0000000000000 --- a/docs/changelog/103727.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103727 -summary: "ESQL: Track the rest of `DocVector`" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/103763.yaml b/docs/changelog/103763.yaml deleted file mode 100644 index e4d6556c77077..0000000000000 --- a/docs/changelog/103763.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103763 -summary: Ref count search response bytes -area: Search -type: enhancement -issues: - - 102657 diff --git a/docs/changelog/103783.yaml b/docs/changelog/103783.yaml deleted file mode 100644 index 47c32dd639310..0000000000000 --- a/docs/changelog/103783.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103783 -summary: "[Profiling] Mark all templates as managed" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/103807.yaml b/docs/changelog/103807.yaml deleted file mode 100644 index 3849edcc00ced..0000000000000 
--- a/docs/changelog/103807.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103807 -summary: "ESQL: Add single value checks on LIKE/RLIKE pushdown" -area: ES|QL -type: bug -issues: - - 103806 diff --git a/docs/changelog/103821.yaml b/docs/changelog/103821.yaml deleted file mode 100644 index 3279059acbe3e..0000000000000 --- a/docs/changelog/103821.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103821 -summary: "ESQL: Delay finding field load infrastructure" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/103846.yaml b/docs/changelog/103846.yaml deleted file mode 100644 index 0d34efabc0278..0000000000000 --- a/docs/changelog/103846.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103846 -summary: Support sampling in `counted_terms` aggregation -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/103898.yaml b/docs/changelog/103898.yaml deleted file mode 100644 index 73d89e49e8812..0000000000000 --- a/docs/changelog/103898.yaml +++ /dev/null @@ -1,14 +0,0 @@ -pr: 103898 -summary: Change `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes. -area: TSDB -type: breaking -issues: [] -breaking: - title: Change `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes. - area: Index setting - details: Lower the `index.look_ahead_time` index setting's max value from 2 hours to 30 minutes. - impact: > - Documents with @timestamp of 30 minutes or more in the future will be rejected. - Before documents with @timestamp of 2 hours or more in the future were rejected. - If the previous behaviour should be kept, then update the `index.look_ahead_time` setting to two hours before performing the upgrade. - notable: false diff --git a/docs/changelog/103903.yaml b/docs/changelog/103903.yaml deleted file mode 100644 index c2e5e710ac439..0000000000000 --- a/docs/changelog/103903.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103903 -summary: Account for reserved disk size -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/103920.yaml b/docs/changelog/103920.yaml deleted file mode 100644 index c4a0d3b06fc82..0000000000000 --- a/docs/changelog/103920.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103920 -summary: Use search to determine if cluster contains data -area: Application -type: bug -issues: [] diff --git a/docs/changelog/103922.yaml b/docs/changelog/103922.yaml deleted file mode 100644 index 4181a6e6b1e8a..0000000000000 --- a/docs/changelog/103922.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103922 -summary: Always test for spikes and dips as well as changes in the change point aggregation -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/103928.yaml b/docs/changelog/103928.yaml deleted file mode 100644 index a9e60ba33a686..0000000000000 --- a/docs/changelog/103928.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103928 -summary: "ESQL: `MV_FIRST` and `MV_LAST`" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/103948.yaml b/docs/changelog/103948.yaml deleted file mode 100644 index 3247183fc97bb..0000000000000 --- a/docs/changelog/103948.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103948 -summary: '''elasticsearch-certutil cert'' now verifies the issuing chain of the generated - certificate' -area: TLS -type: enhancement -issues: [] diff --git a/docs/changelog/103949.yaml b/docs/changelog/103949.yaml deleted file mode 100644 index 96bd76d89ceae..0000000000000 --- a/docs/changelog/103949.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103949 -summary: "ESQL: Introduce mode setting for 
ENRICH" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/103959.yaml b/docs/changelog/103959.yaml deleted file mode 100644 index 4c8b4413b95f8..0000000000000 --- a/docs/changelog/103959.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103959 -summary: Add `ApiKey` expiration time to audit log -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/103973.yaml b/docs/changelog/103973.yaml deleted file mode 100644 index f3bde76c7a559..0000000000000 --- a/docs/changelog/103973.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103973 -summary: Add stricter validation for api key expiration time -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/103996.yaml b/docs/changelog/103996.yaml deleted file mode 100644 index 699b93fff4f03..0000000000000 --- a/docs/changelog/103996.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103996 -summary: Ensure unique IDs between inference models and trained model deployments -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/104006.yaml b/docs/changelog/104006.yaml deleted file mode 100644 index d840502cdefbe..0000000000000 --- a/docs/changelog/104006.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104006 -summary: Add support for more than one `inner_hit` when searching nested vectors -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/104030.yaml b/docs/changelog/104030.yaml deleted file mode 100644 index 8fe30e6258653..0000000000000 --- a/docs/changelog/104030.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104030 -summary: Add the possibility to transform WKT to WKB directly -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/104033.yaml b/docs/changelog/104033.yaml deleted file mode 100644 index d3e167665732c..0000000000000 --- a/docs/changelog/104033.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104033 -summary: Add Query Users API -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/104043.yaml b/docs/changelog/104043.yaml deleted file mode 100644 index 86032e52fe208..0000000000000 --- a/docs/changelog/104043.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104043 -summary: Expose service account authentication metrics -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/104063.yaml b/docs/changelog/104063.yaml deleted file mode 100644 index 5f59022472c75..0000000000000 --- a/docs/changelog/104063.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104063 -summary: Add serverless scopes for Connector APIs -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/104077.yaml b/docs/changelog/104077.yaml deleted file mode 100644 index 7550e7388a29d..0000000000000 --- a/docs/changelog/104077.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104077 -summary: Retry updates to model snapshot ID on job config -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/104091.yaml b/docs/changelog/104091.yaml deleted file mode 100644 index 42609e42471f8..0000000000000 --- a/docs/changelog/104091.yaml +++ /dev/null @@ -1,11 +0,0 @@ -pr: 104091 -summary: "[ESQL] Remove is_nan, is_finite, and `is_infinite`" -area: ES|QL -type: breaking -issues: [] -breaking: - title: "[ESQL] Remove is_nan, is_finite, and `is_infinite`" - area: REST API - details: Removes the functions `is_nan`, `is_finite`, and `is_infinite`. - impact: Attempting to use the above functions will now be a planner time error. These functions are no longer supported. 
- notable: false diff --git a/docs/changelog/104092.yaml b/docs/changelog/104092.yaml deleted file mode 100644 index b40637d51765e..0000000000000 --- a/docs/changelog/104092.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104092 -summary: Ingest geoip processor cache 'no results' from the database -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/104099.yaml b/docs/changelog/104099.yaml deleted file mode 100644 index b4164896a5923..0000000000000 --- a/docs/changelog/104099.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104099 -summary: Fix `require_alias` implicit true value on presence -area: Indices APIs -type: bug -issues: - - 103945 diff --git a/docs/changelog/104113.yaml b/docs/changelog/104113.yaml deleted file mode 100644 index 3068291606578..0000000000000 --- a/docs/changelog/104113.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104113 -summary: "X-pack/plugin/apm-data: fix `@custom` pipeline support" -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/104118.yaml b/docs/changelog/104118.yaml deleted file mode 100644 index f5afb199bc5eb..0000000000000 --- a/docs/changelog/104118.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104118 -summary: "ESQL: add `date_diff` function" -area: ES|QL -type: enhancement -issues: - - 101942 diff --git a/docs/changelog/104122.yaml b/docs/changelog/104122.yaml deleted file mode 100644 index a88d7499bd44e..0000000000000 --- a/docs/changelog/104122.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104122 -summary: Consider currently refreshing data in the memory usage of refresh -area: Engine -type: bug -issues: [] diff --git a/docs/changelog/104132.yaml b/docs/changelog/104132.yaml deleted file mode 100644 index 87fe94ddcfcea..0000000000000 --- a/docs/changelog/104132.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104132 -summary: Add support for the `simple_query_string` to the Query API Key API -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/104142.yaml b/docs/changelog/104142.yaml deleted file mode 100644 index 08bf9ef759090..0000000000000 --- a/docs/changelog/104142.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104142 -summary: Expose token authentication metrics -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/104150.yaml b/docs/changelog/104150.yaml deleted file mode 100644 index c910542dcf7f6..0000000000000 --- a/docs/changelog/104150.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104150 -summary: Correct profiled rewrite time for knn with a pre-filter -area: Search -type: bug -issues: [] diff --git a/docs/changelog/104155.yaml b/docs/changelog/104155.yaml deleted file mode 100644 index 04d6a9920310a..0000000000000 --- a/docs/changelog/104155.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104155 -summary: "Updated `missingTrainedModel` message to include: you may need to create\ - \ it" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/104182.yaml b/docs/changelog/104182.yaml deleted file mode 100644 index b5cf10f941cc6..0000000000000 --- a/docs/changelog/104182.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104182 -summary: "Apm-data: fix `@custom` component templates" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/104200.yaml b/docs/changelog/104200.yaml deleted file mode 100644 index bc2aa2507f0ec..0000000000000 --- a/docs/changelog/104200.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104200 -summary: Expose realms authentication metrics -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/104209.yaml b/docs/changelog/104209.yaml 
deleted file mode 100644 index fabf06fb99c2e..0000000000000 --- a/docs/changelog/104209.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 104209 -summary: '`DesiredNode:` deprecate `node_version` field and make it optional (unused) - in current parser' -area: Distributed -type: deprecation -issues: [] -deprecation: - title: '`DesiredNode:` deprecate `node_version` field and make it optional for the current version' - area: REST API - details: The desired_node API includes a `node_version` field to perform validation on the new node version required. - This kind of check is too broad, and it's better done by external logic, so it has been removed, making the - `node_version` field not necessary. The field will be removed in a later version. - impact: Users should update their usages of `desired_node` to not include the `node_version` field anymore. diff --git a/docs/changelog/104218.yaml b/docs/changelog/104218.yaml deleted file mode 100644 index b3051008dc47b..0000000000000 --- a/docs/changelog/104218.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104218 -summary: "Support ST_CENTROID over spatial points" -area: "ES|QL" -type: enhancement -issues: - - 104656 diff --git a/docs/changelog/104227.yaml b/docs/changelog/104227.yaml deleted file mode 100644 index 64dcf844f23f2..0000000000000 --- a/docs/changelog/104227.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104227 -summary: Avoid wrapping searchers multiple times in mget -area: CRUD -type: enhancement -issues: - - 85069 diff --git a/docs/changelog/104230.yaml b/docs/changelog/104230.yaml deleted file mode 100644 index 94184f64586f5..0000000000000 --- a/docs/changelog/104230.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104230 -summary: Undeploy elser when inference model deleted -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/104265.yaml b/docs/changelog/104265.yaml deleted file mode 100644 index 88c3d72ee81d0..0000000000000 --- a/docs/changelog/104265.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104265 -summary: Remove `hashCode` and `equals` from `OperationModeUpdateTask` -area: ILM+SLM -type: bug -issues: - - 100871 diff --git a/docs/changelog/104269.yaml b/docs/changelog/104269.yaml deleted file mode 100644 index 8d4b0fc5d5198..0000000000000 --- a/docs/changelog/104269.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104269 -summary: "ESQL: Support loading shapes from source into WKB blocks" -area: "ES|QL" -type: enhancement -issues: [] diff --git a/docs/changelog/104309.yaml b/docs/changelog/104309.yaml deleted file mode 100644 index 4467eb6722afc..0000000000000 --- a/docs/changelog/104309.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104309 -summary: "ESQL: Add TO_UPPER and TO_LOWER functions" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/104320.yaml b/docs/changelog/104320.yaml deleted file mode 100644 index d2b0d09070fb9..0000000000000 --- a/docs/changelog/104320.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104320 -summary: Hot-reloadable LDAP bind password -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/104334.yaml b/docs/changelog/104334.yaml deleted file mode 100644 index ff242ee15141b..0000000000000 --- a/docs/changelog/104334.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104334 -summary: Automatically download the ELSER model when PUT in `_inference` -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/104355.yaml b/docs/changelog/104355.yaml deleted file mode 100644 index 2a100faf3c35f..0000000000000 --- a/docs/changelog/104355.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 
104355 -summary: Prepare enrich plan to support multi clusters -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/104356.yaml b/docs/changelog/104356.yaml deleted file mode 100644 index e0cb2311fbfc9..0000000000000 --- a/docs/changelog/104356.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104356 -summary: "[Profiling] Extract properties faster from source" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/104363.yaml b/docs/changelog/104363.yaml deleted file mode 100644 index 9d97991ea7fab..0000000000000 --- a/docs/changelog/104363.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104363 -summary: Apply windowing and chunking to long documents -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/104386.yaml b/docs/changelog/104386.yaml deleted file mode 100644 index 41b6a17424bbd..0000000000000 --- a/docs/changelog/104386.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104386 -summary: "X-pack/plugin/apm-data: add dynamic setting for enabling template registry" -area: Data streams -type: enhancement -issues: - - 104385 diff --git a/docs/changelog/104387.yaml b/docs/changelog/104387.yaml deleted file mode 100644 index f10084d8c4b32..0000000000000 --- a/docs/changelog/104387.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104387 -summary: "ESQL: Nested expressions inside stats command" -area: ES|QL -type: enhancement -issues: - - 99828 diff --git a/docs/changelog/104394.yaml b/docs/changelog/104394.yaml deleted file mode 100644 index 39fbfc0c4ea28..0000000000000 --- a/docs/changelog/104394.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104394 -summary: Endpoint to find positions of Grok pattern matches -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/104396.yaml b/docs/changelog/104396.yaml deleted file mode 100644 index 586fdc1b22624..0000000000000 --- a/docs/changelog/104396.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104396 -summary: Report current master in `PeerFinder` -area: Cluster Coordination -type: enhancement -issues: [] diff --git a/docs/changelog/104406.yaml b/docs/changelog/104406.yaml deleted file mode 100644 index d26ef664abc07..0000000000000 --- a/docs/changelog/104406.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104406 -summary: Support patch transport version from 8.12 -area: Downsampling -type: enhancement -issues: [] diff --git a/docs/changelog/104407.yaml b/docs/changelog/104407.yaml deleted file mode 100644 index 1ce6b6f97f580..0000000000000 --- a/docs/changelog/104407.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104407 -summary: Set read timeout for fetching IMDSv2 token -area: Discovery-Plugins -type: enhancement -issues: - - 104244 diff --git a/docs/changelog/104408.yaml b/docs/changelog/104408.yaml deleted file mode 100644 index 7303740168ea5..0000000000000 --- a/docs/changelog/104408.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104408 -summary: Move `TransportTermsEnumAction` coordination off transport threads -area: Search -type: bug -issues: [] diff --git a/docs/changelog/104433.yaml b/docs/changelog/104433.yaml deleted file mode 100644 index b3b292923e290..0000000000000 --- a/docs/changelog/104433.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104433 -summary: Added 3 automatic restarts for `pytorch_inference` processes which stop unexpectedly -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/104440.yaml b/docs/changelog/104440.yaml deleted file mode 100644 index 4242b7786f05f..0000000000000 --- a/docs/changelog/104440.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104440 -summary: Fix 
write index resolution when an alias is pointing to a TSDS -area: Data streams -type: bug -issues: - - 104189 diff --git a/docs/changelog/104460.yaml b/docs/changelog/104460.yaml deleted file mode 100644 index c92acdd5cb8ad..0000000000000 --- a/docs/changelog/104460.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104460 -summary: Dyamically adjust node metrics cache expire -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/104483.yaml b/docs/changelog/104483.yaml deleted file mode 100644 index 99917b4e8e017..0000000000000 --- a/docs/changelog/104483.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104483 -summary: Make `task_type` optional in `_inference` APIs -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/104500.yaml b/docs/changelog/104500.yaml deleted file mode 100644 index 61c45c6dde3cb..0000000000000 --- a/docs/changelog/104500.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104500 -summary: Thread pool metrics -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/104505.yaml b/docs/changelog/104505.yaml deleted file mode 100644 index 4d0c482a88d85..0000000000000 --- a/docs/changelog/104505.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104505 -summary: "Revert \"x-pack/plugin/apm-data: download geoip DB on pipeline creation\"" -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/104529.yaml b/docs/changelog/104529.yaml deleted file mode 100644 index 5b223a0924d86..0000000000000 --- a/docs/changelog/104529.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104529 -summary: Add rest spec for Query User API -area: Client -type: enhancement -issues: [] diff --git a/docs/changelog/104553.yaml b/docs/changelog/104553.yaml deleted file mode 100644 index e1f5c974bd74e..0000000000000 --- a/docs/changelog/104553.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104553 -summary: "ESQL: Fix a bug loading unindexed text fields" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/104559.yaml b/docs/changelog/104559.yaml deleted file mode 100644 index d6d030783c4cc..0000000000000 --- a/docs/changelog/104559.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104559 -summary: Adding support for Cohere inference service -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/104573.yaml b/docs/changelog/104573.yaml deleted file mode 100644 index a333bc3024772..0000000000000 --- a/docs/changelog/104573.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104573 -summary: Fix logger Strings.format calls -area: Distributed -type: bug -issues: [] diff --git a/docs/changelog/104574.yaml b/docs/changelog/104574.yaml deleted file mode 100644 index 68be002142fd9..0000000000000 --- a/docs/changelog/104574.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 104574 -summary: Deprecate `client.type` -area: Infra/Core -type: deprecation -issues: [] -deprecation: - title: Deprecate `client.type` - area: Cluster and node setting - details: The node setting `client.type` has been ignored since the node client was removed in 8.0. The setting is now deprecated and will be removed in a future release. 
- impact: Remove the `client.type` setting from `elasticsearch.yml` diff --git a/docs/changelog/104575.yaml b/docs/changelog/104575.yaml deleted file mode 100644 index ba17b705fca10..0000000000000 --- a/docs/changelog/104575.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104575 -summary: Introduce Alias.unwrap method -area: "Query Languages" -type: enhancement -issues: [] diff --git a/docs/changelog/104581.yaml b/docs/changelog/104581.yaml deleted file mode 100644 index 5f9b71acbfed7..0000000000000 --- a/docs/changelog/104581.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104581 -summary: Fix bogus assertion tripped by force-executed tasks -area: Infra/Core -type: bug -issues: - - 104580 diff --git a/docs/changelog/104594.yaml b/docs/changelog/104594.yaml deleted file mode 100644 index 7729eb028f68e..0000000000000 --- a/docs/changelog/104594.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104594 -summary: Support of `match` for the Query API Key API -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/104614.yaml b/docs/changelog/104614.yaml deleted file mode 100644 index 9b2c25a643825..0000000000000 --- a/docs/changelog/104614.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104614 -summary: Extend `repository_integrity` health indicator for unknown and invalid repos -area: Health -type: enhancement -issues: - - 103784 diff --git a/docs/changelog/104625.yaml b/docs/changelog/104625.yaml deleted file mode 100644 index 28951936107fb..0000000000000 --- a/docs/changelog/104625.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104625 -summary: "Add support for the `type` parameter, for sorting, to the Query API Key\ - \ API" -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/104636.yaml b/docs/changelog/104636.yaml deleted file mode 100644 index d74682f2eba18..0000000000000 --- a/docs/changelog/104636.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104636 -summary: Modifying request builders -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/104643.yaml b/docs/changelog/104643.yaml deleted file mode 100644 index 5a09cd081b376..0000000000000 --- a/docs/changelog/104643.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104643 -summary: "[Connectors API] Implement update service type action" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/104648.yaml b/docs/changelog/104648.yaml deleted file mode 100644 index e8bb5fea392ac..0000000000000 --- a/docs/changelog/104648.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104648 -summary: "[Connector API] Implement update `index_name` action" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/104654.yaml b/docs/changelog/104654.yaml deleted file mode 100644 index 1d007ad39a854..0000000000000 --- a/docs/changelog/104654.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104654 -summary: "[Connectors API] Implement update native action endpoint" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/104665.yaml b/docs/changelog/104665.yaml deleted file mode 100644 index a7043cbdc9dda..0000000000000 --- a/docs/changelog/104665.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104665 -summary: Restrict usage of certain aggregations when in sort order execution is required -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/104666.yaml b/docs/changelog/104666.yaml deleted file mode 100644 index 5009052bd5b0a..0000000000000 --- a/docs/changelog/104666.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104666 -summary: Require the name field for `inner_hits` for collapse -area: Search 
-type: bug -issues: [] diff --git a/docs/changelog/104674.yaml b/docs/changelog/104674.yaml deleted file mode 100644 index 12951488f89ce..0000000000000 --- a/docs/changelog/104674.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104674 -summary: "[Profiling] Speed up processing of stacktraces" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/104718.yaml b/docs/changelog/104718.yaml deleted file mode 100644 index ffe889bb28a3e..0000000000000 --- a/docs/changelog/104718.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104718 -summary: "ESQL: Fix replacement of nested expressions in aggs with multiple parameters" -area: ES|QL -type: bug -issues: - - 104706 diff --git a/docs/changelog/104721.yaml b/docs/changelog/104721.yaml deleted file mode 100644 index 3bfe8a21646c8..0000000000000 --- a/docs/changelog/104721.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104721 -summary: Add default rollover conditions to ILM explain API response -area: ILM+SLM -type: enhancement -issues: - - 103395 diff --git a/docs/changelog/104730.yaml b/docs/changelog/104730.yaml deleted file mode 100644 index fe5e2e157a004..0000000000000 --- a/docs/changelog/104730.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104730 -summary: "[Profiling] Support downsampling of generic events" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/104750.yaml b/docs/changelog/104750.yaml deleted file mode 100644 index 948b19a5eaaa6..0000000000000 --- a/docs/changelog/104750.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104750 -summary: "[Connectors API] Implement connector status update action" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/104753.yaml b/docs/changelog/104753.yaml deleted file mode 100644 index f95fd3da44084..0000000000000 --- a/docs/changelog/104753.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104753 -summary: Upgrade to Lucene 9.9.2 -area: Search -type: upgrade -issues: [] diff --git a/docs/changelog/104778.yaml b/docs/changelog/104778.yaml deleted file mode 100644 index 7dae338efc09c..0000000000000 --- a/docs/changelog/104778.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104778 -summary: Adding a `RequestBuilder` interface -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/104784.yaml b/docs/changelog/104784.yaml deleted file mode 100644 index 3d60222c2aa19..0000000000000 --- a/docs/changelog/104784.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104784 -summary: "Fix blob cache race, decay, time dependency" -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/104787.yaml b/docs/changelog/104787.yaml deleted file mode 100644 index 9c4ce688ce6ad..0000000000000 --- a/docs/changelog/104787.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104787 -summary: Add troubleshooting docs link to `PeerFinder` logs -area: Cluster Coordination -type: enhancement -issues: [] diff --git a/docs/changelog/104796.yaml b/docs/changelog/104796.yaml deleted file mode 100644 index a683f9ce22d49..0000000000000 --- a/docs/changelog/104796.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104796 -summary: "ESQL: Pre-allocate rows in TopNOperator" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/104840.yaml b/docs/changelog/104840.yaml deleted file mode 100644 index 5b7d83a966dbc..0000000000000 --- a/docs/changelog/104840.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104840 -summary: Support enrich ANY mode in cross clusters query -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/104859.yaml b/docs/changelog/104859.yaml deleted file mode 
100644 index 55e5758e31ae2..0000000000000 --- a/docs/changelog/104859.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104859 -summary: ES - document observing with rejections -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/104872.yaml b/docs/changelog/104872.yaml deleted file mode 100644 index ad70946be02ae..0000000000000 --- a/docs/changelog/104872.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104872 -summary: Add new int8_flat and flat vector index types -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/104878.yaml b/docs/changelog/104878.yaml deleted file mode 100644 index 2ae6d5c0c1da3..0000000000000 --- a/docs/changelog/104878.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104878 -summary: "Transforms: Adding basic stats API param" -area: Transform -type: enhancement -issues: [] diff --git a/docs/changelog/104893.yaml b/docs/changelog/104893.yaml deleted file mode 100644 index e4685e160f8f8..0000000000000 --- a/docs/changelog/104893.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104893 -summary: Release resources in `BestBucketsDeferringCollector` earlier -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/104895.yaml b/docs/changelog/104895.yaml deleted file mode 100644 index 020dcff891f03..0000000000000 --- a/docs/changelog/104895.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104895 -summary: Aggs support for Query API Key Information API -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/104905.yaml b/docs/changelog/104905.yaml deleted file mode 100644 index 80e06dc3b0cf5..0000000000000 --- a/docs/changelog/104905.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104905 -summary: "Execute lazy rollover with an internal dedicated user #104732" -area: Data streams -type: bug -issues: - - 104732 diff --git a/docs/changelog/104909.yaml b/docs/changelog/104909.yaml deleted file mode 100644 index 6d250c22a745a..0000000000000 --- a/docs/changelog/104909.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104909 -summary: "[Connectors API] Relax strict response parsing for get/list operations" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/104911.yaml b/docs/changelog/104911.yaml deleted file mode 100644 index 17a335337e345..0000000000000 --- a/docs/changelog/104911.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 104911 -summary: "ES|QL: Improve type validation in aggs for UNSIGNED_LONG better support\ - \ for VERSION" -area: ES|QL -type: bug -issues: - - 102961 diff --git a/docs/changelog/104927.yaml b/docs/changelog/104927.yaml deleted file mode 100644 index e0e098ba10b7b..0000000000000 --- a/docs/changelog/104927.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104927 -summary: Adding `ActionRequestLazyBuilder` implementation of `RequestBuilder` -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/104936.yaml b/docs/changelog/104936.yaml deleted file mode 100644 index cfa170f550681..0000000000000 --- a/docs/changelog/104936.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104936 -summary: Support enrich coordinator mode -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/104943.yaml b/docs/changelog/104943.yaml deleted file mode 100644 index 094ce66c4f994..0000000000000 --- a/docs/changelog/104943.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104943 -summary: Fix server cli to always pass through exit code -area: Infra/CLI -type: bug -issues: [] diff --git a/docs/changelog/104949.yaml b/docs/changelog/104949.yaml deleted file mode 100644 index c2682fc911f1d..0000000000000 --- 
a/docs/changelog/104949.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104949 -summary: Add text_embedding inference service with multilingual-e5 and custom eland models -area: Machine Learning -type: enhancement -issues: [ ] diff --git a/docs/changelog/104958.yaml b/docs/changelog/104958.yaml deleted file mode 100644 index 936342db03b45..0000000000000 --- a/docs/changelog/104958.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104958 -summary: "ESQL: Extend STATS command to support aggregate expressions" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/104982.yaml b/docs/changelog/104982.yaml deleted file mode 100644 index 62194aa68b80c..0000000000000 --- a/docs/changelog/104982.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104982 -summary: "[Connectors API] Add new field `api_key_secret_id` to Connector" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/104993.yaml b/docs/changelog/104993.yaml deleted file mode 100644 index df9875563d5a1..0000000000000 --- a/docs/changelog/104993.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104993 -summary: Support enrich remote mode -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/104996.yaml b/docs/changelog/104996.yaml deleted file mode 100644 index b94711111adfe..0000000000000 --- a/docs/changelog/104996.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104996 -summary: "Enhancement: Metrics for Search Took Times using Action Listeners" -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/105015.yaml b/docs/changelog/105015.yaml deleted file mode 100644 index 94ffc2b0e58d5..0000000000000 --- a/docs/changelog/105015.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105015 -summary: Modify name of threadpool metric for rejected -area: Infra/Metrics -type: enhancement -issues: [] diff --git a/docs/changelog/105044.yaml b/docs/changelog/105044.yaml deleted file mode 100644 index 5a9a11f928f98..0000000000000 --- a/docs/changelog/105044.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105044 -summary: Expose `OperationPurpose` via `CustomQueryParameter` to s3 logs -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/105055.yaml b/docs/changelog/105055.yaml deleted file mode 100644 index 0db70a6b9e558..0000000000000 --- a/docs/changelog/105055.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105055 -summary: "Do not enable APM agent 'instrument', it's not required for manual tracing" -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/105062.yaml b/docs/changelog/105062.yaml deleted file mode 100644 index 928786f62381a..0000000000000 --- a/docs/changelog/105062.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105062 -summary: Nest pass-through objects within objects -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/105064.yaml b/docs/changelog/105064.yaml deleted file mode 100644 index 81c62b3148f1c..0000000000000 --- a/docs/changelog/105064.yaml +++ /dev/null @@ -1,17 +0,0 @@ -pr: 105064 -summary: "ES|QL: remove PROJECT keyword from the grammar" -area: ES|QL -type: breaking -issues: [] -breaking: - title: "ES|QL: remove PROJECT keyword from the grammar" - area: REST API - details: "Removes the PROJECT keyword (an alias for KEEP) from ES|QL grammar" - impact: "Before this change, users could use PROJECT as an alias for KEEP in ESQL queries,\ - \ (eg. 
'FROM idx | PROJECT name, surname')\ - \ the parser replaced PROJECT with KEEP, emitted a warning:\ - \ 'PROJECT command is no longer supported, please use KEEP instead'\ - \ and the query was executed normally.\ - \ With this change, PROJECT command is no longer recognized by the query parser;\ - \ queries using PROJECT command now return a parsing exception." - notable: false diff --git a/docs/changelog/105081.yaml b/docs/changelog/105081.yaml deleted file mode 100644 index efa686bd7b4a4..0000000000000 --- a/docs/changelog/105081.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105081 -summary: For empty mappings use a `LocalRelation` -area: ES|QL -type: bug -issues: - - 104809 diff --git a/docs/changelog/105088.yaml b/docs/changelog/105088.yaml deleted file mode 100644 index 8b5d1fa7f9e02..0000000000000 --- a/docs/changelog/105088.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105088 -summary: "ESQL: Speed up reading many nulls" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/105089.yaml b/docs/changelog/105089.yaml deleted file mode 100644 index 6f43c58af8a41..0000000000000 --- a/docs/changelog/105089.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105089 -summary: Return results in order -area: Transform -type: bug -issues: - - 104847 diff --git a/docs/changelog/105103.yaml b/docs/changelog/105103.yaml deleted file mode 100644 index 599d2e3666e4b..0000000000000 --- a/docs/changelog/105103.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105103 -summary: Do not record s3 http request time when it is not available -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/105105.yaml b/docs/changelog/105105.yaml deleted file mode 100644 index 848a9637d1388..0000000000000 --- a/docs/changelog/105105.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105105 -summary: Add s3 `HeadObject` request to request stats -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/105131.yaml b/docs/changelog/105131.yaml deleted file mode 100644 index 36993527da583..0000000000000 --- a/docs/changelog/105131.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105131 -summary: "[Connector API] Support filtering by name, index name in list action" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/105150.yaml b/docs/changelog/105150.yaml deleted file mode 100644 index d9fc3d337f952..0000000000000 --- a/docs/changelog/105150.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105150 -summary: Remove `SearchException` usages without a proper status code -area: Search -type: bug -issues: [] diff --git a/docs/changelog/105163.yaml b/docs/changelog/105163.yaml deleted file mode 100644 index f28bf4de14792..0000000000000 --- a/docs/changelog/105163.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105163 -summary: Add stable `ThreadPool` constructor to `LogstashInternalBridge` -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/105164.yaml b/docs/changelog/105164.yaml deleted file mode 100644 index 7affb0911bc6d..0000000000000 --- a/docs/changelog/105164.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105164 -summary: Remove duplicate checkpoint audits -area: Transform -type: bug -issues: - - 105106 diff --git a/docs/changelog/105178.yaml b/docs/changelog/105178.yaml deleted file mode 100644 index e8fc9cfd6898f..0000000000000 --- a/docs/changelog/105178.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105178 -summary: "[Connector API] Support filtering connectors by service type and a query" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/105180.yaml 
b/docs/changelog/105180.yaml deleted file mode 100644 index ac7ed20f151b7..0000000000000 --- a/docs/changelog/105180.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105180 -summary: Use new `ignore_dynamic_beyond_limit` in logs and metric data streams -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/105192.yaml b/docs/changelog/105192.yaml deleted file mode 100644 index b15d58ef40fe7..0000000000000 --- a/docs/changelog/105192.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105192 -summary: Allow transforms to use PIT with remote clusters again -area: Transform -type: enhancement -issues: - - 104518 diff --git a/docs/changelog/105196.yaml b/docs/changelog/105196.yaml deleted file mode 100644 index 8fe7b50cfa989..0000000000000 --- a/docs/changelog/105196.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105196 -summary: Adding a custom exception for problems with the graph of pipelines to be - applied to a document -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/105207.yaml b/docs/changelog/105207.yaml deleted file mode 100644 index 00d227248abfb..0000000000000 --- a/docs/changelog/105207.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105207 -summary: Introduce an `AggregatorReducer` to reduce the footprint of aggregations - in the coordinating node -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/105221.yaml b/docs/changelog/105221.yaml deleted file mode 100644 index 2ef64ef110d95..0000000000000 --- a/docs/changelog/105221.yaml +++ /dev/null @@ -1,14 +0,0 @@ -pr: 105221 -summary: "ESQL: Grammar - FROM METADATA no longer requires []" -area: ES|QL -type: breaking -issues: [] -breaking: - title: "ESQL: Grammar - FROM METADATA no longer requires []" - area: REST API - details: "Remove [ ] for METADATA option inside FROM command statements" - impact: "Previously to return metadata fields, one had to use square brackets:\ - \ (eg. 'FROM index [METADATA _index]').\ - \ This is no longer needed: the [ ] are dropped and do not have to be specified,\ - \ thus simplifying the command above to:'FROM index METADATA _index'." 
- notable: false diff --git a/docs/changelog/105223.yaml b/docs/changelog/105223.yaml deleted file mode 100644 index e2a95fcd6ba48..0000000000000 --- a/docs/changelog/105223.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105223 -summary: "x-pack/plugin/apm-data: Add a new field transaction.profiler_stack_trace_ids to traces-apm@mappings.yaml" -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/105232.yaml b/docs/changelog/105232.yaml deleted file mode 100644 index a2ad7ad9451e9..0000000000000 --- a/docs/changelog/105232.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105232 -summary: Execute SAML authentication on the generic threadpool -area: Authentication -type: bug -issues: - - 104962 diff --git a/docs/changelog/105249.yaml b/docs/changelog/105249.yaml deleted file mode 100644 index 979253e452008..0000000000000 --- a/docs/changelog/105249.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105249 -summary: "[Connector API] Support updating configuration values only" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/105259.yaml b/docs/changelog/105259.yaml deleted file mode 100644 index a360bc8bc1672..0000000000000 --- a/docs/changelog/105259.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105259 -summary: Lower G1 minimum full GC interval -area: Infra/Circuit Breakers -type: enhancement -issues: [] diff --git a/docs/changelog/105265.yaml b/docs/changelog/105265.yaml deleted file mode 100644 index 70231dbfabc52..0000000000000 --- a/docs/changelog/105265.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105265 -summary: Improving the performance of the ingest simulate verbose API -area: "Ingest Node" -type: enhancement -issues: [] diff --git a/docs/changelog/105269.yaml b/docs/changelog/105269.yaml deleted file mode 100644 index acf05b05ecfc4..0000000000000 --- a/docs/changelog/105269.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105269 -summary: Reserve bytes before serializing page -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/105272.yaml b/docs/changelog/105272.yaml deleted file mode 100644 index 1032a17fc10f8..0000000000000 --- a/docs/changelog/105272.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105272 -summary: "Stop the periodic health logger when es is stopping" -area: Health -type: bug -issues: [] diff --git a/docs/changelog/105273.yaml b/docs/changelog/105273.yaml deleted file mode 100644 index 83db9eac2a14a..0000000000000 --- a/docs/changelog/105273.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105273 -summary: "x-pack/plugin/core: make automatic rollovers lazy" -area: Data streams -type: enhancement -issues: - - 104083 diff --git a/docs/changelog/105289.yaml b/docs/changelog/105289.yaml deleted file mode 100644 index a51778a93beb8..0000000000000 --- a/docs/changelog/105289.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105289 -summary: "[Connector API] Change required privileges to indices:data/read(write)" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/105299.yaml b/docs/changelog/105299.yaml deleted file mode 100644 index b1f9b3ac4a2aa..0000000000000 --- a/docs/changelog/105299.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105299 -summary: Conditionally send the dimensions field as part of the openai requests -area: Machine Learning -type: enhancement -issues: - - 105005 diff --git a/docs/changelog/105325.yaml b/docs/changelog/105325.yaml deleted file mode 100644 index ab3724efca30f..0000000000000 --- a/docs/changelog/105325.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105325 -summary: "ESQL: Fix Analyzer to not interpret escaped * as a 
pattern" -area: ES|QL -type: bug -issues: - - 104955 diff --git a/docs/changelog/105334.yaml b/docs/changelog/105334.yaml deleted file mode 100644 index 498fdf4113b3c..0000000000000 --- a/docs/changelog/105334.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105334 -summary: Upgrade ANTLR4 to 4.13.1 -area: Query Languages -type: upgrade -issues: - - 102953 diff --git a/docs/changelog/105346.yaml b/docs/changelog/105346.yaml deleted file mode 100644 index 7c6eab93f6c10..0000000000000 --- a/docs/changelog/105346.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105346 -summary: Allow GET inference models by user a with read only permission -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/105371.yaml b/docs/changelog/105371.yaml deleted file mode 100644 index 500c64b677a10..0000000000000 --- a/docs/changelog/105371.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105371 -summary: "ESQL: Add plan consistency verification after each optimizer" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/105373.yaml b/docs/changelog/105373.yaml deleted file mode 100644 index f9d3c718f7ae3..0000000000000 --- a/docs/changelog/105373.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105373 -summary: "Fix parsing of flattened fields within subobjects: false" -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/105391.yaml b/docs/changelog/105391.yaml deleted file mode 100644 index 6b9b39c00a150..0000000000000 --- a/docs/changelog/105391.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105391 -summary: Catch all the potential exceptions in the ingest processor code -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/105403.yaml b/docs/changelog/105403.yaml deleted file mode 100644 index f855c0e8ed94f..0000000000000 --- a/docs/changelog/105403.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105403 -summary: "ESQL: make `cidr_match` foldable" -area: ES|QL -type: bug -issues: - - 105376 diff --git a/docs/changelog/105427.yaml b/docs/changelog/105427.yaml deleted file mode 100644 index e73853b9dce92..0000000000000 --- a/docs/changelog/105427.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105427 -summary: Adding `executedPipelines` to the `IngestDocument` copy constructor -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/105428.yaml b/docs/changelog/105428.yaml deleted file mode 100644 index 49a80150b4303..0000000000000 --- a/docs/changelog/105428.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105428 -summary: Limiting the number of nested pipelines that can be executed -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/105429.yaml b/docs/changelog/105429.yaml deleted file mode 100644 index 706375649b7ca..0000000000000 --- a/docs/changelog/105429.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105429 -summary: Changed system auditor to use levels -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/105440.yaml b/docs/changelog/105440.yaml deleted file mode 100644 index 8aacac3e641bf..0000000000000 --- a/docs/changelog/105440.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105440 -summary: Avoid false-positive matches on intermediate objects in `ecs@mappings` -area: Data streams -type: bug -issues: - - 102794 diff --git a/docs/changelog/105442.yaml b/docs/changelog/105442.yaml deleted file mode 100644 index b0af1b634d984..0000000000000 --- a/docs/changelog/105442.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105442 -summary: Handling exceptions on watcher reload -area: Watcher -type: bug -issues: - - 69842 diff --git a/docs/changelog/105458.yaml 
b/docs/changelog/105458.yaml deleted file mode 100644 index 2bab415884975..0000000000000 --- a/docs/changelog/105458.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105458 -summary: The OpenAI model parameter should be in service settings not task settings. Move the configuration field to service settings -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/105468.yaml b/docs/changelog/105468.yaml deleted file mode 100644 index 0de36a71862a4..0000000000000 --- a/docs/changelog/105468.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105468 -summary: Include better output in profiling & `toString` for automaton based queries -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/105476.yaml b/docs/changelog/105476.yaml deleted file mode 100644 index 6520df78520e7..0000000000000 --- a/docs/changelog/105476.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105476 -summary: "ESQL: Fix bug in grammar that allowed spaces inside id pattern" -area: ES|QL -type: bug -issues: - - 105441 diff --git a/docs/changelog/105486.yaml b/docs/changelog/105486.yaml deleted file mode 100644 index befdaec2301c6..0000000000000 --- a/docs/changelog/105486.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105486 -summary: Fix use-after-free at event-loop shutdown -area: Network -type: bug -issues: [] diff --git a/docs/changelog/105499.yaml b/docs/changelog/105499.yaml deleted file mode 100644 index bfc297411efa7..0000000000000 --- a/docs/changelog/105499.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105499 -summary: Fix a bug where destination index aliases are not set up for an unattended transform -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/105546.yaml b/docs/changelog/105546.yaml deleted file mode 100644 index 0b54e124f2495..0000000000000 --- a/docs/changelog/105546.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105546 -summary: '`GlobalOrdCardinalityAggregator` should use `HyperLogLogPlusPlus` instead - of `HyperLogLogPlusPlusSparse`' -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/105578.yaml b/docs/changelog/105578.yaml deleted file mode 100644 index 1ffa0128c1d0a..0000000000000 --- a/docs/changelog/105578.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 105578 -summary: Upgrade to Lucene 9.10.0 -area: Search -type: enhancement -issues: [] -highlight: - title: New Lucene 9.10 release - body: |- - - https://github.com/apache/lucene/pull/13090: Prevent humongous allocations in ScalarQuantizer when building quantiles. - - https://github.com/apache/lucene/pull/12962: Speedup concurrent multi-segment HNSW graph search - - https://github.com/apache/lucene/pull/13033: Range queries on numeric/date/ip fields now exit earlier on segments whose values don't intersect with the query range. This should especially help when there are other required clauses in the `bool` query and when the range filter is narrow, e.g. filtering on the last 5 minutes. - - https://github.com/apache/lucene/pull/13026: `bool` queries that mix `filter` and `should` clauses will now propagate minimum competitive scores through the `should` clauses. This should yield speedups when sorting by descending score. 
- notable: true diff --git a/docs/changelog/105588.yaml b/docs/changelog/105588.yaml deleted file mode 100644 index e43ff8cd75c60..0000000000000 --- a/docs/changelog/105588.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105588 -summary: '`URLRepository` should not block shutdown' -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/105593.yaml b/docs/changelog/105593.yaml deleted file mode 100644 index 4eef0d9404f42..0000000000000 --- a/docs/changelog/105593.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105593 -summary: "ESQL: push down \"[text_field] is not null\"" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/105633.yaml b/docs/changelog/105633.yaml deleted file mode 100644 index b19ec67f4602a..0000000000000 --- a/docs/changelog/105633.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105633 -summary: "[Connector API] Bugfix: support list type in filtering advenced snippet\ - \ value" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/105650.yaml b/docs/changelog/105650.yaml deleted file mode 100644 index f43da5b315f4c..0000000000000 --- a/docs/changelog/105650.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105650 -summary: "ESQL: Fix wrong attribute shadowing in pushdown rules" -area: ES|QL -type: bug -issues: - - 105434 diff --git a/docs/changelog/105691.yaml b/docs/changelog/105691.yaml deleted file mode 100644 index 89797782b06ee..0000000000000 --- a/docs/changelog/105691.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105691 -summary: "ES|QL: Disable optimizations that rely on Expression.nullable()" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/105770.yaml b/docs/changelog/105770.yaml deleted file mode 100644 index ec8ae4f380e2f..0000000000000 --- a/docs/changelog/105770.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105770 -summary: Field-caps field has value lookup use map instead of looping array -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/105772.yaml b/docs/changelog/105772.yaml deleted file mode 100644 index 73680aa04e5ab..0000000000000 --- a/docs/changelog/105772.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105772 -summary: "[ILM] Delete step deletes data stream with only one index" -area: ILM+SLM -type: bug -issues: [] diff --git a/docs/changelog/105789.yaml b/docs/changelog/105789.yaml deleted file mode 100644 index 02a6936fa3294..0000000000000 --- a/docs/changelog/105789.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105789 -summary: Make Health API more resilient to multi-version clusters -area: Health -type: bug -issues: - - 90183 diff --git a/docs/changelog/105848.yaml b/docs/changelog/105848.yaml deleted file mode 100644 index 18291066177f6..0000000000000 --- a/docs/changelog/105848.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105848 -summary: '`ProjectOperator` should not retain references to released blocks' -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/105941.yaml b/docs/changelog/105941.yaml deleted file mode 100644 index 8e2eea1657208..0000000000000 --- a/docs/changelog/105941.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105941 -summary: Field caps performance pt2 -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/105945.yaml b/docs/changelog/105945.yaml deleted file mode 100644 index ec76faf6ef76f..0000000000000 --- a/docs/changelog/105945.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105945 -summary: "[Connector API] Fix default ordering in `SyncJob` list endpoint" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/105987.yaml b/docs/changelog/105987.yaml deleted 
file mode 100644 index d09a6907c72bf..0000000000000 --- a/docs/changelog/105987.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105987 -summary: Fix `categorize_text` aggregation nested under empty buckets -area: Machine Learning -type: bug -issues: - - 105836 diff --git a/docs/changelog/105994.yaml b/docs/changelog/105994.yaml deleted file mode 100644 index ef9889d0a47af..0000000000000 --- a/docs/changelog/105994.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105994 -summary: Fix bug when nested knn pre-filter might match nested docs -area: Vector Search -type: bug -issues: [] diff --git a/docs/changelog/106020.yaml b/docs/changelog/106020.yaml deleted file mode 100644 index 094a43b430f89..0000000000000 --- a/docs/changelog/106020.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106020 -summary: Fix resetting a job if the original reset task no longer exists. -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/106057.yaml b/docs/changelog/106057.yaml deleted file mode 100644 index c07f658fbbf8a..0000000000000 --- a/docs/changelog/106057.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106057 -summary: Avoid computing `currentInferenceProcessors` on every cluster state -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/106060.yaml b/docs/changelog/106060.yaml deleted file mode 100644 index 2b6a47372ddd3..0000000000000 --- a/docs/changelog/106060.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106060 -summary: "[Connector API] Fix serialisation of script params in connector index service" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/106062.yaml b/docs/changelog/106062.yaml deleted file mode 100644 index f4ff3df4045e6..0000000000000 --- a/docs/changelog/106062.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106062 -summary: "During ML maintenance, reset jobs in the reset state without a corresponding\ - \ task" -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/106105.yaml b/docs/changelog/106105.yaml deleted file mode 100644 index 09f80e9e71e6d..0000000000000 --- a/docs/changelog/106105.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106105 -summary: Respect --pass option in certutil csr mode -area: TLS -type: bug -issues: [] diff --git a/docs/changelog/106156.yaml b/docs/changelog/106156.yaml deleted file mode 100644 index 63232efe6e5fb..0000000000000 --- a/docs/changelog/106156.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106156 -summary: Disable parallel collection for terms aggregation with `min_doc_count` equals - to 0 -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/106288.yaml b/docs/changelog/106288.yaml deleted file mode 100644 index 0f14e53c237a1..0000000000000 --- a/docs/changelog/106288.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106288 -summary: Small time series agg improvement -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/106329.yaml b/docs/changelog/106329.yaml deleted file mode 100644 index 78e811e7987b6..0000000000000 --- a/docs/changelog/106329.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106329 -summary: Fix Search Applications bug where deleting an alias before deleting an application intermittently caused errors -area: Application -type: bug -issues: [] diff --git a/docs/changelog/106351.yaml b/docs/changelog/106351.yaml deleted file mode 100644 index 45868acc3a284..0000000000000 --- a/docs/changelog/106351.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106351 -summary: "Fix error on sorting unsortable `geo_point` and `cartesian_point`" -area: ES|QL -type: bug -issues: - - 106007 diff --git 
a/docs/changelog/106392.yaml b/docs/changelog/106392.yaml deleted file mode 100644 index ff1a0284ee5db..0000000000000 --- a/docs/changelog/106392.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106392 -summary: Resume driver when failing to fetch pages -area: ES|QL -type: bug -issues: - - 106262 diff --git a/docs/changelog/106398.yaml b/docs/changelog/106398.yaml deleted file mode 100644 index cffc5ceeb214d..0000000000000 --- a/docs/changelog/106398.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106398 -summary: Release `TranslogSnapshot` buffer after iteration -area: Engine -type: bug -issues: - - 106390 diff --git a/docs/changelog/106544.yaml b/docs/changelog/106544.yaml deleted file mode 100644 index 6557ba478126d..0000000000000 --- a/docs/changelog/106544.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106544 -summary: Force execution of `SearchService.Reaper` -area: Search -type: bug -issues: - - 106543 diff --git a/docs/changelog/106574.yaml b/docs/changelog/106574.yaml deleted file mode 100644 index 8063450bc0db1..0000000000000 --- a/docs/changelog/106574.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106574 -summary: Fix `_reset` API when called with `force=true` on a failed transform -area: Transform -type: bug -issues: - - 106573 diff --git a/docs/changelog/96235.yaml b/docs/changelog/96235.yaml deleted file mode 100644 index 83d1eaf74916b..0000000000000 --- a/docs/changelog/96235.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96235 -summary: Add `index.mapping.total_fields.ignore_dynamic_beyond_limit` setting to ignore dynamic fields when field limit is reached -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/99142.yaml b/docs/changelog/99142.yaml deleted file mode 100644 index 885946cec909b..0000000000000 --- a/docs/changelog/99142.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99142 -summary: Reuse number field mapper tests in other modules -area: Search -type: enhancement -issues: - - 92947 diff --git a/docs/changelog/99747.yaml b/docs/changelog/99747.yaml deleted file mode 100644 index e3e6edc585ca6..0000000000000 --- a/docs/changelog/99747.yaml +++ /dev/null @@ -1,19 +0,0 @@ -pr: 99747 -summary: Improve storage efficiency for non-metric fields in TSDB -area: TSDB -type: enhancement -issues: [] -highlight: - title: Improve storage efficiency for non-metric fields in TSDB - body: |- - Adds a new `doc_values` encoding for non-metric fields in TSDB that takes advantage of TSDB's index sorting. - While terms that are used in multiple documents (such as the host name) are already stored only once in the terms dictionary, - there are a lot of repetitions in the references to the terms dictionary that are stored in `doc_values` (ordinals). - In TSDB, documents (and therefore `doc_values`) are implicitly sorted by dimenstions and timestamp. - This means that for each time series, we are storing long consecutive runs of the same ordinal. - With this change, we are introducing an encoding that detects and efficiently stores runs of the same value (such as `1 1 1 2 2 2 …`), - and runs of cycling values (such as `1 2 1 2 …`). - In our testing, we have seen a reduction in storage size by about 13%. - The effectiveness of this encoding depends on how many non-metric fields, such as dimensions, are used. - The more non-metric fields, the more effective this improvement will be. 
- notable: true diff --git a/docs/changelog/99961.yaml b/docs/changelog/99961.yaml deleted file mode 100644 index 457f7801ce218..0000000000000 --- a/docs/changelog/99961.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99961 -summary: "ESQL: Correct out-of-range filter pushdowns" -area: ES|QL -type: bug -issues: - - 99960 From a49f8b863d6063fde17b6bdea7393f626c0d0d23 Mon Sep 17 00:00:00 2001 From: James Baiera Date: Tue, 26 Mar 2024 16:19:03 -0400 Subject: [PATCH 76/79] Redirect shard-level bulk failures to a failure store if applicable (#105362) This PR expands upon previous work in the failure store by inspecting failed shard-level bulk operations and possibly redirecting them to a failure store. --- .../190_failure_store_redirection.yml | 80 ++ .../action/bulk/BulkOperation.java | 444 +++++++-- .../action/bulk/BulkRequestModifier.java | 4 +- ...ava => FailureStoreDocumentConverter.java} | 8 +- .../common/collect/Iterators.java | 101 +- .../action/bulk/BulkOperationTests.java | 870 ++++++++++++++++++ ...> FailureStoreDocumentConverterTests.java} | 11 +- .../common/collect/IteratorsTests.java | 25 + 8 files changed, 1467 insertions(+), 76 deletions(-) rename server/src/main/java/org/elasticsearch/action/bulk/{FailureStoreDocument.java => FailureStoreDocumentConverter.java} (94%) create mode 100644 server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java rename server/src/test/java/org/elasticsearch/action/bulk/{FailureStoreDocumentTests.java => FailureStoreDocumentConverterTests.java} (90%) diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml index b9621977ff3aa..f22267357104e 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml @@ -108,3 +108,83 @@ teardown: indices.delete: index: .fs-logs-foobar-* - is_true: acknowledged + +--- +"Redirect shard failure in data stream to failure store": + - skip: + version: " - 8.13.99" + reason: "data stream failure stores only redirect shard failures in 8.14+" + features: [allowed_warnings, contains] + + - do: + allowed_warnings: + - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: generic_logs_template + body: + index_patterns: logs-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + mappings: + properties: + '@timestamp': + type: date + count: + type: long + + + - do: + index: + index: logs-foobar + refresh: true + body: + '@timestamp': '2020-12-12' + count: 'invalid value' + + - do: + indices.get_data_stream: + name: logs-foobar + - match: { data_streams.0.name: logs-foobar } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store: true } + - length: { data_streams.0.failure_indices: 1 } + - match: { data_streams.0.failure_indices.0.index_name: 
'/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + - do: + search: + index: logs-foobar + body: { query: { match_all: {} } } + - length: { hits.hits: 0 } + + - do: + search: + index: .fs-logs-foobar-* + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: "/\\.fs-logs-foobar-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } + - exists: hits.hits.0._source.@timestamp + - not_exists: hits.hits.0._source.count + - match: { hits.hits.0._source.document.index: 'logs-foobar' } + - match: { hits.hits.0._source.document.source.@timestamp: '2020-12-12' } + - match: { hits.hits.0._source.document.source.count: 'invalid value' } + - match: { hits.hits.0._source.error.type: 'document_parsing_exception' } + - contains: { hits.hits.0._source.error.message: "failed to parse field [count] of type [long] in document with id " } + - contains: { hits.hits.0._source.error.message: "Preview of field's value: 'invalid value'" } + - contains: { hits.hits.0._source.error.stack_trace: "org.elasticsearch.index.mapper.DocumentParsingException: " } + - contains: { hits.hits.0._source.error.stack_trace: "failed to parse field [count] of type [long] in document with id" } + - contains: { hits.hits.0._source.error.stack_trace: "Preview of field's value: 'invalid value'" } + + - do: + indices.delete_data_stream: + name: logs-foobar + - is_true: acknowledged + + - do: + indices.delete: + index: .fs-logs-foobar-* + - is_true: acknowledged diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index 1d95f430d5c7e..1e9b1446850af 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -16,18 +16,21 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.RoutingMissingException; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; @@ -39,11 +42,16 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; +import java.util.function.Consumer; import java.util.function.LongSupplier; import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.EXCLUDED_DATA_STREAMS_KEY; @@ -59,14 +67,16 @@ final class 
BulkOperation extends ActionRunnable { private final Task task; private final ThreadPool threadPool; private final ClusterService clusterService; - private BulkRequest bulkRequest; // set to null once all requests are sent out + private BulkRequest bulkRequest; // set to null once all requests are completed private final ActionListener listener; private final AtomicArray responses; + private final ConcurrentLinkedQueue failureStoreRedirects = new ConcurrentLinkedQueue<>(); private final long startTimeNanos; private final ClusterStateObserver observer; private final Map indicesThatCannotBeCreated; private final String executorName; private final LongSupplier relativeTimeProvider; + private final FailureStoreDocumentConverter failureStoreDocumentConverter; private IndexNameExpressionResolver indexNameExpressionResolver; private NodeClient client; @@ -83,6 +93,40 @@ final class BulkOperation extends ActionRunnable { LongSupplier relativeTimeProvider, long startTimeNanos, ActionListener listener + ) { + this( + task, + threadPool, + executorName, + clusterService, + bulkRequest, + client, + responses, + indicesThatCannotBeCreated, + indexNameExpressionResolver, + relativeTimeProvider, + startTimeNanos, + listener, + new ClusterStateObserver(clusterService, bulkRequest.timeout(), logger, threadPool.getThreadContext()), + new FailureStoreDocumentConverter() + ); + } + + BulkOperation( + Task task, + ThreadPool threadPool, + String executorName, + ClusterService clusterService, + BulkRequest bulkRequest, + NodeClient client, + AtomicArray responses, + Map indicesThatCannotBeCreated, + IndexNameExpressionResolver indexNameExpressionResolver, + LongSupplier relativeTimeProvider, + long startTimeNanos, + ActionListener listener, + ClusterStateObserver observer, + FailureStoreDocumentConverter failureStoreDocumentConverter ) { super(listener); this.task = task; @@ -97,68 +141,90 @@ final class BulkOperation extends ActionRunnable { this.relativeTimeProvider = relativeTimeProvider; this.indexNameExpressionResolver = indexNameExpressionResolver; this.client = client; - this.observer = new ClusterStateObserver(clusterService, bulkRequest.timeout(), logger, threadPool.getThreadContext()); + this.observer = observer; + this.failureStoreDocumentConverter = failureStoreDocumentConverter; } @Override protected void doRun() { assert bulkRequest != null; final ClusterState clusterState = observer.setAndGetObservedState(); - if (handleBlockExceptions(clusterState)) { + if (handleBlockExceptions(clusterState, BulkOperation.this, this::onFailure)) { + return; + } + Map> requestsByShard = groupBulkRequestsByShards(clusterState); + executeBulkRequestsByShard(requestsByShard, clusterState, this::redirectFailuresOrCompleteBulkOperation); + } + + private void doRedirectFailures() { + assert failureStoreRedirects.isEmpty() != true : "Attempting to redirect failures, but none were present in the queue"; + final ClusterState clusterState = observer.setAndGetObservedState(); + // If the cluster is blocked at this point, discard the failure store redirects and complete the response with the original failures + if (handleBlockExceptions(clusterState, ActionRunnable.run(listener, this::doRedirectFailures), this::discardRedirectsAndFinish)) { return; } - Map> requestsByShard = groupRequestsByShards(clusterState); - executeBulkRequestsByShard(requestsByShard, clusterState); + Map> requestsByShard = drainAndGroupRedirectsByShards(clusterState); + executeBulkRequestsByShard(requestsByShard, clusterState, 
this::completeBulkOperation); } private long buildTookInMillis(long startTimeNanos) { return TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTimeNanos); } - private Map> groupRequestsByShards(ClusterState clusterState) { + private Map> groupBulkRequestsByShards(ClusterState clusterState) { + return groupRequestsByShards( + clusterState, + Iterators.enumerate(bulkRequest.requests.iterator(), BulkItemRequest::new), + BulkOperation::validateWriteIndex + ); + } + + private Map> drainAndGroupRedirectsByShards(ClusterState clusterState) { + return groupRequestsByShards( + clusterState, + Iterators.fromSupplier(failureStoreRedirects::poll), + (ia, ignore) -> validateRedirectIndex(ia) + ); + } + + private Map> groupRequestsByShards( + ClusterState clusterState, + Iterator it, + BiConsumer> indexOperationValidator + ) { final ConcreteIndices concreteIndices = new ConcreteIndices(clusterState, indexNameExpressionResolver); Metadata metadata = clusterState.metadata(); // Group the requests by ShardId -> Operations mapping Map> requestsByShard = new HashMap<>(); - for (int i = 0; i < bulkRequest.requests.size(); i++) { - DocWriteRequest docWriteRequest = bulkRequest.requests.get(i); + while (it.hasNext()) { + BulkItemRequest bulkItemRequest = it.next(); + DocWriteRequest docWriteRequest = bulkItemRequest.request(); + // the request can only be null because we set it to null in the previous step, so it gets ignored if (docWriteRequest == null) { continue; } - if (addFailureIfRequiresAliasAndAliasIsMissing(docWriteRequest, i, metadata)) { + if (addFailureIfRequiresAliasAndAliasIsMissing(docWriteRequest, bulkItemRequest.id(), metadata)) { continue; } - if (addFailureIfIndexCannotBeCreated(docWriteRequest, i)) { + if (addFailureIfIndexCannotBeCreated(docWriteRequest, bulkItemRequest.id())) { continue; } - if (addFailureIfRequiresDataStreamAndNoParentDataStream(docWriteRequest, i, metadata)) { + if (addFailureIfRequiresDataStreamAndNoParentDataStream(docWriteRequest, bulkItemRequest.id(), metadata)) { continue; } IndexAbstraction ia = null; - boolean includeDataStreams = docWriteRequest.opType() == DocWriteRequest.OpType.CREATE; try { ia = concreteIndices.resolveIfAbsent(docWriteRequest); - if (ia.isDataStreamRelated() && includeDataStreams == false) { - throw new IllegalArgumentException("only write ops with an op_type of create are allowed in data streams"); - } - // The ConcreteIndices#resolveIfAbsent(...) method validates via IndexNameExpressionResolver whether - // an operation is allowed in index into a data stream, but this isn't done when resolve call is cached, so - // the validation needs to be performed here too. 
- if (ia.getParentDataStream() != null && - // avoid valid cases when directly indexing into a backing index - // (for example when directly indexing into .ds-logs-foobar-000001) - ia.getName().equals(docWriteRequest.index()) == false && docWriteRequest.opType() != DocWriteRequest.OpType.CREATE) { - throw new IllegalArgumentException("only write ops with an op_type of create are allowed in data streams"); - } + indexOperationValidator.accept(ia, docWriteRequest); TransportBulkAction.prohibitCustomRoutingOnDataStream(docWriteRequest, metadata); TransportBulkAction.prohibitAppendWritesInBackingIndices(docWriteRequest, metadata); docWriteRequest.routing(metadata.resolveWriteIndexRouting(docWriteRequest.routing(), docWriteRequest.index())); final Index concreteIndex = docWriteRequest.getConcreteWriteIndex(ia, metadata); - if (addFailureIfIndexIsClosed(docWriteRequest, concreteIndex, i, metadata)) { + if (addFailureIfIndexIsClosed(docWriteRequest, concreteIndex, bulkItemRequest.id(), metadata)) { continue; } IndexRouting indexRouting = concreteIndices.routing(concreteIndex); @@ -168,37 +234,56 @@ private Map> groupRequestsByShards(ClusterState c new ShardId(concreteIndex, shardId), shard -> new ArrayList<>() ); - shardRequests.add(new BulkItemRequest(i, docWriteRequest)); + shardRequests.add(bulkItemRequest); } catch (ElasticsearchParseException | IllegalArgumentException | RoutingMissingException | ResourceNotFoundException e) { String name = ia != null ? ia.getName() : docWriteRequest.index(); - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(name, docWriteRequest.id(), e); - BulkItemResponse bulkItemResponse = BulkItemResponse.failure(i, docWriteRequest.opType(), failure); - responses.set(i, bulkItemResponse); - // make sure the request gets never processed again - bulkRequest.requests.set(i, null); + addFailureAndDiscardRequest(docWriteRequest, bulkItemRequest.id(), name, e); } } return requestsByShard; } - private void executeBulkRequestsByShard(Map> requestsByShard, ClusterState clusterState) { + /** + * Validates that an index abstraction is capable of receiving the provided write request + */ + private static void validateWriteIndex(IndexAbstraction ia, DocWriteRequest docWriteRequest) { + boolean includeDataStreams = docWriteRequest.opType() == DocWriteRequest.OpType.CREATE; + if (ia.isDataStreamRelated() && includeDataStreams == false) { + throw new IllegalArgumentException("only write ops with an op_type of create are allowed in data streams"); + } + // The ConcreteIndices#resolveIfAbsent(...) method validates via IndexNameExpressionResolver whether + // an operation is allowed in index into a data stream, but this isn't done when resolve call is cached, so + // the validation needs to be performed here too. + if (ia.getParentDataStream() != null && + // avoid valid cases when directly indexing into a backing index + // (for example when directly indexing into .ds-logs-foobar-000001) + ia.getName().equals(docWriteRequest.index()) == false && docWriteRequest.opType() != DocWriteRequest.OpType.CREATE) { + throw new IllegalArgumentException("only write ops with an op_type of create are allowed in data streams"); + } + } + + /** + * Validates that an index abstraction is capable of receiving a failure store redirect + */ + private static void validateRedirectIndex(IndexAbstraction ia) { + if (ia.isDataStreamRelated() == false) { + // We should only be dealing with traffic targeting concrete data streams. 
+ throw new IllegalArgumentException("only write ops to data streams with enabled failure stores can be redirected on failure."); + } + } + + private void executeBulkRequestsByShard( + Map> requestsByShard, + ClusterState clusterState, + Runnable onRequestsCompleted + ) { if (requestsByShard.isEmpty()) { - listener.onResponse( - new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)) - ); + onRequestsCompleted.run(); return; } String nodeId = clusterService.localNode().getId(); - Runnable onBulkItemsComplete = () -> { - listener.onResponse( - new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)) - ); - // Allow memory for bulk shard request items to be reclaimed before all items have been completed - bulkRequest = null; - }; - - try (RefCountingRunnable bulkItemRequestCompleteRefCount = new RefCountingRunnable(onBulkItemsComplete)) { + try (RefCountingRunnable bulkItemRequestCompleteRefCount = new RefCountingRunnable(onRequestsCompleted)) { for (Map.Entry> entry : requestsByShard.entrySet()) { final ShardId shardId = entry.getKey(); final List requests = entry.getValue(); @@ -219,18 +304,75 @@ private void executeBulkRequestsByShard(Map> requ } } + private void redirectFailuresOrCompleteBulkOperation() { + if (DataStream.isFailureStoreEnabled() && failureStoreRedirects.isEmpty() == false) { + doRedirectFailures(); + } else { + completeBulkOperation(); + } + } + + private void completeBulkOperation() { + listener.onResponse( + new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)) + ); + // Allow memory for bulk shard request items to be reclaimed before all items have been completed + bulkRequest = null; + } + + /** + * Discards all failure store redirections and completes the bulk request. + * @param exception any documents that could have been redirected will have this exception added as a suppressed exception + * on their original failure information. 
+ */ + private void discardRedirectsAndFinish(Exception exception) { + assert failureStoreRedirects.isEmpty() != true : "Attempting to discard redirects, but there were none to discard"; + Iterator redirectedBulkItemIterator = Iterators.fromSupplier(failureStoreRedirects::poll); + while (redirectedBulkItemIterator.hasNext()) { + BulkItemRequest cancelledRedirectBulkItem = redirectedBulkItemIterator.next(); + int slot = cancelledRedirectBulkItem.id(); + BulkItemResponse originalFailure = responses.get(slot); + if (originalFailure.isFailed()) { + originalFailure.getFailure().getCause().addSuppressed(exception); + } + } + completeBulkOperation(); + } + private void executeBulkShardRequest(BulkShardRequest bulkShardRequest, Releasable releaseOnFinish) { client.executeLocally(TransportShardBulkAction.TYPE, bulkShardRequest, new ActionListener<>() { + + // Lazily get the cluster state to avoid keeping it around longer than it is needed + private ClusterState clusterState = null; + + private ClusterState getClusterState() { + if (clusterState == null) { + clusterState = clusterService.state(); + } + return clusterState; + } + @Override public void onResponse(BulkShardResponse bulkShardResponse) { - for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) { - // we may have no response if item failed - if (bulkItemResponse.getResponse() != null) { + for (int idx = 0; idx < bulkShardResponse.getResponses().length; idx++) { + // We zip the requests and responses together so that we can identify failed documents and potentially store them + BulkItemResponse bulkItemResponse = bulkShardResponse.getResponses()[idx]; + + if (bulkItemResponse.isFailed()) { + BulkItemRequest bulkItemRequest = bulkShardRequest.items()[idx]; + assert bulkItemRequest.id() == bulkItemResponse.getItemId() : "Bulk items were returned out of order"; + + String failureStoreReference = getRedirectTarget(bulkItemRequest.request(), getClusterState().metadata()); + if (failureStoreReference != null) { + addDocumentToRedirectRequests(bulkItemRequest, bulkItemResponse.getFailure().getCause(), failureStoreReference); + } + addFailure(bulkItemResponse); + } else { bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); + responses.set(bulkItemResponse.getItemId(), bulkItemResponse); } - responses.set(bulkItemResponse.getItemId(), bulkItemResponse); } - releaseOnFinish.close(); + completeShardOperation(); } @Override @@ -239,33 +381,135 @@ public void onFailure(Exception e) { for (BulkItemRequest request : bulkShardRequest.items()) { final String indexName = request.index(); DocWriteRequest docWriteRequest = request.request(); - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e); - responses.set(request.id(), BulkItemResponse.failure(request.id(), docWriteRequest.opType(), failure)); + + String failureStoreReference = getRedirectTarget(docWriteRequest, getClusterState().metadata()); + if (failureStoreReference != null) { + addDocumentToRedirectRequests(request, e, failureStoreReference); + } + addFailure(docWriteRequest, request.id(), indexName, e); } + completeShardOperation(); + } + + private void completeShardOperation() { + // Clear our handle on the cluster state to allow it to be cleaned up + clusterState = null; releaseOnFinish.close(); } }); } - private boolean handleBlockExceptions(ClusterState state) { + /** + * Determines if the write request can be redirected if it fails. 
Write requests can be redirected IFF they are targeting a data stream + * with a failure store and are not already redirected themselves. If the document can be redirected, the data stream name to use for + * the redirection is returned. + * + * @param docWriteRequest the write request to check + * @param metadata cluster state metadata for resolving index abstractions + * @return a data stream name if the write request points to a data stream that has the failure store enabled, + * or {@code null} if it does + */ + private static String getRedirectTarget(DocWriteRequest docWriteRequest, Metadata metadata) { + // Feature flag guard + if (DataStream.isFailureStoreEnabled() == false) { + return null; + } + // Do not resolve a failure store for documents that were already headed to one + if (docWriteRequest instanceof IndexRequest indexRequest && indexRequest.isWriteToFailureStore()) { + return null; + } + // If there is no index abstraction, then the request is using a pattern of some sort, which data streams do not support + IndexAbstraction ia = metadata.getIndicesLookup().get(docWriteRequest.index()); + if (ia == null) { + return null; + } + if (ia.isDataStreamRelated()) { + // The index abstraction could be an alias. Alias abstractions (even for data streams) only keep track of which _index_ they + // will write to, not which _data stream_. + // We work backward to find the data stream from the concrete write index to cover this case. + Index concreteIndex = ia.getWriteIndex(); + IndexAbstraction writeIndexAbstraction = metadata.getIndicesLookup().get(concreteIndex.getName()); + DataStream parentDataStream = writeIndexAbstraction.getParentDataStream(); + if (parentDataStream != null && parentDataStream.isFailureStore()) { + // Keep the data stream name around to resolve the redirect to failure store if the shard level request fails. + return parentDataStream.getName(); + } + } + return null; + } + + /** + * Marks a failed bulk item for redirection. At the end of the first round of shard requests, any documents in the + * redirect list are processed to their final destinations. + * + * @param request The bulk item request that failed + * @param cause The exception for the experienced the failure + * @param failureStoreReference The data stream that contains the failure store for this item + */ + private void addDocumentToRedirectRequests(BulkItemRequest request, Exception cause, String failureStoreReference) { + // Convert the document into a failure document + IndexRequest failureStoreRequest; + try { + failureStoreRequest = failureStoreDocumentConverter.transformFailedRequest( + TransportBulkAction.getIndexWriteRequest(request.request()), + cause, + failureStoreReference, + threadPool::absoluteTimeInMillis + ); + } catch (IOException ioException) { + logger.debug( + () -> "Could not transform failed bulk request item into failure store document. Attempted for [" + + request.request().opType() + + ": index=" + + request.index() + + "; id=" + + request.request().id() + + "; bulk_slot=" + + request.id() + + "] Proceeding with failing the original.", + ioException + ); + // Suppress and do not redirect + cause.addSuppressed(ioException); + return; + } + + // Store for second phase + BulkItemRequest redirected = new BulkItemRequest(request.id(), failureStoreRequest); + failureStoreRedirects.add(redirected); + } + + /** + * Examine the cluster state for blocks before continuing. If any block exists in the cluster state, this function will return + * {@code true}. 
If the block is retryable, the {@code retryOperation} runnable will be called asynchronously if the cluster ever + * becomes unblocked. If a non retryable block exists, or if we encounter a timeout before the blocks could be cleared, the + * {@code onClusterBlocked} consumer will be invoked with the cluster block exception. + * + * @param state The current state to check for blocks + * @param retryOperation If retryable blocks exist, the runnable to execute after they have cleared. + * @param onClusterBlocked Consumes the block exception if the cluster has a non retryable block or if we encounter a timeout while + * waiting for a block to clear. + * @return {@code true} if the cluster is currently blocked at all, {@code false} if the cluster has no blocks. + */ + private boolean handleBlockExceptions(ClusterState state, Runnable retryOperation, Consumer onClusterBlocked) { ClusterBlockException blockException = state.blocks().globalBlockedException(ClusterBlockLevel.WRITE); if (blockException != null) { if (blockException.retryable()) { logger.trace("cluster is blocked, scheduling a retry", blockException); - retry(blockException); + retry(blockException, retryOperation, onClusterBlocked); } else { - onFailure(blockException); + onClusterBlocked.accept(blockException); } return true; } return false; } - void retry(Exception failure) { + void retry(Exception failure, final Runnable operation, final Consumer onClusterBlocked) { assert failure != null; if (observer.isTimedOut()) { - // we running as a last attempt after a timeout has happened. don't retry - onFailure(failure); + // we are running as a last attempt after a timeout has happened. don't retry + onClusterBlocked.accept(failure); return; } observer.waitForNextChange(new ClusterStateObserver.Listener() { @@ -282,6 +526,8 @@ public void onNewClusterState(ClusterState state) { @Override public void onClusterServiceClose() { + // There is very little we can do about this, and our time in this JVM is likely short. + // Let's just try to get out of here ASAP. 
onFailure(new NodeClosedException(clusterService.localNode())); } @@ -297,7 +543,7 @@ public void onTimeout(TimeValue timeout) { } private void dispatchRetry() { - threadPool.executor(executorName).submit(BulkOperation.this); + threadPool.executor(executorName).submit(operation); } }); } @@ -308,7 +554,7 @@ private boolean addFailureIfRequiresAliasAndAliasIsMissing(DocWriteRequest re "[" + DocWriteRequest.REQUIRE_ALIAS + "] request flag is [true] and [" + request.index() + "] is not an alias", request.index() ); - addFailure(request, idx, exception); + addFailureAndDiscardRequest(request, idx, request.index(), exception); return true; } return false; @@ -320,7 +566,7 @@ private boolean addFailureIfRequiresDataStreamAndNoParentDataStream(DocWriteRequ "[" + DocWriteRequest.REQUIRE_DATA_STREAM + "] request flag is [true] and [" + request.index() + "] is not a data stream", request.index() ); - addFailure(request, idx, exception); + addFailureAndDiscardRequest(request, idx, request.index(), exception); return true; } return false; @@ -329,7 +575,7 @@ private boolean addFailureIfRequiresDataStreamAndNoParentDataStream(DocWriteRequ private boolean addFailureIfIndexIsClosed(DocWriteRequest request, Index concreteIndex, int idx, final Metadata metadata) { IndexMetadata indexMetadata = metadata.getIndexSafe(concreteIndex); if (indexMetadata.getState() == IndexMetadata.State.CLOSE) { - addFailure(request, idx, new IndexClosedException(concreteIndex)); + addFailureAndDiscardRequest(request, idx, request.index(), new IndexClosedException(concreteIndex)); return true; } return false; @@ -338,20 +584,73 @@ private boolean addFailureIfIndexIsClosed(DocWriteRequest request, Index conc private boolean addFailureIfIndexCannotBeCreated(DocWriteRequest request, int idx) { IndexNotFoundException cannotCreate = indicesThatCannotBeCreated.get(request.index()); if (cannotCreate != null) { - addFailure(request, idx, cannotCreate); + addFailureAndDiscardRequest(request, idx, request.index(), cannotCreate); return true; } return false; } - private void addFailure(DocWriteRequest request, int idx, Exception unavailableException) { - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(request.index(), request.id(), unavailableException); - BulkItemResponse bulkItemResponse = BulkItemResponse.failure(idx, request.opType(), failure); - responses.set(idx, bulkItemResponse); + /** + * Like {@link BulkOperation#addFailure(DocWriteRequest, int, String, Exception)} but this method will remove the corresponding entry + * from the working bulk request so that it never gets processed again during this operation. + */ + private void addFailureAndDiscardRequest(DocWriteRequest request, int idx, String index, Exception exception) { + addFailure(request, idx, index, exception); // make sure the request gets never processed again bulkRequest.requests.set(idx, null); } + /** + * Checks if a bulk item response exists for this entry. If none exists, a failure response is created and set in the response array. + * If a response exists already, the failure information provided to this call will be added to the existing failure as a suppressed + * exception. 
+ * + * @param request The document write request that should be failed + * @param idx The slot of the bulk entry this request corresponds to + * @param index The resource that this entry was being written to when it failed + * @param exception The exception encountered for this entry + * @see BulkOperation#addFailure(BulkItemResponse) BulkOperation.addFailure if you have a bulk item response object already + */ + private void addFailure(DocWriteRequest request, int idx, String index, Exception exception) { + BulkItemResponse bulkItemResponse = responses.get(idx); + if (bulkItemResponse == null) { + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(index, request.id(), exception); + bulkItemResponse = BulkItemResponse.failure(idx, request.opType(), failure); + } else { + // Response already recorded. We should only be here if the existing response is a failure and + // we are encountering a new failure while redirecting. + assert bulkItemResponse.isFailed() : "Attempting to overwrite successful bulk item result with a failure"; + bulkItemResponse.getFailure().getCause().addSuppressed(exception); + } + // Always replace the item in the responses for thread visibility of any mutations + responses.set(idx, bulkItemResponse); + } + + /** + * Checks if a bulk item response exists for this entry. If none exists, the failure is set in the response array. If a response exists + * already, the failure information provided to this call will be added to the existing failure as a suppressed exception. + * + * @param bulkItemResponse the item response to add to the overall result array + * @see BulkOperation#addFailure(DocWriteRequest, int, String, Exception) BulkOperation.addFailure which conditionally creates the + * failure response only when one does not exist already + */ + private void addFailure(BulkItemResponse bulkItemResponse) { + assert bulkItemResponse.isFailed() : "Attempting to add a successful bulk item response via the addFailure method"; + BulkItemResponse existingBulkItemResponse = responses.get(bulkItemResponse.getItemId()); + if (existingBulkItemResponse != null) { + // Response already recorded. We should only be here if the existing response is a failure and + // we are encountering a new failure while redirecting. + assert existingBulkItemResponse.isFailed() : "Attempting to overwrite successful bulk item result with a failure"; + existingBulkItemResponse.getFailure().getCause().addSuppressed(bulkItemResponse.getFailure().getCause()); + bulkItemResponse = existingBulkItemResponse; + } + // Always replace the item in the responses for thread visibility of any mutations + responses.set(bulkItemResponse.getItemId(), bulkItemResponse); + } + + /** + * Resolves and caches index and routing abstractions to more efficiently group write requests into shards. + */ private static class ConcreteIndices { private final ClusterState state; private final IndexNameExpressionResolver indexNameExpressionResolver; @@ -363,6 +662,13 @@ private static class ConcreteIndices { this.indexNameExpressionResolver = indexNameExpressionResolver; } + /** + * Resolves the index abstraction that the write request is targeting, potentially obtaining it from a cache. This instance isn't + * fully resolved, meaning that {@link IndexAbstraction#getWriteIndex()} should be invoked in order to get concrete write index. 
+ * + * @param request a write request + * @return the index abstraction that the write request is targeting + */ IndexAbstraction resolveIfAbsent(DocWriteRequest request) { try { IndexAbstraction indexAbstraction = indexAbstractions.get(request.index()); @@ -380,6 +686,12 @@ IndexAbstraction resolveIfAbsent(DocWriteRequest request) { } } + /** + * Determines which routing strategy to use for a document being written to the provided index, potentially obtaining the result + * from a cache. + * @param index the index to determine routing strategy for + * @return an {@link IndexRouting} object to use for assigning a write request to a shard + */ IndexRouting routing(Index index) { IndexRouting routing = routings.get(index); if (routing == null) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestModifier.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestModifier.java index 5e630bf9cdef5..2112ad48bec62 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestModifier.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestModifier.java @@ -53,6 +53,7 @@ final class BulkRequestModifier implements Iterator> { final SparseFixedBitSet failedSlots; final List itemResponses; final AtomicIntegerArray originalSlots; + final FailureStoreDocumentConverter failureStoreDocumentConverter; volatile int currentSlot = -1; @@ -61,6 +62,7 @@ final class BulkRequestModifier implements Iterator> { this.failedSlots = new SparseFixedBitSet(bulkRequest.requests().size()); this.itemResponses = new ArrayList<>(bulkRequest.requests().size()); this.originalSlots = new AtomicIntegerArray(bulkRequest.requests().size()); // oversize, but that's ok + this.failureStoreDocumentConverter = new FailureStoreDocumentConverter(); } @Override @@ -243,7 +245,7 @@ public void markItemForFailureStore(int slot, String targetIndexName, Exception ); } else { try { - IndexRequest errorDocument = FailureStoreDocument.transformFailedRequest(indexRequest, e, targetIndexName); + IndexRequest errorDocument = failureStoreDocumentConverter.transformFailedRequest(indexRequest, e, targetIndexName); // This is a fresh index request! We need to do some preprocessing on it. If we do not, when this is returned to // the bulk action, the action will see that it hasn't been processed by ingest yet and attempt to ingest it again. errorDocument.isPipelineResolved(true); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocument.java b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java similarity index 94% rename from server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocument.java rename to server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java index e0d6e8200e86d..ce76f377ac94e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocument.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java @@ -22,9 +22,7 @@ /** * Transforms an indexing request using error information into a new index request to be stored in a data stream's failure store. */ -public final class FailureStoreDocument { - - private FailureStoreDocument() {} +public class FailureStoreDocumentConverter { /** * Combines an {@link IndexRequest} that has failed during the bulk process with the error thrown for that request. 
The result is a @@ -35,7 +33,7 @@ private FailureStoreDocument() {} * @return A new {@link IndexRequest} with a failure store compliant structure * @throws IOException If there is a problem when the document's new source is serialized */ - public static IndexRequest transformFailedRequest(IndexRequest source, Exception exception, String targetIndexName) throws IOException { + public IndexRequest transformFailedRequest(IndexRequest source, Exception exception, String targetIndexName) throws IOException { return transformFailedRequest(source, exception, targetIndexName, System::currentTimeMillis); } @@ -49,7 +47,7 @@ public static IndexRequest transformFailedRequest(IndexRequest source, Exception * @return A new {@link IndexRequest} with a failure store compliant structure * @throws IOException If there is a problem when the document's new source is serialized */ - public static IndexRequest transformFailedRequest( + public IndexRequest transformFailedRequest( IndexRequest source, Exception exception, String targetIndexName, diff --git a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java index ea8eadd66acaa..165280e370025 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java +++ b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -15,11 +15,13 @@ import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Objects; +import java.util.function.BiFunction; import java.util.function.BiPredicate; import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.IntFunction; +import java.util.function.Supplier; import java.util.function.ToIntFunction; public class Iterators { @@ -56,7 +58,7 @@ public static Iterator concat(Iterator... iterators) { for (int i = 0; i < iterators.length; i++) { if (iterators[i].hasNext()) { // explicit generic type argument needed for type inference - return new ConcatenatedIterator(iterators, i); + return new ConcatenatedIterator<>(iterators, i); } } @@ -258,6 +260,103 @@ public T next() { } } + /** + * Enumerates the elements of an iterator together with their index, using a function to combine the pair together into the final items + * produced by the iterator. + *

    + * An example of its usage to enumerate a list of names together with their positional index in the list: + *

    + *
    
    +     * Iterator<String> nameIterator = ...;
    +     * Iterator<Tuple<Integer, String>> enumeratedNames = Iterators.enumerate(nameIterator, Tuple::new);
+     * enumeratedNames.forEachRemaining(tuple -> System.out.println("Index: " + tuple.v1() + ", Name: " + tuple.v2()));
    +     * 
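For reference beyond the javadoc example above, a minimal, self-contained usage sketch of the new enumerate() helper. It assumes this patch is applied (so org.elasticsearch.common.collect.Iterators exposes enumerate()) and uses org.elasticsearch.core.Tuple purely for illustration; the class name and sample values are hypothetical and not part of the patch.

    import org.elasticsearch.common.collect.Iterators;
    import org.elasticsearch.core.Tuple;

    import java.util.Iterator;
    import java.util.List;

    public class EnumerateUsageSketch {
        public static void main(String[] args) {
            // Pair each name with its position in the underlying iterator.
            Iterator<Tuple<Integer, String>> enumerated =
                Iterators.enumerate(List.of("alpha", "beta", "gamma").iterator(), Tuple::new);
            enumerated.forEachRemaining(t -> System.out.println("Index: " + t.v1() + ", Name: " + t.v2()));
            // Prints:
            // Index: 0, Name: alpha
            // Index: 1, Name: beta
            // Index: 2, Name: gamma
        }
    }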
    + * + * @param input The iterator to wrap + * @param fn A function that takes the index for an entry and the entry itself, returning an item that combines them together + * @return An iterator that combines elements together with their indices in the underlying collection + * @param The object type contained in the original iterator + * @param The object type that results from combining the original entry with its index in the iterator + */ + public static Iterator enumerate(Iterator input, BiFunction fn) { + return new EnumeratingIterator<>(Objects.requireNonNull(input), Objects.requireNonNull(fn)); + } + + private static class EnumeratingIterator implements Iterator { + private final Iterator input; + private final BiFunction fn; + + private int idx = 0; + + EnumeratingIterator(Iterator input, BiFunction fn) { + this.input = input; + this.fn = fn; + } + + @Override + public boolean hasNext() { + return input.hasNext(); + } + + @Override + public U next() { + return fn.apply(idx++, input.next()); + } + + @Override + public void forEachRemaining(Consumer action) { + input.forEachRemaining(t -> action.accept(fn.apply(idx++, t))); + } + } + + /** + * Adapts a {@link Supplier} object into an iterator. The resulting iterator will return values from the delegate Supplier until the + * delegate returns a null value. Once the delegate returns null, the iterator will claim to be empty. + *

    + * An example of its usage to iterate over a queue while draining it at the same time: + *

    + *
    
    +     *     LinkedList<String> names = ...;
    +     *     assert names.size() != 0;
    +     *
+     *     Iterator<String> nameIterator = Iterators.fromSupplier(names::pollFirst);
+     *     nameIterator.forEachRemaining(System.out::println);
    +     *     assert names.size() == 0;
    +     * 
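Likewise, a minimal runnable sketch of the queue-draining behaviour of fromSupplier() described above, assuming this patch is applied; the ArrayDeque, class name, and printed values are illustrative assumptions, not part of the patch. The iterator reports itself empty as soon as the supplier returns null, so the queue is fully drained afterwards.

    import org.elasticsearch.common.collect.Iterators;

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.Iterator;
    import java.util.List;

    public class FromSupplierUsageSketch {
        public static void main(String[] args) {
            Deque<String> names = new ArrayDeque<>(List.of("first", "second", "third"));
            // The iterator keeps polling the queue and stops once pollFirst() returns null.
            Iterator<String> nameIterator = Iterators.fromSupplier(names::pollFirst);
            nameIterator.forEachRemaining(System.out::println); // first, second, third
            System.out.println("left in queue: " + names.size()); // left in queue: 0
        }
    }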
    + * + * @param input A {@link Supplier} that returns null when no more elements should be returned from the iterator + * @return An iterator that returns elements by calling the supplier until a null value is returned + * @param The object type returned from the supplier function + */ + public static Iterator fromSupplier(Supplier input) { + return new SupplierIterator<>(Objects.requireNonNull(input)); + } + + private static final class SupplierIterator implements Iterator { + private final Supplier fn; + private T head; + + SupplierIterator(Supplier fn) { + this.fn = fn; + this.head = fn.get(); + } + + @Override + public boolean hasNext() { + return head != null; + } + + @Override + public T next() { + if (head == null) { + throw new NoSuchElementException(); + } + T next = head; + head = fn.get(); + return next; + } + } + public static boolean equals(Iterator iterator1, Iterator iterator2, BiPredicate itemComparer) { if (iterator1 == null) { return iterator2 == null; diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java new file mode 100644 index 0000000000000..2226c40b618f4 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java @@ -0,0 +1,870 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.coordination.NoMasterBlockService; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamTestHelper; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.MapperException; 
+import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.node.NodeClosedException; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpNodeClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Assume; +import org.junit.Before; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class BulkOperationTests extends ESTestCase { + + private final long millis = randomMillisUpToYear9999(); + private final String indexName = "my_index"; + private final String dataStreamName = "my_data_stream"; + private final String fsDataStreamName = "my_failure_store_data_stream"; + + private final IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .build() + ) + .build(); + private final IndexMetadata ds1BackingIndex1 = DataStreamTestHelper.createBackingIndex(dataStreamName, 1, millis) + .numberOfShards(2) + .build(); + private final IndexMetadata ds1BackingIndex2 = DataStreamTestHelper.createBackingIndex(dataStreamName, 2, millis + 1) + .numberOfShards(2) + .build(); + private final IndexMetadata ds2BackingIndex1 = DataStreamTestHelper.createBackingIndex(fsDataStreamName, 1, millis) + .numberOfShards(2) + .build(); + private final IndexMetadata ds2FailureStore1 = DataStreamTestHelper.createFailureStore(fsDataStreamName, 1, millis) + .numberOfShards(1) + .build(); + + private final DataStream dataStream1 = DataStreamTestHelper.newInstance( + dataStreamName, + List.of(ds1BackingIndex1.getIndex(), ds1BackingIndex2.getIndex()) + ); + private final DataStream dataStream2 = DataStreamTestHelper.newInstance( + fsDataStreamName, + List.of(ds2BackingIndex1.getIndex()), + List.of(ds2FailureStore1.getIndex()) + ); + + private final ClusterState DEFAULT_STATE = ClusterState.builder(ClusterName.DEFAULT) + .metadata( + Metadata.builder() + .indexTemplates( + Map.of( + "ds-template", + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, false)) + .template(new Template(null, null, null, null)) + .build(), + "ds-template-with-failure-store", + ComposableIndexTemplate.builder() + .indexPatterns(List.of(fsDataStreamName)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, true)) + .template(new Template(null, null, null, null)) + .build() + ) + ) + .indices( + Map.of( + 
indexName, + indexMetadata, + ds1BackingIndex1.getIndex().getName(), + ds1BackingIndex1, + ds1BackingIndex2.getIndex().getName(), + ds1BackingIndex2, + ds2BackingIndex1.getIndex().getName(), + ds2BackingIndex1, + ds2FailureStore1.getIndex().getName(), + ds2FailureStore1 + ) + ) + .dataStreams(Map.of(dataStreamName, dataStream1, fsDataStreamName, dataStream2), Map.of()) + .build() + ) + .build(); + + private TestThreadPool threadPool; + + @Before + public void setupThreadpool() { + threadPool = new TestThreadPool(getClass().getName()); + } + + @After + public void tearDownThreadpool() { + terminate(threadPool); + } + + /** + * If a bulk operation begins and the cluster is experiencing a non-retryable block, the bulk operation should fail + */ + public void testClusterBlockedFailsBulk() { + NodeClient client = getNodeClient((r) -> { + fail("Should not have executed shard action on blocked cluster"); + return null; + }); + + CompletableFuture future = new CompletableFuture<>(); + ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); + + // Not retryable + ClusterState state = ClusterState.builder(DEFAULT_STATE) + .blocks(ClusterBlocks.builder().addGlobalBlock(Metadata.CLUSTER_READ_ONLY_BLOCK).build()) + .build(); + + // Make sure we don't wait at all + ClusterStateObserver observer = mock(ClusterStateObserver.class); + when(observer.setAndGetObservedState()).thenReturn(state); + when(observer.isTimedOut()).thenReturn(false); + doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any()); + + newBulkOperation(client, new BulkRequest(), state, observer, listener).run(); + + expectThrows(ExecutionException.class, ClusterBlockException.class, future::get); + } + + /** + * If a bulk operation times out while waiting for cluster blocks to be cleared, it should fail the request. + */ + public void testTimeoutOnRetryableClusterBlockedFailsBulk() { + NodeClient client = getNodeClient((r) -> { + fail("Should not have executed shard action on blocked cluster"); + return null; + }); + + CompletableFuture future = new CompletableFuture<>(); + ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); + + // Retryable + final ClusterState state = ClusterState.builder(DEFAULT_STATE) + .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES).build()) + .build(); + + // Always return cluster state, first observation: return same cluster state, second observation: time out, ensure no further wait + ClusterStateObserver observer = mock(ClusterStateObserver.class); + when(observer.setAndGetObservedState()).thenReturn(state); + when(observer.isTimedOut()).thenReturn(false, true); + doAnswer((i) -> { + // Returning same state or timing out will result in one more attempt. + if (randomBoolean()) { + i.getArgument(0, ClusterStateObserver.Listener.class).onNewClusterState(state); + } else { + i.getArgument(0, ClusterStateObserver.Listener.class).onTimeout(null); + } + return null; + }).doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any()); + + newBulkOperation(client, new BulkRequest(), state, observer, listener).run(); + + expectThrows(ExecutionException.class, ClusterBlockException.class, future::get); + verify(observer, times(2)).isTimedOut(); + verify(observer, times(1)).waitForNextChange(any()); + } + + /** + * If the cluster service closes while a bulk operation is waiting for cluster blocks to be cleared, it should fail the request. 
+ */ + public void testNodeClosedOnRetryableClusterBlockedFailsBulk() { + NodeClient client = getNodeClient((r) -> { + fail("Should not have executed shard action on blocked cluster"); + return null; + }); + + CompletableFuture future = new CompletableFuture<>(); + ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); + + // Retryable + final ClusterState state = ClusterState.builder(DEFAULT_STATE) + .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES).build()) + .build(); + + // Always return cluster state, first observation: signal cluster service closed, ensure no further wait + ClusterStateObserver observer = mock(ClusterStateObserver.class); + when(observer.setAndGetObservedState()).thenReturn(state); + when(observer.isTimedOut()).thenReturn(false); + doAnswer((i) -> { + i.getArgument(0, ClusterStateObserver.Listener.class).onClusterServiceClose(); + return null; + }).doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any()); + + newBulkOperation(client, new BulkRequest(), state, observer, listener).run(); + + expectThrows(ExecutionException.class, NodeClosedException.class, future::get); + verify(observer, times(1)).isTimedOut(); + verify(observer, times(1)).waitForNextChange(any()); + } + + /** + * A bulk operation to an index should succeed if all of its shard level requests succeed + */ + public void testBulkToIndex() throws Exception { + // Requests that go to two separate shards + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(indexName).id("1").source(Map.of("key", "val"))); + bulkRequest.add(new IndexRequest(indexName).id("3").source(Map.of("key", "val"))); + + NodeClient client = getNodeClient(this::acceptAllShardWrites); + + CompletableFuture future = new CompletableFuture<>(); + ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); + + newBulkOperation(client, bulkRequest, listener).run(); + + BulkResponse bulkItemResponses = future.get(); + assertThat(bulkItemResponses.hasFailures(), is(false)); + } + + /** + * A bulk operation to an index should partially succeed if only some of its shard level requests fail + */ + public void testBulkToIndexFailingEntireShard() throws Exception { + // Requests that go to two separate shards + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(indexName).id("1").source(Map.of("key", "val"))); + bulkRequest.add(new IndexRequest(indexName).id("3").source(Map.of("key", "val"))); + + NodeClient client = getNodeClient( + failingShards(Map.of(new ShardId(indexMetadata.getIndex(), 0), () -> new MapperException("test"))) + ); + + CompletableFuture future = new CompletableFuture<>(); + ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); + + newBulkOperation(client, bulkRequest, listener).run(); + + BulkResponse bulkItemResponses = future.get(); + assertThat(bulkItemResponses.hasFailures(), is(true)); + BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) + .filter(BulkItemResponse::isFailed) + .findFirst() + .orElseThrow(() -> new AssertionError("Could not find failed item")); + assertThat(failedItem.getFailure().getCause(), is(instanceOf(MapperException.class))); + assertThat(failedItem.getFailure().getCause().getMessage(), is(equalTo("test"))); + } + + /** + * A bulk operation to a data stream should succeed if all of its shard level requests succeed + */ + public void 
testBulkToDataStream() throws Exception { + // Requests that go to two separate shards + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(dataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + bulkRequest.add(new IndexRequest(dataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + + NodeClient client = getNodeClient(this::acceptAllShardWrites); + + CompletableFuture future = new CompletableFuture<>(); + ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); + + newBulkOperation(client, bulkRequest, listener).run(); + + BulkResponse bulkItemResponses = future.get(); + assertThat(bulkItemResponses.hasFailures(), is(false)); + } + + /** + * A bulk operation to a data stream should partially succeed if only some of its shard level requests fail + */ + public void testBulkToDataStreamFailingEntireShard() throws Exception { + // Requests that go to two separate shards + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(dataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + bulkRequest.add(new IndexRequest(dataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + + NodeClient client = getNodeClient( + failingShards(Map.of(new ShardId(ds1BackingIndex2.getIndex(), 0), () -> new MapperException("test"))) + ); + + CompletableFuture future = new CompletableFuture<>(); + ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); + + newBulkOperation(client, bulkRequest, listener).run(); + + BulkResponse bulkItemResponses = future.get(); + assertThat(bulkItemResponses.hasFailures(), is(true)); + BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) + .filter(BulkItemResponse::isFailed) + .findFirst() + .orElseThrow(() -> new AssertionError("Could not find failed item")); + assertThat(failedItem.getFailure().getCause(), is(instanceOf(MapperException.class))); + assertThat(failedItem.getFailure().getCause().getMessage(), is(equalTo("test"))); + } + + /** + * A bulk operation to a data stream with a failure store enabled should redirect any shard level failures to the failure store. 
+ */ + public void testFailingEntireShardRedirectsToFailureStore() throws Exception { + Assume.assumeTrue(DataStream.isFailureStoreEnabled()); + + // Requests that go to two separate shards + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + bulkRequest.add(new IndexRequest(fsDataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + + NodeClient client = getNodeClient( + failingShards(Map.of(new ShardId(ds2BackingIndex1.getIndex(), 0), () -> new MapperException("test"))) + ); + + CompletableFuture future = new CompletableFuture<>(); + ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); + + newBulkOperation(client, bulkRequest, listener).run(); + + BulkResponse bulkItemResponses = future.get(); + assertThat(bulkItemResponses.hasFailures(), is(false)); + BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) + .filter(item -> item.getIndex().equals(ds2FailureStore1.getIndex().getName())) + .findFirst() + .orElseThrow(() -> new AssertionError("Could not find redirected item")); + assertThat(failedItem, is(notNullValue())); + } + + /** + * A bulk operation to a data stream with a failure store enabled should redirect any documents that fail at a shard level to the + * failure store. + */ + public void testFailingDocumentRedirectsToFailureStore() throws Exception { + Assume.assumeTrue(DataStream.isFailureStoreEnabled()); + + // Requests that go to two separate shards + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + bulkRequest.add(new IndexRequest(fsDataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + + NodeClient client = getNodeClient( + thatFailsDocuments(Map.of(new IndexAndId(ds2BackingIndex1.getIndex().getName(), "3"), () -> new MapperException("test"))) + ); + + CompletableFuture future = new CompletableFuture<>(); + ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); + + newBulkOperation(client, bulkRequest, listener).run(); + + BulkResponse bulkItemResponses = future.get(); + assertThat(bulkItemResponses.hasFailures(), is(false)); + BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) + .filter(item -> item.getIndex().equals(ds2FailureStore1.getIndex().getName())) + .findFirst() + .orElseThrow(() -> new AssertionError("Could not find redirected item")); + assertThat(failedItem.getIndex(), is(notNullValue())); + } + + /** + * A bulk operation to a data stream with a failure store enabled may still partially fail if the redirected documents experience + * a shard-level failure while writing to the failure store indices. 
+ */ + public void testFailureStoreShardFailureRejectsDocument() throws Exception { + Assume.assumeTrue(DataStream.isFailureStoreEnabled()); + + // Requests that go to two separate shards + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + bulkRequest.add(new IndexRequest(fsDataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + + // Mock client that rejects all shard requests on the first shard in the backing index, and all requests to the only shard of + // the failure store index. + NodeClient client = getNodeClient( + failingShards( + Map.of( + new ShardId(ds2BackingIndex1.getIndex(), 0), + () -> new MapperException("root cause"), + new ShardId(ds2FailureStore1.getIndex(), 0), + () -> new MapperException("failure store test failure") + ) + ) + ); + + CompletableFuture future = new CompletableFuture<>(); + ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); + + newBulkOperation(client, bulkRequest, listener).run(); + + BulkResponse bulkItemResponses = future.get(); + assertThat(bulkItemResponses.hasFailures(), is(true)); + BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) + .filter(BulkItemResponse::isFailed) + .findFirst() + .orElseThrow(() -> new AssertionError("Could not find redirected item")); + assertThat(failedItem.getFailure().getCause(), is(instanceOf(MapperException.class))); + assertThat(failedItem.getFailure().getCause().getMessage(), is(equalTo("root cause"))); + assertThat(failedItem.getFailure().getCause().getSuppressed().length, is(not(equalTo(0)))); + assertThat(failedItem.getFailure().getCause().getSuppressed()[0], is(instanceOf(MapperException.class))); + assertThat(failedItem.getFailure().getCause().getSuppressed()[0].getMessage(), is(equalTo("failure store test failure"))); + } + + /** + * A document that fails at the shard level will be converted into a failure document if an applicable failure store is present. + * In the unlikely case that the failure document cannot be created, the document will not be redirected to the failure store and + * instead will simply report its original failure in the response, with the conversion failure present as a suppressed exception. 
+ */ + public void testFailedDocumentCanNotBeConvertedFails() throws Exception { + Assume.assumeTrue(DataStream.isFailureStoreEnabled()); + + // Requests that go to two separate shards + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + bulkRequest.add(new IndexRequest(fsDataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + + NodeClient client = getNodeClient( + thatFailsDocuments(Map.of(new IndexAndId(ds2BackingIndex1.getIndex().getName(), "3"), () -> new MapperException("root cause"))) + ); + + CompletableFuture future = new CompletableFuture<>(); + ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); + + // Mock a failure store document converter that always fails + FailureStoreDocumentConverter mockConverter = mock(FailureStoreDocumentConverter.class); + when(mockConverter.transformFailedRequest(any(), any(), any(), any())).thenThrow(new IOException("Could not serialize json")); + + newBulkOperation(client, bulkRequest, mockConverter, listener).run(); + + BulkResponse bulkItemResponses = future.get(); + assertThat(bulkItemResponses.hasFailures(), is(true)); + BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) + .filter(BulkItemResponse::isFailed) + .findFirst() + .orElseThrow(() -> new AssertionError("Could not find redirected item")); + assertThat(failedItem.getFailure().getCause(), is(instanceOf(MapperException.class))); + assertThat(failedItem.getFailure().getCause().getMessage(), is(equalTo("root cause"))); + assertThat(failedItem.getFailure().getCause().getSuppressed().length, is(not(equalTo(0)))); + assertThat(failedItem.getFailure().getCause().getSuppressed()[0], is(instanceOf(IOException.class))); + assertThat(failedItem.getFailure().getCause().getSuppressed()[0].getMessage(), is(equalTo("Could not serialize json"))); + } + + /** + * A bulk operation to a data stream with a failure store enabled may still partially fail if the cluster is experiencing a + * non-retryable block when the redirected documents would be sent to the shard-level action. + */ + public void testBlockedClusterRejectsFailureStoreDocument() throws Exception { + Assume.assumeTrue(DataStream.isFailureStoreEnabled()); + + // Requests that go to two separate shards + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + bulkRequest.add(new IndexRequest(fsDataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + + // Mock client that rejects all shard requests on the first shard in the backing index, and all requests to the only shard of + // the failure store index. 
+ NodeClient client = getNodeClient( + failingShards(Map.of(new ShardId(ds2BackingIndex1.getIndex(), 0), () -> new MapperException("root cause"))) + ); + + // Create a new cluster state that has a non-retryable cluster block on it + ClusterState blockedState = ClusterState.builder(DEFAULT_STATE) + .blocks(ClusterBlocks.builder().addGlobalBlock(IndexMetadata.INDEX_READ_ONLY_BLOCK).build()) + .build(); + + // First time we will return the normal cluster state (before normal writes) which skips any further interactions, + // Second time we will return a blocked cluster state (before the redirects) causing us to start observing the cluster + // Finally, we will simulate the observer timing out causing the redirects to fail. + ClusterStateObserver observer = mock(ClusterStateObserver.class); + when(observer.setAndGetObservedState()).thenReturn(DEFAULT_STATE).thenReturn(blockedState); + when(observer.isTimedOut()).thenReturn(false); + doThrow(new AssertionError("Should not wait on non retryable block")).when(observer).waitForNextChange(any()); + + CompletableFuture future = new CompletableFuture<>(); + ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); + + newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); + + BulkResponse bulkItemResponses = future.get(); + assertThat(bulkItemResponses.hasFailures(), is(true)); + BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) + .filter(BulkItemResponse::isFailed) + .findFirst() + .orElseThrow(() -> new AssertionError("Could not find redirected item")); + assertThat(failedItem.getFailure().getCause(), is(instanceOf(MapperException.class))); + assertThat(failedItem.getFailure().getCause().getMessage(), is(equalTo("root cause"))); + assertThat(failedItem.getFailure().getCause().getSuppressed().length, is(not(equalTo(0)))); + assertThat(failedItem.getFailure().getCause().getSuppressed()[0], is(instanceOf(ClusterBlockException.class))); + assertThat( + failedItem.getFailure().getCause().getSuppressed()[0].getMessage(), + is(equalTo("blocked by: [FORBIDDEN/5/index read-only (api)];")) + ); + + verify(observer, times(0)).isTimedOut(); + verify(observer, times(0)).waitForNextChange(any()); + } + + /** + * A bulk operation to a data stream with a failure store enabled may still partially fail if the cluster times out while waiting for a + * retryable block to clear when the redirected documents would be sent to the shard-level action. + */ + public void testOperationTimeoutRejectsFailureStoreDocument() throws Exception { + Assume.assumeTrue(DataStream.isFailureStoreEnabled()); + + // Requests that go to two separate shards + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + bulkRequest.add(new IndexRequest(fsDataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + + // Mock client that rejects all shard requests on the first shard in the backing index, and all requests to the only shard of + // the failure store index. 
+ NodeClient client = getNodeClient( + failingShards(Map.of(new ShardId(ds2BackingIndex1.getIndex(), 0), () -> new MapperException("root cause"))) + ); + + // Create a new cluster state that has a retryable cluster block on it + ClusterState blockedState = ClusterState.builder(DEFAULT_STATE) + .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES).build()) + .build(); + + // First time we will return the normal cluster state (before normal writes) which skips any further interactions, + // Second time we will return a blocked cluster state (before the redirects) causing us to start observing the cluster + // Finally, we will simulate the observer timing out causing the redirects to fail. + ClusterStateObserver observer = mock(ClusterStateObserver.class); + when(observer.setAndGetObservedState()).thenReturn(DEFAULT_STATE).thenReturn(blockedState); + when(observer.isTimedOut()).thenReturn(false, true); + doAnswer((i) -> { + // Returning same state or timing out will result in one more attempt. + if (randomBoolean()) { + i.getArgument(0, ClusterStateObserver.Listener.class).onNewClusterState(blockedState); + } else { + i.getArgument(0, ClusterStateObserver.Listener.class).onTimeout(null); + } + return null; + }).doThrow(new AssertionError("Should not wait any longer")).when(observer).waitForNextChange(any()); + + CompletableFuture future = new CompletableFuture<>(); + ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); + + newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); + + BulkResponse bulkItemResponses = future.get(); + assertThat(bulkItemResponses.hasFailures(), is(true)); + BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) + .filter(BulkItemResponse::isFailed) + .findFirst() + .orElseThrow(() -> new AssertionError("Could not find redirected item")); + assertThat(failedItem.getFailure().getCause(), is(instanceOf(MapperException.class))); + assertThat(failedItem.getFailure().getCause().getMessage(), is(equalTo("root cause"))); + assertThat(failedItem.getFailure().getCause().getSuppressed().length, is(not(equalTo(0)))); + assertThat(failedItem.getFailure().getCause().getSuppressed()[0], is(instanceOf(ClusterBlockException.class))); + assertThat( + failedItem.getFailure().getCause().getSuppressed()[0].getMessage(), + is(equalTo("blocked by: [SERVICE_UNAVAILABLE/2/no master];")) + ); + + verify(observer, times(2)).isTimedOut(); + verify(observer, times(1)).waitForNextChange(any()); + } + + /** + * A bulk operation to a data stream with a failure store enabled may completely fail if the cluster service closes out while waiting + * for a retryable block to clear when the redirected documents would be sent to the shard-level action. + */ + public void testNodeClosureRejectsFailureStoreDocument() { + Assume.assumeTrue(DataStream.isFailureStoreEnabled()); + + // Requests that go to two separate shards + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + bulkRequest.add(new IndexRequest(fsDataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)); + + // Mock client that rejects all shard requests on the first shard in the backing index, and all requests to the only shard of + // the failure store index. 
+ NodeClient client = getNodeClient( + failingShards(Map.of(new ShardId(ds2BackingIndex1.getIndex(), 0), () -> new MapperException("root cause"))) + ); + + // Create a new cluster state that has a retryable cluster block on it + ClusterState blockedState = ClusterState.builder(DEFAULT_STATE) + .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES).build()) + .build(); + + // First time we will return the normal cluster state (before normal writes) which skips any further interactions, + // Second time we will return a blocked cluster state (before the redirects) causing us to start observing the cluster + // Finally, we will simulate the node closing causing the redirects to fail. + ClusterStateObserver observer = mock(ClusterStateObserver.class); + when(observer.setAndGetObservedState()).thenReturn(DEFAULT_STATE).thenReturn(blockedState); + when(observer.isTimedOut()).thenReturn(false, true); + doAnswer((i) -> { + i.getArgument(0, ClusterStateObserver.Listener.class).onClusterServiceClose(); + return null; + }).doThrow(new AssertionError("Should not wait any longer")).when(observer).waitForNextChange(any()); + + CompletableFuture future = new CompletableFuture<>(); + ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); + + newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); + + expectThrows(ExecutionException.class, NodeClosedException.class, future::get); + + verify(observer, times(1)).isTimedOut(); + verify(observer, times(1)).waitForNextChange(any()); + } + + /** + * Accepts all write operations from the given request object when it is encountered in the mock shard bulk action + */ + private BulkShardResponse acceptAllShardWrites(BulkShardRequest request) { + return new BulkShardResponse( + request.shardId(), + Arrays.stream(request.items()).map(item -> requestToResponse(request.shardId(), item)).toArray(BulkItemResponse[]::new) + ); + } + + /** + * Maps an entire shard id to an exception to throw when it is encountered in the mock shard bulk action + */ + private CheckedFunction failingShards(Map> shardsToFail) { + return (BulkShardRequest request) -> { + if (shardsToFail.containsKey(request.shardId())) { + throw shardsToFail.get(request.shardId()).get(); + } else { + return acceptAllShardWrites(request); + } + }; + } + + /** + * Index name / id tuple + */ + private record IndexAndId(String indexName, String id) {} + + /** + * Maps a document to an exception to thrown when it is encountered in the mock shard bulk action + */ + private CheckedFunction thatFailsDocuments( + Map> documentsToFail + ) { + return (BulkShardRequest request) -> new BulkShardResponse(request.shardId(), Arrays.stream(request.items()).map(item -> { + IndexAndId key = new IndexAndId(request.index(), item.request().id()); + if (documentsToFail.containsKey(key)) { + return requestToFailedResponse(item, documentsToFail.get(key).get()); + } else { + return requestToResponse(request.shardId(), item); + } + }).toArray(BulkItemResponse[]::new)); + } + + /** + * Create a shard-level result given a bulk item + */ + private static BulkItemResponse requestToResponse(ShardId shardId, BulkItemRequest itemRequest) { + return BulkItemResponse.success(itemRequest.id(), itemRequest.request().opType(), switch (itemRequest.request().opType()) { + case INDEX, CREATE -> new IndexResponse(shardId, itemRequest.request().id(), 1, 1, 1, true); + case UPDATE -> new UpdateResponse(shardId, itemRequest.request().id(), 1, 1, 1, 
DocWriteResponse.Result.UPDATED); + case DELETE -> new DeleteResponse(shardId, itemRequest.request().id(), 1, 1, 1, true); + }); + } + + /** + * Create a shard-level failure given a bulk item + */ + private static BulkItemResponse requestToFailedResponse(BulkItemRequest itemRequest, Exception reason) { + return BulkItemResponse.failure( + itemRequest.id(), + itemRequest.request().opType(), + new BulkItemResponse.Failure(itemRequest.index(), itemRequest.request().id(), reason) + ); + } + + /** + * Create a client that redirects expected actions to the provided function and fails if an unexpected operation happens. + * @param onShardAction Called when TransportShardBulkAction is executed. + * @return A node client for the test. + */ + private NodeClient getNodeClient(CheckedFunction onShardAction) { + return new NoOpNodeClient(threadPool) { + @Override + @SuppressWarnings("unchecked") + public Task executeLocally( + ActionType action, + Request request, + ActionListener listener + ) { + if (TransportShardBulkAction.TYPE.equals(action)) { + Response response = null; + Exception exception = null; + try { + response = (Response) onShardAction.apply((BulkShardRequest) request); + } catch (Exception responseException) { + exception = responseException; + } + if (response != null) { + listener.onResponse(response); + } else { + listener.onFailure(exception); + } + } else { + fail("Unexpected client call to " + action.name()); + } + return null; + } + }; + } + + private BulkOperation newBulkOperation(NodeClient client, BulkRequest request, ActionListener listener) { + return newBulkOperation( + DEFAULT_STATE, + client, + request, + new AtomicArray<>(request.numberOfActions()), + Map.of(), + mockObserver(DEFAULT_STATE), + listener, + new FailureStoreDocumentConverter() + ); + } + + private BulkOperation newBulkOperation( + NodeClient client, + BulkRequest request, + FailureStoreDocumentConverter failureStoreDocumentConverter, + ActionListener listener + ) { + return newBulkOperation( + DEFAULT_STATE, + client, + request, + new AtomicArray<>(request.numberOfActions()), + Map.of(), + mockObserver(DEFAULT_STATE), + listener, + failureStoreDocumentConverter + ); + } + + private BulkOperation newBulkOperation( + NodeClient client, + BulkRequest request, + ClusterState state, + ClusterStateObserver observer, + ActionListener listener + ) { + return newBulkOperation( + state, + client, + request, + new AtomicArray<>(request.numberOfActions()), + Map.of(), + observer, + listener, + new FailureStoreDocumentConverter() + ); + } + + private BulkOperation newBulkOperation( + ClusterState state, + NodeClient client, + BulkRequest request, + AtomicArray existingResponses, + Map indicesThatCanNotBeCreated, + ClusterStateObserver observer, + ActionListener listener, + FailureStoreDocumentConverter failureStoreDocumentConverter + ) { + // Time provision + long timeZero = TimeUnit.MILLISECONDS.toNanos(randomMillisUpToYear9999() - TimeUnit.DAYS.toMillis(1)); + long duration = TimeUnit.SECONDS.toNanos(randomLongBetween(1, 60)); + long endTime = timeZero + duration; + + // Expressions + ThreadContext ctx = threadPool.getThreadContext(); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(ctx, new SystemIndices(List.of())); + + // Mocks + final DiscoveryNode mockNode = mock(DiscoveryNode.class); + when(mockNode.getId()).thenReturn(randomAlphaOfLength(10)); + final ClusterService clusterService = mock(ClusterService.class); + when(clusterService.state()).thenReturn(state); + 
when(clusterService.localNode()).thenReturn(mockNode); + + return new BulkOperation( + null, + threadPool, + ThreadPool.Names.SAME, + clusterService, + request, + client, + existingResponses, + indicesThatCanNotBeCreated, + indexNameExpressionResolver, + () -> endTime, + timeZero, + listener, + observer, + failureStoreDocumentConverter + ); + } + + /** + * A default mock cluster state observer that simply returns the state + */ + private ClusterStateObserver mockObserver(ClusterState state) { + ClusterStateObserver mockObserver = mock(ClusterStateObserver.class); + when(mockObserver.setAndGetObservedState()).thenReturn(state); + when(mockObserver.isTimedOut()).thenReturn(false); + return mockObserver; + } +} diff --git a/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentTests.java b/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverterTests.java similarity index 90% rename from server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentTests.java rename to server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverterTests.java index 962c796e18c2a..67116bd40c2c8 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverterTests.java @@ -22,7 +22,7 @@ import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.CoreMatchers.startsWith; -public class FailureStoreDocumentTests extends ESTestCase { +public class FailureStoreDocumentConverterTests extends ESTestCase { public void testFailureStoreDocumentConverstion() throws Exception { IndexRequest source = new IndexRequest("original_index").routing("fake_routing") @@ -36,7 +36,12 @@ public void testFailureStoreDocumentConverstion() throws Exception { String targetIndexName = "rerouted_index"; long testTime = 1702357200000L; // 2023-12-12T05:00:00.000Z - IndexRequest convertedRequest = FailureStoreDocument.transformFailedRequest(source, exception, targetIndexName, () -> testTime); + IndexRequest convertedRequest = new FailureStoreDocumentConverter().transformFailedRequest( + source, + exception, + targetIndexName, + () -> testTime + ); // Retargeting write assertThat(convertedRequest.id(), is(nullValue())); @@ -63,7 +68,7 @@ public void testFailureStoreDocumentConverstion() throws Exception { ); assertThat( ObjectPath.eval("error.stack_trace", convertedRequest.sourceAsMap()), - containsString("at org.elasticsearch.action.bulk.FailureStoreDocumentTests.testFailureStoreDocumentConverstion") + containsString("at org.elasticsearch.action.bulk.FailureStoreDocumentConverterTests.testFailureStoreDocumentConverstion") ); assertThat(convertedRequest.isWriteToFailureStore(), is(true)); diff --git a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java index 351efa59f2381..67f74df78e256 100644 --- a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java @@ -9,12 +9,14 @@ package org.elasticsearch.common.collect; import org.elasticsearch.common.Randomness; +import org.elasticsearch.core.Tuple; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.NoSuchElementException; import 
java.util.Objects; @@ -242,6 +244,29 @@ public Integer next() { assertEquals(array.length, index.get()); } + public void testEnumerate() { + assertEmptyIterator(Iterators.enumerate(Iterators.concat(), Tuple::new)); + + final var array = randomIntegerArray(); + final var index = new AtomicInteger(); + Iterators.enumerate(Iterators.forArray(array), Tuple::new).forEachRemaining(t -> { + int idx = index.getAndIncrement(); + assertEquals(idx, t.v1().intValue()); + assertEquals(array[idx], t.v2()); + }); + assertEquals(array.length, index.get()); + } + + public void testSupplier() { + assertEmptyIterator(Iterators.fromSupplier(() -> null)); + + final var array = randomIntegerArray(); + final var index = new AtomicInteger(); + final var queue = new LinkedList<>(Arrays.asList(array)); + Iterators.fromSupplier(queue::pollFirst).forEachRemaining(i -> assertEquals(array[index.getAndIncrement()], i)); + assertEquals(array.length, index.get()); + } + public void testEquals() { final BiPredicate notCalled = (a, b) -> { throw new AssertionError("not called"); }; From 9495f2daa1bc05ec103f2ffaad96121315f8572a Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Tue, 26 Mar 2024 20:21:15 +0000 Subject: [PATCH 77/79] Bump versions after 8.13.0 release --- .buildkite/pipelines/intake.yml | 2 +- .buildkite/pipelines/periodic-packaging.yml | 12 ++++++------ .buildkite/pipelines/periodic.yml | 16 ++++++++-------- .ci/bwcVersions | 2 +- .ci/snapshotBwcVersions | 3 +-- .../src/main/java/org/elasticsearch/Version.java | 2 +- .../org/elasticsearch/TransportVersions.csv | 1 + .../org/elasticsearch/index/IndexVersions.csv | 1 + 8 files changed, 20 insertions(+), 19 deletions(-) diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index f45caaf7fdfaf..cb8062fef02b4 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -48,7 +48,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.20", "8.12.3", "8.13.0", "8.14.0"] + BWC_VERSION: ["7.17.20", "8.13.1", "8.14.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index c38e0e48cd070..9992d940e3c97 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -1889,8 +1889,8 @@ steps: env: BWC_VERSION: 8.12.2 - - label: "{{matrix.image}} / 8.12.3 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.3 + - label: "{{matrix.image}} / 8.13.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.13.0 timeout_in_minutes: 300 matrix: setup: @@ -1903,10 +1903,10 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.12.3 + BWC_VERSION: 8.13.0 - - label: "{{matrix.image}} / 8.13.0 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.13.0 + - label: "{{matrix.image}} / 8.13.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.13.1 timeout_in_minutes: 300 matrix: setup: @@ -1919,7 +1919,7 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.13.0 + BWC_VERSION: 8.13.1 - label: "{{matrix.image}} / 8.14.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh 
-Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.14.0 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 23f0e7d4bbacf..ff378477f7aa6 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -1162,8 +1162,8 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.12.2 - - label: 8.12.3 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.3#bwcTest + - label: 8.13.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.13.0#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -1171,9 +1171,9 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.12.3 - - label: 8.13.0 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.13.0#bwcTest + BWC_VERSION: 8.13.0 + - label: 8.13.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.13.1#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -1181,7 +1181,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.13.0 + BWC_VERSION: 8.13.1 - label: 8.14.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.14.0#bwcTest timeout_in_minutes: 300 @@ -1256,7 +1256,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk17 - BWC_VERSION: ["7.17.20", "8.12.3", "8.13.0", "8.14.0"] + BWC_VERSION: ["7.17.20", "8.13.1", "8.14.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -1300,7 +1300,7 @@ steps: - openjdk17 - openjdk21 - openjdk22 - BWC_VERSION: ["7.17.20", "8.12.3", "8.13.0", "8.14.0"] + BWC_VERSION: ["7.17.20", "8.13.1", "8.14.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index bc5c24cf0f365..a655b5a862683 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -115,6 +115,6 @@ BWC_VERSION: - "8.12.0" - "8.12.1" - "8.12.2" - - "8.12.3" - "8.13.0" + - "8.13.1" - "8.14.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 6d391a3fd72ae..f31603772a7f7 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,4 @@ BWC_VERSION: - "7.17.20" - - "8.12.3" - - "8.13.0" + - "8.13.1" - "8.14.0" diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 391ede4d2aa40..3a4958e046a82 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -167,8 +167,8 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_12_0 = new Version(8_12_00_99); public static final Version V_8_12_1 = new Version(8_12_01_99); public static final Version V_8_12_2 = new Version(8_12_02_99); - public static final Version V_8_12_3 = new Version(8_12_03_99); public static final Version V_8_13_0 = new Version(8_13_00_99); + public static final Version V_8_13_1 = new Version(8_13_01_99); public static final Version V_8_14_0 = new Version(8_14_00_99); public static final Version CURRENT = V_8_14_0; diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index 17f594ec992d1..0a1480526c9f0 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -113,3 +113,4 @@ 8.12.0,8560000 8.12.1,8560001 8.12.2,8560001 +8.13.0,8595000 diff --git 
a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index b29ae972c9b13..f66cda3c08fc7 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -113,3 +113,4 @@ 8.12.0,8500008 8.12.1,8500010 8.12.2,8500010 +8.13.0,8503000 From 9e6b893896a738bbb77b2dab7cc1e6640881ceee Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Tue, 26 Mar 2024 13:37:19 -0700 Subject: [PATCH 78/79] Text fields are stored by default in TSDB indices (#106338) * Text fields are stored by default with synthetic source Synthetic source requires text fields to be stored or have keyword sub-field that supports synthetic source. If there are no keyword fields users currently have to explicitly set 'store' to 'true' or get a validation exception. This is not the best experience. It is quite likely that setting `store` to `true` is the correct thing to do but users still get an error and need to investigate it. With this change if `store` setting is not specified in such context it will be set to `true` by default. Setting it explicitly to `false` results in the exception. Closes #97039 --- docs/changelog/106338.yaml | 6 ++ docs/reference/mapping/types/text.asciidoc | 7 +- .../test/tsdb/90_unsupported_operations.yml | 1 + .../index/mapper/DynamicFieldsBuilder.java | 6 +- .../index/mapper/FieldMapper.java | 23 ++++++ .../index/mapper/KeywordFieldMapper.java | 17 +++++ .../index/mapper/TextFieldMapper.java | 68 ++++++++++++----- .../index/query/QueryRewriteContext.java | 6 +- .../fielddata/AbstractFieldDataTestCase.java | 8 +- .../index/fielddata/FilterFieldDataTests.java | 27 +++++-- .../fielddata/IndexFieldDataServiceTests.java | 24 +++--- .../mapper/DocumentParserContextTests.java | 8 +- .../index/mapper/MultiFieldsTests.java | 72 ++++++++++++++++++ .../index/mapper/ObjectMapperMergeTests.java | 6 +- .../index/mapper/ObjectMapperTests.java | 4 +- .../mapper/TextFieldAnalyzerModeTests.java | 13 ++++ .../index/mapper/TextFieldMapperTests.java | 74 ++++++++++++++++++- .../highlight/HighlightBuilderTests.java | 6 +- .../rescore/QueryRescorerBuilderTests.java | 12 ++- 19 files changed, 332 insertions(+), 56 deletions(-) create mode 100644 docs/changelog/106338.yaml create mode 100644 server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java diff --git a/docs/changelog/106338.yaml b/docs/changelog/106338.yaml new file mode 100644 index 0000000000000..c05826d87a11f --- /dev/null +++ b/docs/changelog/106338.yaml @@ -0,0 +1,6 @@ +pr: 106338 +summary: Text fields are stored by default in TSDB indices +area: TSDB +type: enhancement +issues: + - 97039 diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc index 2a6e2f3ef8ae8..c33af69df5607 100644 --- a/docs/reference/mapping/types/text.asciidoc +++ b/docs/reference/mapping/types/text.asciidoc @@ -133,8 +133,11 @@ The following parameters are accepted by `text` fields: <>:: Whether the field value should be stored and retrievable separately from - the <> field. Accepts `true` or `false` - (default). + the <> field. Accepts `true` or `false` (default). + This parameter will be automatically set to `true` for TSDB indices + (indices that have `index.mode` set to `time_series`) + if there is no <> + sub-field that supports synthetic `_source`. 
<>:: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml index 049b9670b6b46..57ad446eaf637 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml @@ -278,6 +278,7 @@ synthetic source text field: type: keyword name: type: text + store: false value: type: long time_series_metric: gauge diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java index 8505c561bfb1a..799042b4f3a87 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java @@ -333,7 +333,11 @@ public boolean newDynamicStringField(DocumentParserContext context, String name) ); } else { return createDynamicField( - new TextFieldMapper.Builder(name, context.indexAnalyzers()).addMultiField( + new TextFieldMapper.Builder( + name, + context.indexAnalyzers(), + context.indexSettings().getMode().isSyntheticSourceEnabled() + ).addMultiField( new KeywordFieldMapper.Builder("keyword", context.indexSettings().getIndexVersionCreated()).ignoreAbove(256) ), context diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index e029aaa657d23..fe9bdd73cfa10 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -450,13 +450,28 @@ public static class Builder { private final Map> mapperBuilders = new HashMap<>(); + private boolean hasSyntheticSourceCompatibleKeywordField; + public Builder add(FieldMapper.Builder builder) { mapperBuilders.put(builder.name(), builder::build); + + if (builder instanceof KeywordFieldMapper.Builder kwd) { + if (kwd.hasNormalizer() == false && (kwd.hasDocValues() || kwd.isStored())) { + hasSyntheticSourceCompatibleKeywordField = true; + } + } + return this; } private void add(FieldMapper mapper) { mapperBuilders.put(mapper.simpleName(), context -> mapper); + + if (mapper instanceof KeywordFieldMapper kwd) { + if (kwd.hasNormalizer() == false && (kwd.fieldType().hasDocValues() || kwd.fieldType().isStored())) { + hasSyntheticSourceCompatibleKeywordField = true; + } + } } private void update(FieldMapper toMerge, MapperMergeContext context) { @@ -474,6 +489,10 @@ public boolean hasMultiFields() { return mapperBuilders.isEmpty() == false; } + public boolean hasSyntheticSourceCompatibleKeywordField() { + return hasSyntheticSourceCompatibleKeywordField; + } + public MultiFields build(Mapper.Builder mainFieldBuilder, MapperBuilderContext context) { if (mapperBuilders.isEmpty()) { return empty(); @@ -1134,6 +1153,10 @@ public static Parameter storeParam(Function initi return Parameter.boolParam("store", false, initializer, defaultValue); } + public static Parameter storeParam(Function initializer, Supplier defaultValue) { + return Parameter.boolParam("store", false, initializer, defaultValue); + } + public static Parameter docValuesParam(Function initializer, boolean defaultValue) { return Parameter.boolParam("doc_values", false, initializer, defaultValue); } diff --git 
a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 4024798a85370..bdf25307d3343 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -227,6 +227,10 @@ Builder normalizer(String normalizerName) { return this; } + public boolean hasNormalizer() { + return this.normalizer.get() != null; + } + Builder nullValue(String nullValue) { this.nullValue.setValue(nullValue); return this; @@ -237,6 +241,10 @@ public Builder docValues(boolean hasDocValues) { return this; } + public boolean hasDocValues() { + return this.hasDocValues.get(); + } + public Builder dimension(boolean dimension) { this.dimension.setValue(dimension); return this; @@ -247,6 +255,15 @@ public Builder indexed(boolean indexed) { return this; } + public Builder stored(boolean stored) { + this.stored.setValue(stored); + return this; + } + + public boolean isStored() { + return this.stored.get(); + } + private FieldValues scriptValues() { if (script.get() == null) { return null; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index faa840dacc732..ef512e2bbd46b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -236,9 +236,11 @@ private static FielddataFrequencyFilter parseFrequencyFilter(String name, Mappin public static class Builder extends FieldMapper.Builder { private final IndexVersion indexCreatedVersion; + private final Parameter store; + + private final boolean isSyntheticSourceEnabledViaIndexMode; private final Parameter index = Parameter.indexParam(m -> ((TextFieldMapper) m).index, true); - private final Parameter store = Parameter.storeParam(m -> ((TextFieldMapper) m).store, false); final Parameter similarity = TextParams.similarity(m -> ((TextFieldMapper) m).similarity); @@ -283,12 +285,28 @@ public static class Builder extends FieldMapper.Builder { final TextParams.Analyzers analyzers; - public Builder(String name, IndexAnalyzers indexAnalyzers) { - this(name, IndexVersion.current(), indexAnalyzers); + public Builder(String name, IndexAnalyzers indexAnalyzers, boolean isSyntheticSourceEnabledViaIndexMode) { + this(name, IndexVersion.current(), indexAnalyzers, isSyntheticSourceEnabledViaIndexMode); } - public Builder(String name, IndexVersion indexCreatedVersion, IndexAnalyzers indexAnalyzers) { + public Builder( + String name, + IndexVersion indexCreatedVersion, + IndexAnalyzers indexAnalyzers, + boolean isSyntheticSourceEnabledViaIndexMode + ) { super(name); + + // If synthetic source is used we need to either store this field + // to recreate the source or use keyword multi-fields for that. + // So if there are no suitable multi-fields we will default to + // storing the field without requiring users to explicitly set 'store'. + // + // If 'store' parameter was explicitly provided we'll reject the request. 
+ this.store = Parameter.storeParam( + m -> ((TextFieldMapper) m).store, + () -> isSyntheticSourceEnabledViaIndexMode && multiFieldsBuilder.hasSyntheticSourceCompatibleKeywordField() == false + ); this.indexCreatedVersion = indexCreatedVersion; this.analyzers = new TextParams.Analyzers( indexAnalyzers, @@ -296,6 +314,7 @@ public Builder(String name, IndexVersion indexCreatedVersion, IndexAnalyzers ind m -> (((TextFieldMapper) m).positionIncrementGap), indexCreatedVersion ); + this.isSyntheticSourceEnabledViaIndexMode = isSyntheticSourceEnabledViaIndexMode; } public Builder index(boolean index) { @@ -387,13 +406,9 @@ private static KeywordFieldMapper.KeywordFieldType syntheticSourceDelegate(Field if (fieldType.stored()) { return null; } - for (Mapper sub : multiFields) { - if (sub.typeName().equals(KeywordFieldMapper.CONTENT_TYPE)) { - KeywordFieldMapper kwd = (KeywordFieldMapper) sub; - if (kwd.hasNormalizer() == false && (kwd.fieldType().hasDocValues() || kwd.fieldType().isStored())) { - return kwd.fieldType(); - } - } + var kwd = getKeywordFieldMapperForSyntheticSource(multiFields); + if (kwd != null) { + return kwd.fieldType(); } return null; } @@ -483,7 +498,7 @@ public TextFieldMapper build(MapperBuilderContext context) { private static final IndexVersion MINIMUM_COMPATIBILITY_VERSION = IndexVersion.fromId(5000099); public static final TypeParser PARSER = new TypeParser( - (n, c) -> new Builder(n, c.indexVersionCreated(), c.getIndexAnalyzers()), + (n, c) -> new Builder(n, c.indexVersionCreated(), c.getIndexAnalyzers(), c.getIndexSettings().getMode().isSyntheticSourceEnabled()), MINIMUM_COMPATIBILITY_VERSION ); @@ -1203,6 +1218,8 @@ public Query existsQuery(SearchExecutionContext context) { private final SubFieldInfo prefixFieldInfo; private final SubFieldInfo phraseFieldInfo; + private final boolean isSyntheticSourceEnabledViaIndexMode; + private TextFieldMapper( String simpleName, FieldType fieldType, @@ -1235,6 +1252,7 @@ private TextFieldMapper( this.indexPrefixes = builder.indexPrefixes.getValue(); this.freqFilter = builder.freqFilter.getValue(); this.fieldData = builder.fieldData.get(); + this.isSyntheticSourceEnabledViaIndexMode = builder.isSyntheticSourceEnabledViaIndexMode; } @Override @@ -1258,7 +1276,7 @@ public Map indexAnalyzers() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), indexCreatedVersion, indexAnalyzers).init(this); + return new Builder(simpleName(), indexCreatedVersion, indexAnalyzers, isSyntheticSourceEnabledViaIndexMode).init(this); } @Override @@ -1454,15 +1472,12 @@ protected void write(XContentBuilder b, Object value) throws IOException { } }; } - for (Mapper sub : this) { - if (sub.typeName().equals(KeywordFieldMapper.CONTENT_TYPE)) { - KeywordFieldMapper kwd = (KeywordFieldMapper) sub; - if (kwd.hasNormalizer() == false && (kwd.fieldType().hasDocValues() || kwd.fieldType().isStored())) { - return kwd.syntheticFieldLoader(simpleName()); - } - } + var kwd = getKeywordFieldMapperForSyntheticSource(this); + if (kwd != null) { + return kwd.syntheticFieldLoader(simpleName()); } + throw new IllegalArgumentException( String.format( Locale.ROOT, @@ -1473,4 +1488,17 @@ protected void write(XContentBuilder b, Object value) throws IOException { ) ); } + + private static KeywordFieldMapper getKeywordFieldMapperForSyntheticSource(Iterable multiFields) { + for (Mapper sub : multiFields) { + if (sub.typeName().equals(KeywordFieldMapper.CONTENT_TYPE)) { + KeywordFieldMapper kwd = (KeywordFieldMapper) sub; + if 
(kwd.hasNormalizer() == false && (kwd.fieldType().hasDocValues() || kwd.fieldType().isStored())) { + return kwd; + } + } + } + + return null; + } } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index fbfce6aab403f..6ab5d6d77d86d 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -196,7 +196,11 @@ MappedFieldType failIfFieldMappingNotFound(String name, MappedFieldType fieldMap if (fieldMapping != null || allowUnmappedFields) { return fieldMapping; } else if (mapUnmappedFieldAsString) { - TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, getIndexAnalyzers()); + TextFieldMapper.Builder builder = new TextFieldMapper.Builder( + name, + getIndexAnalyzers(), + getIndexSettings() != null && getIndexSettings().getMode().isSyntheticSourceEnabled() + ); return builder.build(MapperBuilderContext.root(false, false)).fieldType(); } else { throw new QueryShardException(this, "No field mapping can be found for the field with name [{}]", name); diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java index 43628fe59daa3..683bfb19aac26 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java @@ -90,9 +90,11 @@ public > IFD getForField(String type, String field if (docValues) { fieldType = new KeywordFieldMapper.Builder(fieldName, IndexVersion.current()).build(context).fieldType(); } else { - fieldType = new TextFieldMapper.Builder(fieldName, createDefaultIndexAnalyzers()).fielddata(true) - .build(context) - .fieldType(); + fieldType = new TextFieldMapper.Builder( + fieldName, + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true).build(context).fieldType(); } } else if (type.equals("float")) { fieldType = new NumberFieldMapper.Builder( diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java index 45ebfba265c2f..4df1961c123af 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java @@ -52,10 +52,11 @@ public void testFilterByFrequency() throws Exception { { indexService.clearCaches(false, true); - MappedFieldType ft = new TextFieldMapper.Builder("high_freq", createDefaultIndexAnalyzers()).fielddata(true) - .fielddataFrequencyFilter(0, random.nextBoolean() ? 100 : 0.5d, 0) - .build(builderContext) - .fieldType(); + MappedFieldType ft = new TextFieldMapper.Builder( + "high_freq", + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true).fielddataFrequencyFilter(0, random.nextBoolean() ? 
100 : 0.5d, 0).build(builderContext).fieldType(); IndexOrdinalsFieldData fieldData = searchExecutionContext.getForField(ft, MappedFieldType.FielddataOperation.SEARCH); for (LeafReaderContext context : contexts) { LeafOrdinalsFieldData loadDirect = fieldData.loadDirect(context); @@ -67,7 +68,11 @@ public void testFilterByFrequency() throws Exception { } { indexService.clearCaches(false, true); - MappedFieldType ft = new TextFieldMapper.Builder("high_freq", createDefaultIndexAnalyzers()).fielddata(true) + MappedFieldType ft = new TextFieldMapper.Builder( + "high_freq", + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true) .fielddataFrequencyFilter(random.nextBoolean() ? 101 : 101d / 200.0d, 201, 100) .build(builderContext) .fieldType(); @@ -82,7 +87,11 @@ public void testFilterByFrequency() throws Exception { { indexService.clearCaches(false, true);// test # docs with value - MappedFieldType ft = new TextFieldMapper.Builder("med_freq", createDefaultIndexAnalyzers()).fielddata(true) + MappedFieldType ft = new TextFieldMapper.Builder( + "med_freq", + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true) .fielddataFrequencyFilter(random.nextBoolean() ? 101 : 101d / 200.0d, Integer.MAX_VALUE, 101) .build(builderContext) .fieldType(); @@ -98,7 +107,11 @@ public void testFilterByFrequency() throws Exception { { indexService.clearCaches(false, true); - MappedFieldType ft = new TextFieldMapper.Builder("med_freq", createDefaultIndexAnalyzers()).fielddata(true) + MappedFieldType ft = new TextFieldMapper.Builder( + "med_freq", + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true) .fielddataFrequencyFilter(random.nextBoolean() ? 
101 : 101d / 200.0d, Integer.MAX_VALUE, 101) .build(builderContext) .fieldType(); diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java index bf9176de1b124..8c583fe3976fa 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java @@ -156,12 +156,16 @@ public void testClearField() throws Exception { ); final MapperBuilderContext context = MapperBuilderContext.root(false, false); - final MappedFieldType mapper1 = new TextFieldMapper.Builder("field_1", createDefaultIndexAnalyzers()).fielddata(true) - .build(context) - .fieldType(); - final MappedFieldType mapper2 = new TextFieldMapper.Builder("field_2", createDefaultIndexAnalyzers()).fielddata(true) - .build(context) - .fieldType(); + final MappedFieldType mapper1 = new TextFieldMapper.Builder( + "field_1", + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true).build(context).fieldType(); + final MappedFieldType mapper2 = new TextFieldMapper.Builder( + "field_2", + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true).build(context).fieldType(); final IndexWriter writer = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new KeywordAnalyzer())); Document doc = new Document(); doc.add(new StringField("field_1", "thisisastring", Store.NO)); @@ -223,9 +227,11 @@ public void testFieldDataCacheListener() throws Exception { ); final MapperBuilderContext context = MapperBuilderContext.root(false, false); - final MappedFieldType mapper1 = new TextFieldMapper.Builder("s", createDefaultIndexAnalyzers()).fielddata(true) - .build(context) - .fieldType(); + final MappedFieldType mapper1 = new TextFieldMapper.Builder( + "s", + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true).build(context).fieldType(); final IndexWriter writer = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new KeywordAnalyzer())); Document doc = new Document(); doc.add(new StringField("s", "thisisastring", Store.NO)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserContextTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserContextTests.java index 03716f8ad4497..9b66d0011ba69 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserContextTests.java @@ -20,9 +20,9 @@ public class DocumentParserContextTests extends ESTestCase { private final MapperBuilderContext root = MapperBuilderContext.root(false, false); public void testDynamicMapperSizeMultipleMappers() { - context.addDynamicMapper(new TextFieldMapper.Builder("foo", createDefaultIndexAnalyzers()).build(root)); + context.addDynamicMapper(new TextFieldMapper.Builder("foo", createDefaultIndexAnalyzers(), false).build(root)); assertEquals(1, context.getNewFieldsSize()); - context.addDynamicMapper(new TextFieldMapper.Builder("bar", createDefaultIndexAnalyzers()).build(root)); + context.addDynamicMapper(new TextFieldMapper.Builder("bar", createDefaultIndexAnalyzers(), false).build(root)); assertEquals(2, context.getNewFieldsSize()); context.addDynamicRuntimeField(new 
TestRuntimeField("runtime1", "keyword")); assertEquals(3, context.getNewFieldsSize()); @@ -37,9 +37,9 @@ public void testDynamicMapperSizeSameFieldMultipleRuntimeFields() { } public void testDynamicMapperSizeSameFieldMultipleMappers() { - context.addDynamicMapper(new TextFieldMapper.Builder("foo", createDefaultIndexAnalyzers()).build(root)); + context.addDynamicMapper(new TextFieldMapper.Builder("foo", createDefaultIndexAnalyzers(), false).build(root)); assertEquals(1, context.getNewFieldsSize()); - context.addDynamicMapper(new TextFieldMapper.Builder("foo", createDefaultIndexAnalyzers()).build(root)); + context.addDynamicMapper(new TextFieldMapper.Builder("foo", createDefaultIndexAnalyzers(), false).build(root)); assertEquals(1, context.getNewFieldsSize()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java new file mode 100644 index 0000000000000..01cbe496e6a3d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.script.ScriptCompiler; +import org.elasticsearch.test.ESTestCase; + +import java.util.Map; + +public class MultiFieldsTests extends ESTestCase { + + public void testMultiFieldsBuilderHasSyntheticSourceCompatibleKeywordField() { + var isStored = randomBoolean(); + var hasNormalizer = randomBoolean(); + + var builder = new FieldMapper.MultiFields.Builder(); + assertFalse(builder.hasSyntheticSourceCompatibleKeywordField()); + + var keywordFieldMapperBuilder = getKeywordFieldMapperBuilder(isStored, hasNormalizer); + builder.add(keywordFieldMapperBuilder); + + var expected = hasNormalizer == false; + assertEquals(expected, builder.hasSyntheticSourceCompatibleKeywordField()); + } + + public void testMultiFieldsBuilderHasSyntheticSourceCompatibleKeywordFieldDuringMerge() { + var isStored = randomBoolean(); + var hasNormalizer = randomBoolean(); + + var builder = new TextFieldMapper.Builder("text_field", createDefaultIndexAnalyzers(), false); + assertFalse(builder.multiFieldsBuilder.hasSyntheticSourceCompatibleKeywordField()); + + var keywordFieldMapperBuilder = getKeywordFieldMapperBuilder(isStored, hasNormalizer); + + var newField = new TextFieldMapper.Builder("text_field", createDefaultIndexAnalyzers(), false).addMultiField( + keywordFieldMapperBuilder + ).build(MapperBuilderContext.root(false, false)); + + builder.merge(newField, new FieldMapper.Conflicts("TextFieldMapper"), MapperMergeContext.root(false, false, Long.MAX_VALUE)); + + var expected = hasNormalizer == false; + assertEquals(expected, builder.multiFieldsBuilder.hasSyntheticSourceCompatibleKeywordField()); + } + + private KeywordFieldMapper.Builder getKeywordFieldMapperBuilder(boolean isStored, boolean hasNormalizer) { + var keywordFieldMapperBuilder = new KeywordFieldMapper.Builder( + "field", + IndexAnalyzers.of(Map.of(), Map.of("normalizer", Lucene.STANDARD_ANALYZER), Map.of()), 
+ ScriptCompiler.NONE, + IndexVersion.current() + ); + if (isStored) { + keywordFieldMapperBuilder.stored(true); + if (randomBoolean()) { + keywordFieldMapperBuilder.docValues(false); + } + } + if (hasNormalizer) { + keywordFieldMapperBuilder.normalizer("normalizer"); + } + return keywordFieldMapperBuilder; + } +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index e024f2fa7b1ea..3c4aca4d36284 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -27,10 +27,10 @@ private RootObjectMapper createMapping( rootBuilder.add(new ObjectMapper.Builder("disabled", Explicit.IMPLICIT_TRUE).enabled(disabledFieldEnabled)); ObjectMapper.Builder fooBuilder = new ObjectMapper.Builder("foo", Explicit.IMPLICIT_TRUE).enabled(fooFieldEnabled); if (includeBarField) { - fooBuilder.add(new TextFieldMapper.Builder("bar", createDefaultIndexAnalyzers())); + fooBuilder.add(new TextFieldMapper.Builder("bar", createDefaultIndexAnalyzers(), false)); } if (includeBazField) { - fooBuilder.add(new TextFieldMapper.Builder("baz", createDefaultIndexAnalyzers())); + fooBuilder.add(new TextFieldMapper.Builder("baz", createDefaultIndexAnalyzers(), false)); } rootBuilder.add(fooBuilder); return rootBuilder.build(MapperBuilderContext.root(false, false)); @@ -366,7 +366,7 @@ private TextFieldMapper.Builder createTextKeywordMultiField(String name) { } private TextFieldMapper.Builder createTextKeywordMultiField(String name, String multiFieldName) { - TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers()); + TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers(), false); builder.multiFieldsBuilder.add(new KeywordFieldMapper.Builder(multiFieldName, IndexVersion.current())); return builder; } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index 6472f09ce1be7..74b293ca7d6d6 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -530,11 +530,11 @@ public void testSyntheticSourceDocValuesFieldWithout() throws IOException { public void testNestedObjectWithMultiFieldsgetTotalFieldsCount() { ObjectMapper.Builder mapperBuilder = new ObjectMapper.Builder("parent_size_1", Explicit.IMPLICIT_TRUE).add( new ObjectMapper.Builder("child_size_2", Explicit.IMPLICIT_TRUE).add( - new TextFieldMapper.Builder("grand_child_size_3", createDefaultIndexAnalyzers()).addMultiField( + new TextFieldMapper.Builder("grand_child_size_3", createDefaultIndexAnalyzers(), false).addMultiField( new KeywordFieldMapper.Builder("multi_field_size_4", IndexVersion.current()) ) .addMultiField( - new TextFieldMapper.Builder("grand_child_size_5", createDefaultIndexAnalyzers()).addMultiField( + new TextFieldMapper.Builder("grand_child_size_5", createDefaultIndexAnalyzers(), false).addMultiField( new KeywordFieldMapper.Builder("multi_field_of_multi_field_size_6", IndexVersion.current()) ) ) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldAnalyzerModeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldAnalyzerModeTests.java index 8cb3ecef4c35c..def8841045746 100644 
--- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldAnalyzerModeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldAnalyzerModeTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; import org.elasticsearch.index.analysis.AnalysisMode; @@ -67,6 +68,9 @@ public void testParseTextFieldCheckAnalyzerAnalysisMode() { fieldNode.put("analyzer", "my_analyzer"); MappingParserContext parserContext = mock(MappingParserContext.class); when(parserContext.indexVersionCreated()).thenReturn(IndexVersion.current()); + when(parserContext.getIndexSettings()).thenReturn( + new IndexSettings(IndexMetadata.builder("index").settings(indexSettings(IndexVersion.current(), 1, 0)).build(), Settings.EMPTY) + ); // check AnalysisMode.ALL works Map analyzers = defaultAnalyzers(); @@ -102,6 +106,12 @@ public void testParseTextFieldCheckSearchAnalyzerAnalysisMode() { } MappingParserContext parserContext = mock(MappingParserContext.class); when(parserContext.indexVersionCreated()).thenReturn(IndexVersion.current()); + when(parserContext.getIndexSettings()).thenReturn( + new IndexSettings( + IndexMetadata.builder("index").settings(indexSettings(IndexVersion.current(), 1, 0)).build(), + Settings.EMPTY + ) + ); // check AnalysisMode.ALL and AnalysisMode.SEARCH_TIME works Map analyzers = defaultAnalyzers(); @@ -143,6 +153,9 @@ public void testParseTextFieldCheckAnalyzerWithSearchAnalyzerAnalysisMode() { fieldNode.put("analyzer", "my_analyzer"); MappingParserContext parserContext = mock(MappingParserContext.class); when(parserContext.indexVersionCreated()).thenReturn(IndexVersion.current()); + when(parserContext.getIndexSettings()).thenReturn( + new IndexSettings(IndexMetadata.builder("index").settings(indexSettings(IndexVersion.current(), 1, 0)).build(), Settings.EMPTY) + ); // check that "analyzer" set to AnalysisMode.INDEX_TIME is blocked if there is no search analyzer AnalysisMode mode = AnalysisMode.INDEX_TIME; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index f92867d1ce461..1c5ae3baca827 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -44,9 +44,11 @@ import org.apache.lucene.tests.analysis.MockSynonymAnalyzer; import org.apache.lucene.tests.analysis.Token; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.AnalyzerScope; @@ -249,6 +251,64 @@ public void testDefaults() throws IOException { assertEquals(DocValuesType.NONE, fieldType.docValuesType()); } + public void testStoreParameterDefaults() throws IOException { + var timeSeriesIndexMode = randomBoolean(); + var isStored = randomBoolean(); + var hasKeywordFieldForSyntheticSource = randomBoolean(); + + var 
indexSettingsBuilder = getIndexSettingsBuilder(); + if (timeSeriesIndexMode) { + indexSettingsBuilder.put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "dimension") + .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), "2000-01-08T23:40:53.384Z") + .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2106-01-08T23:40:53.384Z"); + } + var indexSettings = indexSettingsBuilder.build(); + + var mapping = mapping(b -> { + b.startObject("field"); + b.field("type", "text"); + if (isStored) { + b.field("store", isStored); + } + if (hasKeywordFieldForSyntheticSource) { + b.startObject("fields"); + b.startObject("keyword"); + b.field("type", "keyword"); + b.endObject(); + b.endObject(); + } + b.endObject(); + + if (timeSeriesIndexMode) { + b.startObject("@timestamp"); + b.field("type", "date"); + b.endObject(); + b.startObject("dimension"); + b.field("type", "keyword"); + b.field("time_series_dimension", "true"); + b.endObject(); + } + }); + DocumentMapper mapper = createMapperService(getVersion(), indexSettings, () -> true, mapping).documentMapper(); + + var source = source(TimeSeriesRoutingHashFieldMapper.DUMMY_ENCODED_VALUE, b -> { + b.field("field", "1234"); + if (timeSeriesIndexMode) { + b.field("@timestamp", randomMillisUpToYear9999()); + b.field("dimension", "dimension1"); + } + }, null); + ParsedDocument doc = mapper.parse(source); + List fields = doc.rootDoc().getFields("field"); + IndexableFieldType fieldType = fields.get(0).fieldType(); + if (isStored || (timeSeriesIndexMode && hasKeywordFieldForSyntheticSource == false)) { + assertTrue(fieldType.stored()); + } else { + assertFalse(fieldType.stored()); + } + } + public void testBWCSerialization() throws IOException { MapperService mapperService = createMapperService(fieldMapping(b -> { b.field("type", "text"); @@ -1138,7 +1198,8 @@ public SyntheticSourceExample example(int maxValues) { delegate.expectedForSyntheticSource(), delegate.expectedForBlockLoader(), b -> { - b.field("type", "text").field("store", true); + b.field("type", "text"); + b.field("store", true); if (indexText == false) { b.field("index", false); } @@ -1196,6 +1257,17 @@ public List invalidExample() throws IOException { b.endObject(); } b.endObject(); + }), + new SyntheticSourceInvalidExample(err, b -> { + b.field("type", "text"); + b.startObject("fields"); + { + b.startObject("kwd"); + b.field("type", "keyword"); + b.field("doc_values", "false"); + b.endObject(); + } + b.endObject(); }) ); } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java index c521ab5e047aa..4e4f5c9c0ddfa 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java @@ -317,7 +317,11 @@ public void testBuildSearchContextHighlight() throws IOException { ) { @Override public MappedFieldType getFieldType(String name) { - TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers()); + TextFieldMapper.Builder builder = new TextFieldMapper.Builder( + name, + createDefaultIndexAnalyzers(), + idxSettings.getMode().isSyntheticSourceEnabled() + ); return builder.build(MapperBuilderContext.root(false, false)).fieldType(); } }; diff --git 
a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java index 0ade522ae1ffa..7113117a4d7fa 100644 --- a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java @@ -160,7 +160,11 @@ public void testBuildRescoreSearchContext() throws ElasticsearchParseException, ) { @Override public MappedFieldType getFieldType(String name) { - TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers()); + TextFieldMapper.Builder builder = new TextFieldMapper.Builder( + name, + createDefaultIndexAnalyzers(), + idxSettings.getMode().isSyntheticSourceEnabled() + ); return builder.build(MapperBuilderContext.root(false, false)).fieldType(); } }; @@ -222,7 +226,11 @@ public void testRewritingKeepsSettings() throws IOException { ) { @Override public MappedFieldType getFieldType(String name) { - TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers()); + TextFieldMapper.Builder builder = new TextFieldMapper.Builder( + name, + createDefaultIndexAnalyzers(), + idxSettings.getMode().isSyntheticSourceEnabled() + ); return builder.build(MapperBuilderContext.root(false, false)).fieldType(); } }; From 2f0a7b8629a9892b1ef0b8f40bd02832558c8b84 Mon Sep 17 00:00:00 2001 From: Fang Xing <155562079+fang-xing-esql@users.noreply.github.com> Date: Tue, 26 Mar 2024 23:27:36 -0400 Subject: [PATCH 79/79] [ES|QL] Refactor data type conversions between String/UnsignedLong and other data types (#106628) * refactor data type conversions related to string and unsignedlong in esql. --- .../xpack/esql/qa/rest/RestEsqlTestCase.java | 2 +- .../src/main/resources/floats.csv-spec | 4 +- .../src/main/resources/ints.csv-spec | 12 +- .../src/main/resources/row.csv-spec | 14 +- .../convert/ToDoubleFromStringEvaluator.java | 8 +- .../convert/ToIntegerFromStringEvaluator.java | 7 +- .../convert/ToLongFromStringEvaluator.java | 8 +- .../xpack/esql/action/PositionToXContent.java | 22 +-- .../xpack/esql/action/ResponseValueUtils.java | 39 +++-- .../function/scalar/convert/ToBoolean.java | 9 +- .../scalar/convert/ToCartesianPoint.java | 4 +- .../scalar/convert/ToCartesianShape.java | 4 +- .../function/scalar/convert/ToDouble.java | 10 +- .../function/scalar/convert/ToGeoPoint.java | 4 +- .../function/scalar/convert/ToGeoShape.java | 4 +- .../function/scalar/convert/ToIP.java | 4 +- .../function/scalar/convert/ToInteger.java | 23 +-- .../function/scalar/convert/ToLong.java | 19 +-- .../function/scalar/convert/ToString.java | 32 ++-- .../scalar/convert/ToUnsignedLong.java | 20 +-- .../function/scalar/convert/ToVersion.java | 4 +- .../expression/function/scalar/math/Cast.java | 12 +- .../function/scalar/math/Log10.java | 3 +- .../function/scalar/math/Round.java | 9 +- .../expression/function/scalar/math/Sqrt.java | 4 +- .../function/scalar/multivalue/MvAvg.java | 2 +- .../function/scalar/multivalue/MvMedian.java | 16 +- .../function/scalar/multivalue/MvSlice.java | 5 +- .../predicate/operator/arithmetic/Div.java | 4 +- .../predicate/operator/arithmetic/Mod.java | 4 +- .../xpack/esql/io/stream/PlanNamedTypes.java | 11 +- .../xpack/esql/parser/ExpressionBuilder.java | 10 +- .../xpack/esql/parser/LogicalPlanBuilder.java | 3 +- .../planner/EsqlExpressionTranslators.java | 9 +- .../esql/planner/LocalExecutionPlanner.java | 3 +- 
.../esql/type/EsqlDataTypeConverter.java | 161 +++++++++++++++++- .../scalar/convert/ToDoubleTests.java | 7 +- .../scalar/convert/ToIntegerTests.java | 10 +- .../function/scalar/convert/ToLongTests.java | 10 +- .../function/scalar/math/Log10Tests.java | 6 +- .../function/scalar/math/SqrtTests.java | 4 +- .../scalar/multivalue/MvAvgTests.java | 3 +- 42 files changed, 343 insertions(+), 206 deletions(-) diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index cccd1a3f8854b..2d0a39da5a8b4 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -422,7 +422,7 @@ public void testWarningHeadersOnFailedConversions() throws IOException { for (int i = 1; i <= expectedWarnings; i++) { assertThat( warnings.get(i), - containsString("java.lang.NumberFormatException: For input string: \\\"keyword" + (2 * i - 1) + "\\\"") + containsString("org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [keyword" + (2 * i - 1) + "]") ); } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec index 75011388a9f5a..9c343083275cd 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec @@ -55,14 +55,14 @@ emp_no:integer |hire_date:date |hire_double:double 10003 |1986-08-28T00:00:00.000Z|5.255712E11 ; -convertFromString +convertFromString#[skip:-8.13.99, reason:warning changed in 8.14] // tag::to_double-str[] ROW str1 = "5.20128E11", str2 = "foo" | EVAL dbl = TO_DOUBLE("520128000000"), dbl1 = TO_DOUBLE(str1), dbl2 = TO_DOUBLE(str2) // end::to_double-str[] ; warning:Line 2:72: evaluation of [TO_DOUBLE(str2)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 2:72: java.lang.NumberFormatException: For input string: \"foo\" +warning:Line 2:72: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [foo] // tag::to_double-str-result[] str1:keyword |str2:keyword |dbl:double |dbl1:double |dbl2:double diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index 7a64c9a87e0c9..8657602e7b16f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -153,14 +153,14 @@ tf:boolean |t2l:long |f2l:long |tf2l:long [true, false] |1 |0 |[1, 0] ; -convertStringToLong +convertStringToLong#[skip:-8.13.99, reason:warning changed in 8.14] // tag::to_long-str[] ROW str1 = "2147483648", str2 = "2147483648.2", str3 = "foo" | EVAL long1 = TO_LONG(str1), long2 = TO_LONG(str2), long3 = TO_LONG(str3) // end::to_long-str[] ; warning:Line 2:62: evaluation of [TO_LONG(str3)] failed, treating result as null. Only first 20 failures recorded. 
-warning:Line 2:62: java.lang.NumberFormatException: For input string: \"foo\" +warning:Line 2:62: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [foo] // tag::to_long-str-result[] @@ -238,16 +238,16 @@ int_str:keyword |int_dbl_str:keyword |is2i:integer|ids2i:integer 2147483647 |2147483646.2 |2147483647 |2147483646 ; -convertStringToIntFail +convertStringToIntFail#[skip:-8.13.99, reason:warning changed in 8.14] required_feature: esql.mv_warn row str1 = "2147483647.2", str2 = "2147483648", non = "no number" | eval i1 = to_integer(str1), i2 = to_integer(str2), noi = to_integer(non); warning:Line 1:79: evaluation of [to_integer(str1)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:79: java.lang.NumberFormatException: For input string: \"2147483647.2\" +warning:Line 1:79: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [2147483647.2] warning:Line 1:102: evaluation of [to_integer(str2)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:102: java.lang.NumberFormatException: For input string: \"2147483648\" +warning:Line 1:102: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [2147483648] warning:Line 1:126: evaluation of [to_integer(non)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:126: java.lang.NumberFormatException: For input string: \"no number\" +warning:Line 1:126: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [no number] str1:keyword |str2:keyword |non:keyword |i1:integer |i2:integer |noi:integer 2147483647.2 |2147483648 |no number |null |null |null diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/row.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/row.csv-spec index 4e5df6c535be7..3f441c94967d5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/row.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/row.csv-spec @@ -289,30 +289,30 @@ a:integer |b:integer |c:integer // end::in-with-expressions-result[] ; -convertMvToMvDifferentCardinality +convertMvToMvDifferentCardinality#[skip:-8.13.99, reason:warning changed in 8.14] row strings = ["1", "2", "three"] | eval ints = to_int(strings); warning:Line 1:49: evaluation of [to_int(strings)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:49: java.lang.NumberFormatException: For input string: \"three\" +warning:Line 1:49: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [three] strings:keyword |ints:integer [1, 2, three] |[1, 2] ; -convertMvToSv +convertMvToSv#[skip:-8.13.99, reason:warning changed in 8.14] row strings = ["1", "two"] | eval ints = to_int(strings); warning:Line 1:42: evaluation of [to_int(strings)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:42: java.lang.NumberFormatException: For input string: \"two\" +warning:Line 1:42: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [two] strings:keyword |ints:integer [1, two] |1 ; -convertMvToNull +convertMvToNull#[skip:-8.13.99, reason:warning changed in 8.14] row strings = ["one", "two"] | eval ints = to_int(strings); warning:Line 1:44: evaluation of [to_int(strings)] failed, treating result as null. Only first 20 failures recorded. 
-warning:Line 1:44: java.lang.NumberFormatException: For input string: \"one\" -warning:Line 1:44: java.lang.NumberFormatException: For input string: \"two\" +warning:Line 1:44: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [one] +warning:Line 1:44: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [two] strings:keyword |ints:integer [one, two] |null diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromStringEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromStringEvaluator.java index b1fc80b9260ad..209b15ef21a2f 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromStringEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromStringEvaluator.java @@ -4,7 +4,6 @@ // 2.0. package org.elasticsearch.xpack.esql.expression.function.scalar.convert; -import java.lang.NumberFormatException; import java.lang.Override; import java.lang.String; import org.apache.lucene.util.BytesRef; @@ -15,6 +14,7 @@ import org.elasticsearch.compute.data.Vector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.tree.Source; /** @@ -40,7 +40,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount); - } catch (NumberFormatException e) { + } catch (InvalidArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -49,7 +49,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendDouble(evalValue(vector, p, scratchPad)); - } catch (NumberFormatException e) { + } catch (InvalidArgumentException e) { registerException(e); builder.appendNull(); } @@ -84,7 +84,7 @@ public Block evalBlock(Block b) { } builder.appendDouble(value); valuesAppended = true; - } catch (NumberFormatException e) { + } catch (InvalidArgumentException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromStringEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromStringEvaluator.java index 600fa293394f9..ef91bf890cd23 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromStringEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromStringEvaluator.java @@ -4,7 +4,6 @@ // 2.0. 
package org.elasticsearch.xpack.esql.expression.function.scalar.convert; -import java.lang.NumberFormatException; import java.lang.Override; import java.lang.String; import org.apache.lucene.util.BytesRef; @@ -41,7 +40,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantIntBlockWith(evalValue(vector, 0, scratchPad), positionCount); - } catch (InvalidArgumentException | NumberFormatException e) { + } catch (InvalidArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -50,7 +49,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendInt(evalValue(vector, p, scratchPad)); - } catch (InvalidArgumentException | NumberFormatException e) { + } catch (InvalidArgumentException e) { registerException(e); builder.appendNull(); } @@ -85,7 +84,7 @@ public Block evalBlock(Block b) { } builder.appendInt(value); valuesAppended = true; - } catch (InvalidArgumentException | NumberFormatException e) { + } catch (InvalidArgumentException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromStringEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromStringEvaluator.java index e0eca6b6bcbff..0d7a2cb9d7459 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromStringEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromStringEvaluator.java @@ -4,7 +4,6 @@ // 2.0. 
package org.elasticsearch.xpack.esql.expression.function.scalar.convert; -import java.lang.NumberFormatException; import java.lang.Override; import java.lang.String; import org.apache.lucene.util.BytesRef; @@ -15,6 +14,7 @@ import org.elasticsearch.compute.data.Vector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.tree.Source; /** @@ -40,7 +40,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0, scratchPad), positionCount); - } catch (NumberFormatException e) { + } catch (InvalidArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -49,7 +49,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendLong(evalValue(vector, p, scratchPad)); - } catch (NumberFormatException e) { + } catch (InvalidArgumentException e) { registerException(e); builder.appendNull(); } @@ -84,7 +84,7 @@ public Block evalBlock(Block b) { } builder.appendLong(value); valuesAppended = true; - } catch (NumberFormatException e) { + } catch (InvalidArgumentException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java index 8770d313ac2fe..5488efda7834f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java @@ -17,21 +17,20 @@ import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.lucene.UnsupportedValueSource; -import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.esql.action.ColumnInfo; -import org.elasticsearch.xpack.versionfield.Version; import java.io.IOException; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.dateTimeToString; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.ipToString; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.spatialToString; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.versionToString; import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; abstract class PositionToXContent { protected final Block block; @@ -109,7 +108,7 @@ protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Pa protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { BytesRef val = ((BytesRefBlock) block).getBytesRef(valueIndex, scratch); - return builder.value(DocValueFormat.IP.format(val)); + return builder.value(ipToString(val)); } }; case "date" -> new PositionToXContent(block) { @@ -120,18 +119,11 @@ 
protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Pa return builder.value(dateTimeToString(longVal)); } }; - case "geo_point", "geo_shape" -> new PositionToXContent(block) { + case "geo_point", "geo_shape", "cartesian_point", "cartesian_shape" -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { - return builder.value(GEO.wkbToWkt(((BytesRefBlock) block).getBytesRef(valueIndex, scratch))); - } - }; - case "cartesian_point", "cartesian_shape" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - return builder.value(CARTESIAN.wkbToWkt(((BytesRefBlock) block).getBytesRef(valueIndex, scratch))); + return builder.value(spatialToString(((BytesRefBlock) block).getBytesRef(valueIndex, scratch))); } }; case "boolean" -> new PositionToXContent(block) { @@ -146,7 +138,7 @@ protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Pa protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { BytesRef val = ((BytesRefBlock) block).getBytesRef(valueIndex, scratch); - return builder.value(new Version(val).toString()); + return builder.value(versionToString(val)); } }; case "null" -> new PositionToXContent(block) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java index 8691f2b142d87..f467512fd6c0b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java @@ -21,7 +21,6 @@ import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.lucene.UnsupportedValueSource; -import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -30,7 +29,6 @@ import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; -import org.elasticsearch.xpack.versionfield.Version; import java.io.IOException; import java.io.UncheckedIOException; @@ -41,11 +39,14 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.dateTimeToLong; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.dateTimeToString; -import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.ipToString; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.longToUnsignedLong; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.spatialToString; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToIP; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToSpatial; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToVersion; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.versionToString; import static 
org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; -import static org.elasticsearch.xpack.ql.util.StringUtils.parseIP; /** * Collection of static utility methods for helping transform response data between pages and values. @@ -128,16 +129,17 @@ private static Object valueAt(String dataType, Block block, int offset, BytesRef case "keyword", "text" -> ((BytesRefBlock) block).getBytesRef(offset, scratch).utf8ToString(); case "ip" -> { BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); - yield DocValueFormat.IP.format(val); + yield ipToString(val); } case "date" -> { long longVal = ((LongBlock) block).getLong(offset); yield dateTimeToString(longVal); } case "boolean" -> ((BooleanBlock) block).getBoolean(offset); - case "version" -> new Version(((BytesRefBlock) block).getBytesRef(offset, scratch)).toString(); - case "geo_point", "geo_shape" -> GEO.wkbToWkt(((BytesRefBlock) block).getBytesRef(offset, scratch)); - case "cartesian_point", "cartesian_shape" -> CARTESIAN.wkbToWkt(((BytesRefBlock) block).getBytesRef(offset, scratch)); + case "version" -> versionToString(((BytesRefBlock) block).getBytesRef(offset, scratch)); + case "geo_point", "geo_shape", "cartesian_point", "cartesian_shape" -> spatialToString( + ((BytesRefBlock) block).getBytesRef(offset, scratch) + ); case "unsupported" -> UnsupportedValueSource.UNSUPPORTED_OUTPUT; case "_source" -> { BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); @@ -169,21 +171,23 @@ static Page valuesToPage(BlockFactory blockFactory, List columns, Li var builder = results.get(c); var value = row.get(c); switch (dataTypes.get(c)) { - case "unsigned_long" -> ((LongBlock.Builder) builder).appendLong(asLongUnsigned(((Number) value).longValue())); + case "unsigned_long" -> ((LongBlock.Builder) builder).appendLong( + longToUnsignedLong(((Number) value).longValue(), true) + ); case "long" -> ((LongBlock.Builder) builder).appendLong(((Number) value).longValue()); case "integer" -> ((IntBlock.Builder) builder).appendInt(((Number) value).intValue()); case "double" -> ((DoubleBlock.Builder) builder).appendDouble(((Number) value).doubleValue()); case "keyword", "text", "unsupported" -> ((BytesRefBlock.Builder) builder).appendBytesRef( new BytesRef(value.toString()) ); - case "ip" -> ((BytesRefBlock.Builder) builder).appendBytesRef(parseIP(value.toString())); + case "ip" -> ((BytesRefBlock.Builder) builder).appendBytesRef(stringToIP(value.toString())); case "date" -> { long longVal = dateTimeToLong(value.toString()); ((LongBlock.Builder) builder).appendLong(longVal); } case "boolean" -> ((BooleanBlock.Builder) builder).appendBoolean(((Boolean) value)); case "null" -> builder.appendNull(); - case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(value.toString()).toBytesRef()); + case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(stringToVersion(new BytesRef(value.toString()))); case "_source" -> { @SuppressWarnings("unchecked") Map o = (Map) value; @@ -196,14 +200,9 @@ static Page valuesToPage(BlockFactory blockFactory, List columns, Li throw new UncheckedIOException(e); } } - case "geo_point", "geo_shape" -> { - // This just converts WKT to WKB, so does not need CRS knowledge, we could merge GEO and CARTESIAN here - BytesRef wkb = GEO.wktToWkb(value.toString()); - ((BytesRefBlock.Builder) builder).appendBytesRef(wkb); - } - 
case "cartesian_point", "cartesian_shape" -> { + case "geo_point", "geo_shape", "cartesian_point", "cartesian_shape" -> { // This just converts WKT to WKB, so does not need CRS knowledge, we could merge GEO and CARTESIAN here - BytesRef wkb = CARTESIAN.wktToWkb(value.toString()); + BytesRef wkb = stringToSpatial(value.toString()); ((BytesRefBlock.Builder) builder).appendBytesRef(wkb); } default -> throw EsqlIllegalArgumentException.illegalDataType(dataTypes.get(c)); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBoolean.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBoolean.java index b5f33184d1395..cd9fcb0390937 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBoolean.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBoolean.java @@ -16,10 +16,11 @@ import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; -import java.math.BigInteger; import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToBoolean; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToBoolean; import static org.elasticsearch.xpack.ql.type.DataTypes.BOOLEAN; import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; import static org.elasticsearch.xpack.ql.type.DataTypes.INTEGER; @@ -27,7 +28,6 @@ import static org.elasticsearch.xpack.ql.type.DataTypes.LONG; import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; -import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; public class ToBoolean extends AbstractConvertFunction { @@ -71,7 +71,7 @@ protected NodeInfo info() { @ConvertEvaluator(extraName = "FromString") static boolean fromKeyword(BytesRef keyword) { - return Boolean.parseBoolean(keyword.utf8ToString()); + return stringToBoolean(keyword.utf8ToString()); } @ConvertEvaluator(extraName = "FromDouble") @@ -86,8 +86,7 @@ static boolean fromLong(long l) { @ConvertEvaluator(extraName = "FromUnsignedLong") static boolean fromUnsignedLong(long ul) { - Number n = unsignedLongAsNumber(ul); - return n instanceof BigInteger || n.longValue() != 0; + return unsignedLongToBoolean(ul); } @ConvertEvaluator(extraName = "FromInt") diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java index 7a67681018727..4ef666b03dfb3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java @@ -19,10 +19,10 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToSpatial; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.CARTESIAN_POINT; import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; public class ToCartesianPoint extends 
AbstractConvertFunction { @@ -59,6 +59,6 @@ protected NodeInfo info() { @ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class }) static BytesRef fromKeyword(BytesRef in) { - return CARTESIAN.wktToWkb(in.utf8ToString()); + return stringToSpatial(in.utf8ToString()); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java index 0e52e0870a354..6bd57a1dd2641 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java @@ -19,11 +19,11 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToSpatial; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.CARTESIAN_POINT; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.CARTESIAN_SHAPE; import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; public class ToCartesianShape extends AbstractConvertFunction { @@ -64,6 +64,6 @@ protected NodeInfo info() { @ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class }) static BytesRef fromKeyword(BytesRef in) { - return CARTESIAN.wktToWkb(in.utf8ToString()); + return stringToSpatial(in.utf8ToString()); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java index d74ba553fc9d0..005f27abc2a56 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java @@ -11,6 +11,7 @@ import org.elasticsearch.compute.ann.ConvertEvaluator; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -19,6 +20,8 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToDouble; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToDouble; import static org.elasticsearch.xpack.ql.type.DataTypes.BOOLEAN; import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME; import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; @@ -27,7 +30,6 @@ import static org.elasticsearch.xpack.ql.type.DataTypes.LONG; import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; -import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; public class ToDouble extends AbstractConvertFunction { @@ -78,14 +80,14 @@ static double fromBoolean(boolean bool) { return bool ? 
1d : 0d; } - @ConvertEvaluator(extraName = "FromString", warnExceptions = { NumberFormatException.class }) + @ConvertEvaluator(extraName = "FromString", warnExceptions = { InvalidArgumentException.class }) static double fromKeyword(BytesRef in) { - return Double.parseDouble(in.utf8ToString()); + return stringToDouble(in.utf8ToString()); } @ConvertEvaluator(extraName = "FromUnsignedLong") static double fromUnsignedLong(long l) { - return unsignedLongAsNumber(l).doubleValue(); + return unsignedLongToDouble(l); } @ConvertEvaluator(extraName = "FromLong") diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java index acfaa7c3964c2..96e366be25e44 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java @@ -19,10 +19,10 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToSpatial; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_POINT; import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; public class ToGeoPoint extends AbstractConvertFunction { @@ -59,6 +59,6 @@ protected NodeInfo info() { @ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class }) static BytesRef fromKeyword(BytesRef in) { - return GEO.wktToWkb(in.utf8ToString()); + return stringToSpatial(in.utf8ToString()); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java index e557735e6dfe1..d8381547b1651 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java @@ -19,11 +19,11 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToSpatial; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_POINT; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_SHAPE; import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; public class ToGeoShape extends AbstractConvertFunction { @@ -61,6 +61,6 @@ protected NodeInfo info() { @ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class }) static BytesRef fromKeyword(BytesRef in) { - return GEO.wktToWkb(in.utf8ToString()); + return stringToSpatial(in.utf8ToString()); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIP.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIP.java index 856f903e278c5..acb9ef7b46d63 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIP.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIP.java @@ -19,7 +19,7 @@ import java.util.List; import java.util.Map; -import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.EsqlConverter.STRING_TO_IP; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToIP; import static org.elasticsearch.xpack.ql.type.DataTypes.IP; import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; @@ -59,6 +59,6 @@ protected NodeInfo info() { @ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class }) static BytesRef fromKeyword(BytesRef asString) { - return (BytesRef) STRING_TO_IP.convert(asString); + return stringToIP(asString); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java index 16b2d45c7ae26..32d83de8da846 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java @@ -20,6 +20,8 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToInt; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToInt; import static org.elasticsearch.xpack.ql.type.DataTypeConverter.safeToInt; import static org.elasticsearch.xpack.ql.type.DataTypes.BOOLEAN; import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME; @@ -29,7 +31,6 @@ import static org.elasticsearch.xpack.ql.type.DataTypes.LONG; import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; -import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; public class ToInteger extends AbstractConvertFunction { @@ -80,18 +81,9 @@ static int fromBoolean(boolean bool) { return bool ? 
1 : 0; } - @ConvertEvaluator(extraName = "FromString", warnExceptions = { InvalidArgumentException.class, NumberFormatException.class }) + @ConvertEvaluator(extraName = "FromString", warnExceptions = { InvalidArgumentException.class }) static int fromKeyword(BytesRef in) { - String asString = in.utf8ToString(); - try { - return Integer.parseInt(asString); - } catch (NumberFormatException nfe) { - try { - return fromDouble(Double.parseDouble(asString)); - } catch (Exception e) { - throw nfe; - } - } + return stringToInt(in.utf8ToString()); } @ConvertEvaluator(extraName = "FromDouble", warnExceptions = { InvalidArgumentException.class }) @@ -101,12 +93,7 @@ static int fromDouble(double dbl) { @ConvertEvaluator(extraName = "FromUnsignedLong", warnExceptions = { InvalidArgumentException.class }) static int fromUnsignedLong(long ul) { - Number n = unsignedLongAsNumber(ul); - int i = n.intValue(); - if (i != n.longValue()) { - throw new InvalidArgumentException("[{}] out of [integer] range", n); - } - return i; + return unsignedLongToInt(ul); } @ConvertEvaluator(extraName = "FromLong", warnExceptions = { InvalidArgumentException.class }) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java index 3139b2ee740b4..b8dea5d8b42ea 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java @@ -20,8 +20,9 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToLong; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToLong; import static org.elasticsearch.xpack.ql.type.DataTypeConverter.safeDoubleToLong; -import static org.elasticsearch.xpack.ql.type.DataTypeConverter.safeToLong; import static org.elasticsearch.xpack.ql.type.DataTypes.BOOLEAN; import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME; import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; @@ -30,7 +31,6 @@ import static org.elasticsearch.xpack.ql.type.DataTypes.LONG; import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; -import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; public class ToLong extends AbstractConvertFunction { @@ -81,18 +81,9 @@ static long fromBoolean(boolean bool) { return bool ? 
1L : 0L; } - @ConvertEvaluator(extraName = "FromString", warnExceptions = { NumberFormatException.class }) + @ConvertEvaluator(extraName = "FromString", warnExceptions = { InvalidArgumentException.class }) static long fromKeyword(BytesRef in) { - String asString = in.utf8ToString(); - try { - return Long.parseLong(asString); - } catch (NumberFormatException nfe) { - try { - return fromDouble(Double.parseDouble(asString)); - } catch (Exception e) { - throw nfe; - } - } + return stringToLong(in.utf8ToString()); } @ConvertEvaluator(extraName = "FromDouble", warnExceptions = { InvalidArgumentException.class }) @@ -102,7 +93,7 @@ static long fromDouble(double dbl) { @ConvertEvaluator(extraName = "FromUnsignedLong", warnExceptions = { InvalidArgumentException.class }) static long fromUnsignedLong(long ul) { - return safeToLong(unsignedLongAsNumber(ul)); + return unsignedLongToLong(ul); } @ConvertEvaluator(extraName = "FromInt") diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java index 33663534bf6cd..a15d610f2b517 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java @@ -9,7 +9,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.ConvertEvaluator; -import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; @@ -17,12 +16,16 @@ import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; -import org.elasticsearch.xpack.versionfield.Version; import java.util.List; import java.util.Map; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.dateTimeToString; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.ipToString; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.numericBooleanToString; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.spatialToString; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToString; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.versionToString; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.CARTESIAN_POINT; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.CARTESIAN_SHAPE; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_POINT; @@ -37,9 +40,6 @@ import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; import static org.elasticsearch.xpack.ql.type.DataTypes.VERSION; -import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; public class ToString extends AbstractConvertFunction implements EvaluatorMapper { @@ -107,12 +107,12 @@ protected NodeInfo info() { @ConvertEvaluator(extraName = "FromBoolean") static BytesRef fromBoolean(boolean bool) { - return new 
BytesRef(String.valueOf(bool)); + return numericBooleanToString(bool); } @ConvertEvaluator(extraName = "FromIP") static BytesRef fromIP(BytesRef ip) { - return new BytesRef(DocValueFormat.IP.format(ip)); + return new BytesRef(ipToString(ip)); } @ConvertEvaluator(extraName = "FromDatetime") @@ -122,46 +122,46 @@ static BytesRef fromDatetime(long datetime) { @ConvertEvaluator(extraName = "FromDouble") static BytesRef fromDouble(double dbl) { - return new BytesRef(String.valueOf(dbl)); + return numericBooleanToString(dbl); } @ConvertEvaluator(extraName = "FromLong") static BytesRef fromDouble(long lng) { - return new BytesRef(String.valueOf(lng)); + return numericBooleanToString(lng); } @ConvertEvaluator(extraName = "FromInt") static BytesRef fromDouble(int integer) { - return new BytesRef(String.valueOf(integer)); + return numericBooleanToString(integer); } @ConvertEvaluator(extraName = "FromVersion") static BytesRef fromVersion(BytesRef version) { - return new BytesRef(new Version(version).toString()); + return new BytesRef(versionToString(version)); } @ConvertEvaluator(extraName = "FromUnsignedLong") static BytesRef fromUnsignedLong(long lng) { - return new BytesRef(unsignedLongAsNumber(lng).toString()); + return unsignedLongToString(lng); } @ConvertEvaluator(extraName = "FromGeoPoint") static BytesRef fromGeoPoint(BytesRef wkb) { - return new BytesRef(GEO.wkbToWkt(wkb)); + return new BytesRef(spatialToString(wkb)); } @ConvertEvaluator(extraName = "FromCartesianPoint") static BytesRef fromCartesianPoint(BytesRef wkb) { - return new BytesRef(CARTESIAN.wkbToWkt(wkb)); + return new BytesRef(spatialToString(wkb)); } @ConvertEvaluator(extraName = "FromCartesianShape") static BytesRef fromCartesianShape(BytesRef wkb) { - return new BytesRef(GEO.wkbToWkt(wkb)); + return new BytesRef(spatialToString(wkb)); } @ConvertEvaluator(extraName = "FromGeoShape") static BytesRef fromGeoShape(BytesRef wkb) { - return new BytesRef(CARTESIAN.wkbToWkt(wkb)); + return new BytesRef(spatialToString(wkb)); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java index 31bbcd4bf302f..8127fd2103051 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java @@ -20,7 +20,11 @@ import java.util.List; import java.util.Map; -import static org.elasticsearch.xpack.ql.type.DataTypeConverter.safeToUnsignedLong; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.booleanToUnsignedLong; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.doubleToUnsignedLong; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.intToUnsignedLong; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.longToUnsignedLong; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToUnsignedLong; import static org.elasticsearch.xpack.ql.type.DataTypes.BOOLEAN; import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME; import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; @@ -29,9 +33,6 @@ import static org.elasticsearch.xpack.ql.type.DataTypes.LONG; import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; import static 
org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; -import static org.elasticsearch.xpack.ql.util.NumericUtils.ONE_AS_UNSIGNED_LONG; -import static org.elasticsearch.xpack.ql.util.NumericUtils.ZERO_AS_UNSIGNED_LONG; -import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; public class ToUnsignedLong extends AbstractConvertFunction { @@ -79,27 +80,26 @@ protected NodeInfo info() { @ConvertEvaluator(extraName = "FromBoolean") static long fromBoolean(boolean bool) { - return bool ? ONE_AS_UNSIGNED_LONG : ZERO_AS_UNSIGNED_LONG; + return booleanToUnsignedLong(bool); } @ConvertEvaluator(extraName = "FromString", warnExceptions = { InvalidArgumentException.class, NumberFormatException.class }) static long fromKeyword(BytesRef in) { - String asString = in.utf8ToString(); - return asLongUnsigned(safeToUnsignedLong(asString)); + return stringToUnsignedLong(in.utf8ToString()); } @ConvertEvaluator(extraName = "FromDouble", warnExceptions = { InvalidArgumentException.class }) static long fromDouble(double dbl) { - return asLongUnsigned(safeToUnsignedLong(dbl)); + return doubleToUnsignedLong(dbl); } @ConvertEvaluator(extraName = "FromLong", warnExceptions = { InvalidArgumentException.class }) static long fromLong(long lng) { - return asLongUnsigned(safeToUnsignedLong(lng)); + return longToUnsignedLong(lng, false); } @ConvertEvaluator(extraName = "FromInt", warnExceptions = { InvalidArgumentException.class }) static long fromInt(int i) { - return fromLong(i); + return intToUnsignedLong(i); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersion.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersion.java index ca3012871fced..b0e0b385ee3c4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersion.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersion.java @@ -15,11 +15,11 @@ import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; -import org.elasticsearch.xpack.versionfield.Version; import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToVersion; import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; import static org.elasticsearch.xpack.ql.type.DataTypes.VERSION; @@ -59,6 +59,6 @@ protected NodeInfo info() { @ConvertEvaluator(extraName = "FromString") static BytesRef fromKeyword(BytesRef asString) { - return new Version(asString.utf8ToString()).toBytesRef(); + return stringToVersion(asString); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cast.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cast.java index 1c9f42de2f640..60bb904ab4849 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cast.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cast.java @@ -11,12 +11,13 @@ import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; -import 
org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; -import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongToDouble; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.intToUnsignedLong; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.longToUnsignedLong; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToDouble; public class Cast { /** @@ -84,15 +85,12 @@ static double castUnsignedLongToDouble(long v) { @Evaluator(extraName = "IntToUnsignedLong") static long castIntToUnsignedLong(int v) { - return castLongToUnsignedLong(v); + return intToUnsignedLong(v); } @Evaluator(extraName = "LongToUnsignedLong") // TODO: catch-to-null in evaluator? static long castLongToUnsignedLong(long v) { - if (v < 0) { - throw new InvalidArgumentException("[" + v + "] out of [unsigned_long] range"); - } - return v; + return longToUnsignedLong(v, false); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10.java index 1b41d2d407bd5..46df37c685cf7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10.java @@ -23,6 +23,7 @@ import java.util.List; import java.util.function.Function; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToDouble; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isNumeric; @@ -74,7 +75,7 @@ static double processUnsignedLong(long val) { if (val == NumericUtils.ZERO_AS_UNSIGNED_LONG) { throw new ArithmeticException("Log of non-positive number"); } - return Math.log10(NumericUtils.unsignedLongToDouble(val)); + return Math.log10(unsignedLongToDouble(val)); } @Evaluator(extraName = "Int", warnExceptions = ArithmeticException.class) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Round.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Round.java index 758b56093d40f..2edb1c7c3a159 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Round.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Round.java @@ -29,12 +29,12 @@ import java.util.function.BiFunction; import java.util.function.Function; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.bigIntegerToUnsignedLong; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.longToUnsignedLong; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isInteger; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isNumeric; -import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; -import static org.elasticsearch.xpack.ql.util.NumericUtils.asUnsignedLong; import static 
org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; public class Round extends EsqlScalarFunction implements OptionalArgument { @@ -99,10 +99,9 @@ static long processUnsignedLong(long val, long decimals) { Number ul = unsignedLongAsNumber(val); if (ul instanceof BigInteger bi) { BigInteger rounded = Maths.round(bi, decimals); - BigInteger unsignedLong = asUnsignedLong(rounded); - return asLongUnsigned(unsignedLong); + return bigIntegerToUnsignedLong(rounded); } else { - return asLongUnsigned(Maths.round(ul.longValue(), decimals)); + return longToUnsignedLong(Maths.round(ul.longValue(), decimals), false); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sqrt.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sqrt.java index 542f667c61b95..17882f1baa81d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sqrt.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sqrt.java @@ -18,11 +18,11 @@ import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; -import org.elasticsearch.xpack.ql.util.NumericUtils; import java.util.List; import java.util.function.Function; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToDouble; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isNumeric; @@ -71,7 +71,7 @@ static double process(long val) { @Evaluator(extraName = "UnsignedLong") static double processUnsignedLong(long val) { - return Math.sqrt(NumericUtils.unsignedLongToDouble(val)); + return Math.sqrt(unsignedLongToDouble(val)); } @Evaluator(extraName = "Int", warnExceptions = ArithmeticException.class) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvg.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvg.java index 0b4652b305741..5265d5bcad660 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvg.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvg.java @@ -23,9 +23,9 @@ import java.util.List; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToDouble; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isRepresentable; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isType; -import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongToDouble; /** * Reduce a multivalued field to a single valued field containing the average value. 
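(Editorial aside, not part of the patch: the Round, Sqrt, Log10 and MvAvg hunks above all stop calling NumericUtils directly and instead go through the new EsqlDataTypeConverter helpers. A minimal sketch of the unsigned_long round-trip those helpers provide, using the signatures introduced later in this patch; the class name and sample value here are illustrative only.)

```java
import java.math.BigInteger;

import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.bigIntegerToUnsignedLong;
import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToBigInteger;
import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToDouble;

// Hypothetical example (not part of the change): an unsigned_long is carried
// in a signed long and re-expanded on read, which is what the math and
// multivalue functions above now do via the converter helpers.
class UnsignedLongRoundTripExample {
    public static void main(String[] args) {
        BigInteger original = new BigInteger("18446744073709551615"); // 2^64 - 1, the unsigned_long maximum
        long encoded = bigIntegerToUnsignedLong(original);            // pack into the signed-long carrier
        BigInteger decoded = unsignedLongToBigInteger(encoded);       // unpack back to the numeric value
        assert original.equals(decoded);
        double approx = unsignedLongToDouble(encoded);                // lossy widening used by Log10/Sqrt/MvAvg
        System.out.println(decoded + " ~ " + approx);
    }
}
```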
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedian.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedian.java index 7c3cb2c1c2d1d..8f65d15134cfa 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedian.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedian.java @@ -26,10 +26,10 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.bigIntegerToUnsignedLong; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToBigInteger; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isRepresentable; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isType; -import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; -import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsBigInteger; /** * Reduce a multivalued field to a single valued field containing the average value. @@ -156,9 +156,9 @@ static long finishUnsignedLong(Longs longs) { Arrays.sort(longs.values, 0, longs.count); int middle = longs.count / 2; longs.count = 0; - BigInteger a = unsignedLongAsBigInteger(longs.values[middle - 1]); - BigInteger b = unsignedLongAsBigInteger(longs.values[middle]); - return asLongUnsigned(a.add(b).shiftRight(1).longValue()); + BigInteger a = unsignedLongToBigInteger(longs.values[middle - 1]); + BigInteger b = unsignedLongToBigInteger(longs.values[middle]); + return bigIntegerToUnsignedLong(a.add(b).shiftRight(1)); } /** @@ -169,9 +169,9 @@ static long ascendingUnsignedLong(LongBlock values, int firstValue, int count) { if (count % 2 == 1) { return values.getLong(middle); } - BigInteger a = unsignedLongAsBigInteger(values.getLong(middle - 1)); - BigInteger b = unsignedLongAsBigInteger(values.getLong(middle)); - return asLongUnsigned(a.add(b).shiftRight(1).longValue()); + BigInteger a = unsignedLongToBigInteger(values.getLong(middle - 1)); + BigInteger b = unsignedLongToBigInteger(values.getLong(middle)); + return bigIntegerToUnsignedLong(a.add(b).shiftRight(1)); } static class Ints { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java index 29ec231f08555..e054fc7e00e24 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java @@ -35,6 +35,7 @@ import java.util.Objects; import java.util.function.Function; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToInt; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.THIRD; @@ -129,8 +130,8 @@ public EvalOperator.ExpressionEvaluator.Factory toEvaluator( Function toEvaluator ) { if (start.foldable() && end.foldable()) { - int startOffset = Integer.parseInt(String.valueOf(start.fold())); - int endOffset = Integer.parseInt(String.valueOf(end.fold())); + int 
startOffset = stringToInt(String.valueOf(start.fold())); + int endOffset = stringToInt(String.valueOf(end.fold())); checkStartEnd(startOffset, endOffset); } return switch (PlannerUtils.toElementType(field.dataType())) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Div.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Div.java index 42fd526cb3b99..170e3de6e4209 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Div.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Div.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.ql.type.DataType; import static org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation.OperationSymbol.DIV; -import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.longToUnsignedLong; public class Div extends EsqlArithmeticOperation implements BinaryComparisonInversible { @@ -73,7 +73,7 @@ static long processLongs(long lhs, long rhs) { @Evaluator(extraName = "UnsignedLongs", warnExceptions = { ArithmeticException.class }) static long processUnsignedLongs(long lhs, long rhs) { - return asLongUnsigned(Long.divideUnsigned(asLongUnsigned(lhs), asLongUnsigned(rhs))); + return longToUnsignedLong(Long.divideUnsigned(longToUnsignedLong(lhs, true), longToUnsignedLong(rhs, true)), true); } @Evaluator(extraName = "Doubles") diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Mod.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Mod.java index 41a396819a7f2..bc1ad8fcb5f94 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Mod.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Mod.java @@ -13,7 +13,7 @@ import org.elasticsearch.xpack.ql.tree.Source; import static org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation.OperationSymbol.MOD; -import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.longToUnsignedLong; public class Mod extends EsqlArithmeticOperation { @@ -52,7 +52,7 @@ static long processLongs(long lhs, long rhs) { @Evaluator(extraName = "UnsignedLongs", warnExceptions = { ArithmeticException.class }) static long processUnsignedLongs(long lhs, long rhs) { - return asLongUnsigned(Long.remainderUnsigned(asLongUnsigned(lhs), asLongUnsigned(rhs))); + return longToUnsignedLong(Long.remainderUnsigned(longToUnsignedLong(lhs, true), longToUnsignedLong(rhs, true)), true); } @Evaluator(extraName = "Doubles") diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index c20763e08f343..d198d740029e1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -209,6 +209,7 @@ import static 
org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.Entry.of; import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader.readerFromPlanReader; import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter.writerFromPlanWriter; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToLong; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.CARTESIAN_POINT; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_POINT; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; @@ -996,7 +997,7 @@ static void writeFieldAttribute(PlanStreamOutput out, FieldAttribute fileAttribu out.writeNamed(EsField.class, fileAttribute.field()); out.writeOptionalString(fileAttribute.qualifier()); out.writeEnum(fileAttribute.nullable()); - out.writeLong(Long.parseLong(fileAttribute.id().toString())); + out.writeLong(stringToLong(fileAttribute.id().toString())); out.writeBoolean(fileAttribute.synthetic()); } @@ -1018,7 +1019,7 @@ static void writeReferenceAttr(PlanStreamOutput out, ReferenceAttribute referenc out.writeString(referenceAttribute.dataType().typeName()); out.writeOptionalString(referenceAttribute.qualifier()); out.writeEnum(referenceAttribute.nullable()); - out.writeLong(Long.parseLong(referenceAttribute.id().toString())); + out.writeLong(stringToLong(referenceAttribute.id().toString())); out.writeBoolean(referenceAttribute.synthetic()); } @@ -1041,7 +1042,7 @@ static void writeMetadataAttr(PlanStreamOutput out, MetadataAttribute metadataAt out.writeString(metadataAttribute.dataType().typeName()); out.writeOptionalString(metadataAttribute.qualifier()); out.writeEnum(metadataAttribute.nullable()); - out.writeLong(Long.parseLong(metadataAttribute.id().toString())); + out.writeLong(stringToLong(metadataAttribute.id().toString())); out.writeBoolean(metadataAttribute.synthetic()); out.writeBoolean(metadataAttribute.searchable()); } @@ -1061,7 +1062,7 @@ static void writeUnsupportedAttr(PlanStreamOutput out, UnsupportedAttribute unsu out.writeString(unsupportedAttribute.name()); writeUnsupportedEsField(out, unsupportedAttribute.field()); out.writeOptionalString(unsupportedAttribute.hasCustomMessage() ? 
unsupportedAttribute.unresolvedMessage() : null); - out.writeLong(Long.parseLong(unsupportedAttribute.id().toString())); + out.writeLong(stringToLong(unsupportedAttribute.id().toString())); } // -- EsFields @@ -1735,7 +1736,7 @@ static void writeAlias(PlanStreamOutput out, Alias alias) throws IOException { out.writeString(alias.name()); out.writeOptionalString(alias.qualifier()); out.writeExpression(alias.child()); - out.writeLong(Long.parseLong(alias.id().toString())); + out.writeLong(stringToLong(alias.id().toString())); out.writeBoolean(alias.synthetic()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java index 8a13c80c0ea68..223d318a64324 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java @@ -73,7 +73,9 @@ import java.util.function.BiFunction; import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.bigIntegerToUnsignedLong; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.parseTemporalAmout; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToIntegral; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.DATE_PERIOD; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.TIME_DURATION; import static org.elasticsearch.xpack.ql.parser.ParserUtils.source; @@ -124,11 +126,11 @@ public Literal visitIntegerValue(EsqlBaseParser.IntegerValueContext ctx) { Number number; try { - number = StringUtils.parseIntegral(text); + number = stringToIntegral(text); } catch (InvalidArgumentException siae) { // if it's too large, then quietly try to parse as a float instead try { - return new Literal(source, StringUtils.parseDouble(text), DataTypes.DOUBLE); + return new Literal(source, EsqlDataTypeConverter.stringToDouble(text), DataTypes.DOUBLE); } catch (InvalidArgumentException ignored) {} throw new ParsingException(source, siae.getMessage()); @@ -161,7 +163,9 @@ public Object visitNumericArrayLiteral(EsqlBaseParser.NumericArrayLiteralContext source, mapNumbers( numbers, - (no, dt) -> dt == DataTypes.UNSIGNED_LONG ? no.longValue() : asLongUnsigned(BigInteger.valueOf(no.longValue())) + (no, dt) -> dt == DataTypes.UNSIGNED_LONG + ? 
no.longValue() + : bigIntegerToUnsignedLong(BigInteger.valueOf(no.longValue())) ), DataTypes.UNSIGNED_LONG ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index 5ae0584d28a44..64ce1633e8772 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -64,6 +64,7 @@ import static org.elasticsearch.common.logging.HeaderWarning.addWarning; import static org.elasticsearch.xpack.esql.plan.logical.Enrich.Mode; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToInt; import static org.elasticsearch.xpack.ql.parser.ParserUtils.source; import static org.elasticsearch.xpack.ql.parser.ParserUtils.typedParsing; import static org.elasticsearch.xpack.ql.parser.ParserUtils.visitList; @@ -250,7 +251,7 @@ public PlanFactory visitWhereCommand(EsqlBaseParser.WhereCommandContext ctx) { @Override public PlanFactory visitLimitCommand(EsqlBaseParser.LimitCommandContext ctx) { Source source = source(ctx); - int limit = Integer.parseInt(ctx.INTEGER_LITERAL().getText()); + int limit = stringToInt(ctx.INTEGER_LITERAL().getText()); return input -> new Limit(source, new Literal(source, limit, DataTypes.INTEGER), input); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java index 37e87a456a4d0..0afa6179fd3c8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; @@ -59,6 +58,8 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.DEFAULT_DATE_TIME_FORMATTER; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.HOUR_MINUTE_SECOND; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.ipToString; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.versionToString; import static org.elasticsearch.xpack.ql.type.DataTypes.IP; import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; import static org.elasticsearch.xpack.ql.type.DataTypes.VERSION; @@ -191,14 +192,14 @@ static Query translate(BinaryComparison bc, TranslatorHandler handler) { format = formatter.pattern(); isDateLiteralComparison = true; } else if (attribute.dataType() == IP && value instanceof BytesRef bytesRef) { - value = DocValueFormat.IP.format(bytesRef); + value = ipToString(bytesRef); } else if (attribute.dataType() == VERSION) { // VersionStringFieldMapper#indexedValueForSearch() only accepts as input String or BytesRef with the String (i.e. not // encoded) representation of the version as it'll do the encoding itself. 
if (value instanceof BytesRef bytesRef) { - value = new Version(bytesRef).toString(); + value = versionToString(bytesRef); } else if (value instanceof Version version) { - value = version.toString(); + value = versionToString(version); } } else if (attribute.dataType() == UNSIGNED_LONG && value instanceof Long ul) { value = unsignedLongAsNumber(ul); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index 34a31ac7e656d..aad80b6c673ba 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -99,6 +99,7 @@ import static java.util.stream.Collectors.joining; import static org.elasticsearch.compute.operator.LimitOperator.Factory; import static org.elasticsearch.compute.operator.ProjectOperator.ProjectOperatorFactory; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToInt; /** * The local execution planner takes a plan (represented as PlanNode tree / digraph) as input and creates the corresponding @@ -366,7 +367,7 @@ private PhysicalOperation planTopN(TopNExec topNExec, LocalExecutionPlannerConte int limit; if (topNExec.limit() instanceof Literal literal) { - limit = Integer.parseInt(literal.value().toString()); + limit = stringToInt(literal.value().toString()); } else { throw new EsqlIllegalArgumentException("limit only supported with literal values"); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java index 51f96196f29d7..82e7fc2e9fc88 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.xpack.esql.parser.ParsingException; import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.QlIllegalArgumentException; @@ -19,8 +20,12 @@ import org.elasticsearch.xpack.ql.type.Converter; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypeConverter; +import org.elasticsearch.xpack.ql.util.NumericUtils; +import org.elasticsearch.xpack.ql.util.StringUtils; +import org.elasticsearch.xpack.versionfield.Version; import java.io.IOException; +import java.math.BigInteger; import java.time.Duration; import java.time.Instant; import java.time.Period; @@ -30,12 +35,19 @@ import java.util.Locale; import java.util.function.Function; +import static org.elasticsearch.xpack.ql.type.DataTypeConverter.safeDoubleToLong; import static org.elasticsearch.xpack.ql.type.DataTypeConverter.safeToInt; import static org.elasticsearch.xpack.ql.type.DataTypeConverter.safeToLong; +import static org.elasticsearch.xpack.ql.type.DataTypeConverter.safeToUnsignedLong; import static org.elasticsearch.xpack.ql.type.DataTypes.NULL; import static org.elasticsearch.xpack.ql.type.DataTypes.isPrimitive; import static org.elasticsearch.xpack.ql.type.DataTypes.isString; -import static 
org.elasticsearch.xpack.ql.util.StringUtils.parseIP; +import static org.elasticsearch.xpack.ql.util.NumericUtils.ONE_AS_UNSIGNED_LONG; +import static org.elasticsearch.xpack.ql.util.NumericUtils.ZERO_AS_UNSIGNED_LONG; +import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; +import static org.elasticsearch.xpack.ql.util.NumericUtils.asUnsignedLong; +import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.UNSPECIFIED; public class EsqlDataTypeConverter { @@ -161,10 +173,13 @@ public static TemporalAmount parseTemporalAmout(Number value, String qualifier, }; } - private static ChronoField stringToChrono(Object value) { + /** + * The following conversions are used by DateExtract. + */ + private static ChronoField stringToChrono(Object field) { ChronoField chronoField = null; try { - BytesRef br = BytesRefs.toBytesRef(value); + BytesRef br = BytesRefs.toBytesRef(field); chronoField = ChronoField.valueOf(br.utf8ToString().toUpperCase(Locale.ROOT)); } catch (Exception e) { return null; @@ -172,10 +187,6 @@ private static ChronoField stringToChrono(Object value) { return chronoField; } - private static BytesRef stringToIP(BytesRef value) { - return parseIP(value.utf8ToString()); - } - public static long chronoToLong(long dateTime, BytesRef chronoField, ZoneId zone) { ChronoField chrono = ChronoField.valueOf(chronoField.utf8ToString().toUpperCase(Locale.ROOT)); return Instant.ofEpochMilli(dateTime).atZone(zone).getLong(chrono); @@ -185,6 +196,41 @@ public static long chronoToLong(long dateTime, ChronoField chronoField, ZoneId z return Instant.ofEpochMilli(dateTime).atZone(zone).getLong(chronoField); } + /** + * The following conversions are between String and other data types. + */ + public static BytesRef stringToIP(BytesRef field) { + return StringUtils.parseIP(field.utf8ToString()); + } + + public static BytesRef stringToIP(String field) { + return StringUtils.parseIP(field); + } + + public static String ipToString(BytesRef field) { + return DocValueFormat.IP.format(field); + } + + public static BytesRef stringToVersion(BytesRef field) { + return new Version(field.utf8ToString()).toBytesRef(); + } + + public static String versionToString(BytesRef field) { + return new Version(field).toString(); + } + + public static String versionToString(Version field) { + return field.toString(); + } + + public static String spatialToString(BytesRef field) { + return UNSPECIFIED.wkbToWkt(field); + } + + public static BytesRef stringToSpatial(String field) { + return UNSPECIFIED.wktToWkb(field); + } + public static long dateTimeToLong(String dateTime) { return DEFAULT_DATE_TIME_FORMATTER.parseMillis(dateTime); } @@ -201,12 +247,109 @@ public static String dateTimeToString(long dateTime, DateFormatter formatter) { return formatter == null ? 
dateTimeToString(dateTime) : formatter.formatMillis(dateTime); } + public static BytesRef numericBooleanToString(Object field) { + return new BytesRef(String.valueOf(field)); + } + + public static boolean stringToBoolean(String field) { + return Boolean.parseBoolean(field); + } + + public static int stringToInt(String field) { + try { + return Integer.parseInt(field); + } catch (NumberFormatException nfe) { + try { + return safeToInt(stringToDouble(field)); + } catch (Exception e) { + throw new InvalidArgumentException(nfe, "Cannot parse number [{}]", field); + } + } + } + + public static long stringToLong(String field) { + try { + return StringUtils.parseLong(field); + } catch (InvalidArgumentException iae) { + try { + return safeDoubleToLong(stringToDouble(field)); + } catch (Exception e) { + throw new InvalidArgumentException(iae, "Cannot parse number [{}]", field); + } + } + } + + public static double stringToDouble(String field) { + return StringUtils.parseDouble(field); + } + + public static BytesRef unsignedLongToString(long number) { + return new BytesRef(unsignedLongAsNumber(number).toString()); + } + + public static long stringToUnsignedLong(String field) { + return asLongUnsigned(safeToUnsignedLong(field)); + } + + public static Number stringToIntegral(String field) { + return StringUtils.parseIntegral(field); + } + + /** + * The following conversion are between unsignedLong and other numeric data types. + */ + public static double unsignedLongToDouble(long number) { + return NumericUtils.unsignedLongAsNumber(number).doubleValue(); + } + + public static long doubleToUnsignedLong(double number) { + return NumericUtils.asLongUnsigned(safeToUnsignedLong(number)); + } + + public static int unsignedLongToInt(long number) { + Number n = NumericUtils.unsignedLongAsNumber(number); + int i = n.intValue(); + if (i != n.longValue()) { + throw new InvalidArgumentException("[{}] out of [integer] range", n); + } + return i; + } + + public static long intToUnsignedLong(int number) { + return longToUnsignedLong(number, false); + } + + public static long unsignedLongToLong(long number) { + return DataTypeConverter.safeToLong(unsignedLongAsNumber(number)); + } + + public static long longToUnsignedLong(long number, boolean allowNegative) { + return allowNegative == false ? NumericUtils.asLongUnsigned(safeToUnsignedLong(number)) : NumericUtils.asLongUnsigned(number); + } + + public static long bigIntegerToUnsignedLong(BigInteger field) { + BigInteger unsignedLong = asUnsignedLong(field); + return NumericUtils.asLongUnsigned(unsignedLong); + } + + public static BigInteger unsignedLongToBigInteger(long number) { + return NumericUtils.unsignedLongAsBigInteger(number); + } + + public static boolean unsignedLongToBoolean(long number) { + Number n = NumericUtils.unsignedLongAsNumber(number); + return n instanceof BigInteger || n.longValue() != 0; + } + + public static long booleanToUnsignedLong(boolean number) { + return number ? 
ONE_AS_UNSIGNED_LONG : ZERO_AS_UNSIGNED_LONG; + } + public enum EsqlConverter implements Converter { STRING_TO_DATE_PERIOD(x -> EsqlDataTypeConverter.parseTemporalAmount(x, EsqlDataTypes.DATE_PERIOD)), STRING_TO_TIME_DURATION(x -> EsqlDataTypeConverter.parseTemporalAmount(x, EsqlDataTypes.TIME_DURATION)), - STRING_TO_CHRONO_FIELD(EsqlDataTypeConverter::stringToChrono), - STRING_TO_IP(x -> EsqlDataTypeConverter.stringToIP((BytesRef) x)); + STRING_TO_CHRONO_FIELD(EsqlDataTypeConverter::stringToChrono); private static final String NAME = "esql-converter"; private final Function converter; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java index 0309bcce85581..22a00bb3684a6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java @@ -13,6 +13,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; +import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -55,7 +57,10 @@ public static Iterable parameters() { ); // random strings that don't look like a double TestCaseSupplier.forUnaryStrings(suppliers, evaluatorName.apply("String"), DataTypes.DOUBLE, bytesRef -> null, bytesRef -> { - var exception = expectThrows(NumberFormatException.class, () -> Double.parseDouble(bytesRef.utf8ToString())); + var exception = expectThrows( + InvalidArgumentException.class, + () -> EsqlDataTypeConverter.stringToDouble(bytesRef.utf8ToString()) + ); return List.of( "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: " + exception diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java index 4402c6d8529b4..3a6cb86b7a3c6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java @@ -71,7 +71,7 @@ public static Iterable parameters() { bytesRef -> null, bytesRef -> List.of( "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", - "Line -1:-1: java.lang.NumberFormatException: For input string: \"" + bytesRef.utf8ToString() + "\"" + "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [" + bytesRef.utf8ToString() + "]" ) ); // from doubles within Integer's range @@ -228,7 +228,9 @@ public static Iterable parameters() { bytesRef -> null, bytesRef -> List.of( "Line -1:-1: evaluation of [] failed, treating result as null. 
                 "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.",
-                "Line -1:-1: java.lang.NumberFormatException: For input string: \"" + ((BytesRef) bytesRef).utf8ToString() + "\""
+                "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number ["
+                    + ((BytesRef) bytesRef).utf8ToString()
+                    + "]"
             )
         );
         // strings of random doubles outside Integer's range, positive
@@ -249,7 +251,9 @@ public static Iterable<Object[]> parameters() {
             bytesRef -> null,
             bytesRef -> List.of(
                 "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.",
-                "Line -1:-1: java.lang.NumberFormatException: For input string: \"" + ((BytesRef) bytesRef).utf8ToString() + "\""
+                "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number ["
+                    + ((BytesRef) bytesRef).utf8ToString()
+                    + "]"
             )
         );
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java
index 030c219b75e2f..031ce6193bcc4 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java
@@ -50,7 +50,7 @@ public static Iterable<Object[]> parameters() {
             bytesRef -> null,
             bytesRef -> List.of(
                 "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.",
-                "Line -1:-1: java.lang.NumberFormatException: For input string: \"" + bytesRef.utf8ToString() + "\""
+                "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [" + bytesRef.utf8ToString() + "]"
             )
         );
         // from doubles within long's range
@@ -179,7 +179,9 @@ public static Iterable<Object[]> parameters() {
             bytesRef -> null,
             bytesRef -> List.of(
                 "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.",
-                "Line -1:-1: java.lang.NumberFormatException: For input string: \"" + ((BytesRef) bytesRef).utf8ToString() + "\""
+                "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number ["
+                    + ((BytesRef) bytesRef).utf8ToString()
+                    + "]"
             )
         );
         // strings of random doubles outside integer's range, positive
@@ -200,7 +202,9 @@ public static Iterable<Object[]> parameters() {
             bytesRef -> null,
             bytesRef -> List.of(
                 "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.",
-                "Line -1:-1: java.lang.NumberFormatException: For input string: \"" + ((BytesRef) bytesRef).utf8ToString() + "\""
+                "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number ["
+                    + ((BytesRef) bytesRef).utf8ToString()
+                    + "]"
             )
         );
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java
index 8cbeca67d0abd..3c1bf69a78716 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java
@@ -15,13 +15,15 @@
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.type.DataTypes;
-import org.elasticsearch.xpack.ql.util.NumericUtils;
 
 import java.math.BigInteger;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.function.Supplier;
 
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.bigIntegerToUnsignedLong;
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToDouble;
+
 public class Log10Tests extends AbstractFunctionTestCase {
     public Log10Tests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
@@ -54,7 +56,7 @@ public static Iterable<Object[]> parameters() {
             suppliers,
             "Log10UnsignedLongEvaluator[val=" + read + "]",
             DataTypes.DOUBLE,
-            ul -> Math.log10(ul == null ? null : NumericUtils.unsignedLongToDouble(NumericUtils.asLongUnsigned(ul))),
+            ul -> Math.log10(ul == null ? null : unsignedLongToDouble(bigIntegerToUnsignedLong(ul))),
             BigInteger.ONE,
             UNSIGNED_LONG_MAX,
             List.of()
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java
index 55a479a3d2b2c..29e75bb3f0225 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java
@@ -22,6 +22,8 @@
 import java.util.List;
 import java.util.function.Supplier;
 
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToDouble;
+
 public class SqrtTests extends AbstractFunctionTestCase {
     public SqrtTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
@@ -54,7 +56,7 @@ public static Iterable<Object[]> parameters() {
             suppliers,
             "SqrtUnsignedLongEvaluator[val=" + read + "]",
             DataTypes.DOUBLE,
-            ul -> Math.sqrt(ul == null ? null : NumericUtils.unsignedLongToDouble(NumericUtils.asLongUnsigned(ul))),
+            ul -> Math.sqrt(ul == null ? null : unsignedLongToDouble(NumericUtils.asLongUnsigned(ul))),
             BigInteger.ZERO,
             UNSIGNED_LONG_MAX,
             List.of()
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java
index b1070cb7eb12b..c6c8826c6805a 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java
@@ -25,6 +25,7 @@
 import java.util.function.Supplier;
 import java.util.stream.DoubleStream;
 
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToDouble;
 import static org.hamcrest.Matchers.equalTo;
 
 public class MvAvgTests extends AbstractMultivalueFunctionTestCase {
@@ -53,7 +54,7 @@ public static Iterable<Object[]> parameters() {
             * So we have to go back to encoded `long` and then convert to double
             * using the production conversion. That'll round in the same way.
             */
-            (size, data) -> avg.apply(size, data.mapToDouble(v -> NumericUtils.unsignedLongToDouble(NumericUtils.asLongUnsigned(v))))
+            (size, data) -> avg.apply(size, data.mapToDouble(v -> unsignedLongToDouble(NumericUtils.asLongUnsigned(v))))
         );
         return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, cases)));
     }
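
The string-to-number fallback exercised by the ToInteger/ToLong/ToDouble test changes above comes from the new EsqlDataTypeConverter methods: an integral parse is tried first, the value is then re-parsed as a double, and anything unparseable surfaces as "Cannot parse number [...]". A minimal standalone sketch of that fallback, using plain JDK parsing instead of the ql StringUtils / InvalidArgumentException helpers (class name, rounding and exception type below are illustrative, not part of this patch):

    // Sketch only: mirrors the try-integral-then-double fallback added in EsqlDataTypeConverter.
    public class StringToLongFallbackSketch {
        static long stringToLong(String field) {
            try {
                return Long.parseLong(field);                 // integral fast path
            } catch (NumberFormatException nfe) {
                try {
                    double d = Double.parseDouble(field);     // accepts "42.7", "2e3", ...
                    if (d > Long.MAX_VALUE || d < Long.MIN_VALUE) {
                        throw new ArithmeticException("out of [long] range");
                    }
                    return Math.round(d);                     // the patch delegates to safeDoubleToLong here
                } catch (Exception e) {
                    throw new IllegalArgumentException("Cannot parse number [" + field + "]", nfe);
                }
            }
        }

        public static void main(String[] args) {
            System.out.println(stringToLong("42"));    // 42
            System.out.println(stringToLong("42.7"));  // 43, via the double fallback
            stringToLong("foo");                       // IllegalArgumentException: Cannot parse number [foo]
        }
    }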