diff --git a/docs/changelog/98038.yaml b/docs/changelog/98038.yaml
new file mode 100644
index 0000000000000..d99db24664f30
--- /dev/null
+++ b/docs/changelog/98038.yaml
@@ -0,0 +1,6 @@
+pr: 98038
+summary: Update enrich execution to only set index false on fields that support it
+area: Ingest Node
+type: bug
+issues:
+ - 98019
diff --git a/docs/changelog/98847.yaml b/docs/changelog/98847.yaml
new file mode 100644
index 0000000000000..ab7455bd783c3
--- /dev/null
+++ b/docs/changelog/98847.yaml
@@ -0,0 +1,5 @@
+pr: 98847
+summary: "ESQL: Add `CEIL` function"
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/98864.yaml b/docs/changelog/98864.yaml
new file mode 100644
index 0000000000000..52f5b1b0ad70a
--- /dev/null
+++ b/docs/changelog/98864.yaml
@@ -0,0 +1,5 @@
+pr: 98864
+summary: "[Profiling] Abort index creation on outdated index"
+area: Application
+type: bug
+issues: []
diff --git a/docs/changelog/98878.yaml b/docs/changelog/98878.yaml
new file mode 100644
index 0000000000000..4fa8b23851bf9
--- /dev/null
+++ b/docs/changelog/98878.yaml
@@ -0,0 +1,5 @@
+pr: 98878
+summary: Fix percolator query for stored queries that expand on wildcard field names
+area: Percolator
+type: bug
+issues: []
diff --git a/docs/changelog/98942.yaml b/docs/changelog/98942.yaml
new file mode 100644
index 0000000000000..4d8eeee5192e5
--- /dev/null
+++ b/docs/changelog/98942.yaml
@@ -0,0 +1,5 @@
+pr: 98942
+summary: "ESQL: LEFT function"
+area: ES|QL
+type: feature
+issues: []
diff --git a/docs/reference/esql/esql-functions.asciidoc b/docs/reference/esql/esql-functions.asciidoc
index cd9cfd7646ae7..49b0be636d0a0 100644
--- a/docs/reference/esql/esql-functions.asciidoc
+++ b/docs/reference/esql/esql-functions.asciidoc
@@ -15,6 +15,7 @@ these functions:
* <>
* <>
* <>
+* <<esql-ceil>>
* <>
* <>
* <>
@@ -52,6 +53,7 @@ these functions:
* <>
* <>
* <>
+* <<esql-left>>
* <>
* <>
* <>
@@ -75,6 +77,7 @@ include::functions/atan.asciidoc[]
include::functions/atan2.asciidoc[]
include::functions/auto_bucket.asciidoc[]
include::functions/case.asciidoc[]
+include::functions/ceil.asciidoc[]
include::functions/cidr_match.asciidoc[]
include::functions/coalesce.asciidoc[]
include::functions/concat.asciidoc[]
@@ -113,6 +116,7 @@ include::functions/split.asciidoc[]
include::functions/sqrt.asciidoc[]
include::functions/starts_with.asciidoc[]
include::functions/substring.asciidoc[]
+include::functions/left.asciidoc[]
include::functions/tan.asciidoc[]
include::functions/tanh.asciidoc[]
include::functions/tau.asciidoc[]
diff --git a/docs/reference/esql/functions/acos.asciidoc b/docs/reference/esql/functions/acos.asciidoc
index 383e4224a0e1b..1fc64c05637c5 100644
--- a/docs/reference/esql/functions/acos.asciidoc
+++ b/docs/reference/esql/functions/acos.asciidoc
@@ -1,5 +1,8 @@
[[esql-acos]]
=== `ACOS`
+[.text-center]
+image::esql/functions/signature/acos.svg[Embedded,opts=inline]
+
Inverse https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[cosine] trigonometric function.
[source.merge.styled,esql]
@@ -10,3 +13,7 @@ include::{esql-specs}/floats.csv-spec[tag=acos]
|===
include::{esql-specs}/floats.csv-spec[tag=acos-result]
|===
+
+Supported types:
+
+include::types/acos.asciidoc[]
diff --git a/docs/reference/esql/functions/asin.asciidoc b/docs/reference/esql/functions/asin.asciidoc
index a7ddfde444edd..ed39906a6ea0c 100644
--- a/docs/reference/esql/functions/asin.asciidoc
+++ b/docs/reference/esql/functions/asin.asciidoc
@@ -1,5 +1,8 @@
[[esql-asin]]
=== `ASIN`
+[.text-center]
+image::esql/functions/signature/asin.svg[Embedded,opts=inline]
+
Inverse https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[sine] trigonometric function.
[source.merge.styled,esql]
@@ -10,3 +13,7 @@ include::{esql-specs}/floats.csv-spec[tag=asin]
|===
include::{esql-specs}/floats.csv-spec[tag=asin-result]
|===
+
+Supported types:
+
+include::types/asin.asciidoc[]
diff --git a/docs/reference/esql/functions/atan.asciidoc b/docs/reference/esql/functions/atan.asciidoc
index cda085ec8eb68..db5069bdd8010 100644
--- a/docs/reference/esql/functions/atan.asciidoc
+++ b/docs/reference/esql/functions/atan.asciidoc
@@ -1,5 +1,8 @@
[[esql-atan]]
=== `ATAN`
+[.text-center]
+image::esql/functions/signature/atan.svg[Embedded,opts=inline]
+
Inverse https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[tangent] trigonometric function.
[source.merge.styled,esql]
@@ -10,3 +13,7 @@ include::{esql-specs}/floats.csv-spec[tag=atan]
|===
include::{esql-specs}/floats.csv-spec[tag=atan-result]
|===
+
+Supported types:
+
+include::types/atan.asciidoc[]
diff --git a/docs/reference/esql/functions/atan2.asciidoc b/docs/reference/esql/functions/atan2.asciidoc
index 47dee88ddc740..efaf82bc483f0 100644
--- a/docs/reference/esql/functions/atan2.asciidoc
+++ b/docs/reference/esql/functions/atan2.asciidoc
@@ -1,5 +1,7 @@
[[esql-atan2]]
=== `ATAN2`
+[.text-center]
+image::esql/functions/signature/atan2.svg[Embedded,opts=inline]
The https://en.wikipedia.org/wiki/Atan2[angle] between the positive x-axis and the
ray from the origin to the point (x , y) in the Cartesian plane.
@@ -12,3 +14,7 @@ include::{esql-specs}/floats.csv-spec[tag=atan2]
|===
include::{esql-specs}/floats.csv-spec[tag=atan2-result]
|===
+
+Supported types:
+
+include::types/atan2.asciidoc[]
diff --git a/docs/reference/esql/functions/ceil.asciidoc b/docs/reference/esql/functions/ceil.asciidoc
new file mode 100644
index 0000000000000..b35ab6d68b4e3
--- /dev/null
+++ b/docs/reference/esql/functions/ceil.asciidoc
@@ -0,0 +1,23 @@
+[[esql-ceil]]
+=== `CEIL`
+[.text-center]
+image::esql/functions/signature/ceil.svg[Embedded,opts=inline]
+
+Round a number up to the nearest integer.
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/math.csv-spec[tag=ceil]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/math.csv-spec[tag=ceil-result]
+|===
+
+NOTE: This is a noop for `long` (including unsigned) and `integer`.
+ For `double` this picks the closest `double` value to the integer ala
+ {javadoc}/java.base/java/lang/Math.html#ceil(double)[Math.ceil].
+
+Supported types:
+
+include::types/ceil.asciidoc[]
diff --git a/docs/reference/esql/functions/cos.asciidoc b/docs/reference/esql/functions/cos.asciidoc
index 39d2564dd7d73..bef12ba54f890 100644
--- a/docs/reference/esql/functions/cos.asciidoc
+++ b/docs/reference/esql/functions/cos.asciidoc
@@ -1,5 +1,8 @@
[[esql-cos]]
=== `COS`
+[.text-center]
+image::esql/functions/signature/cos.svg[Embedded,opts=inline]
+
https://en.wikipedia.org/wiki/Sine_and_cosine[Cosine] trigonometric function.
[source.merge.styled,esql]
@@ -10,3 +13,7 @@ include::{esql-specs}/floats.csv-spec[tag=cos]
|===
include::{esql-specs}/floats.csv-spec[tag=cos-result]
|===
+
+Supported types:
+
+include::types/cos.asciidoc[]
diff --git a/docs/reference/esql/functions/e.asciidoc b/docs/reference/esql/functions/e.asciidoc
index a9d2f3fa340e0..21e947bf4749d 100644
--- a/docs/reference/esql/functions/e.asciidoc
+++ b/docs/reference/esql/functions/e.asciidoc
@@ -1,5 +1,8 @@
[[esql-e]]
=== `E`
+[.text-center]
+image::esql/functions/signature/e.svg[Embedded,opts=inline]
+
{wikipedia}/E_(mathematical_constant)[Euler's number].
[source.merge.styled,esql]
diff --git a/docs/reference/esql/functions/floor.asciidoc b/docs/reference/esql/functions/floor.asciidoc
index 595e60e98a6d2..d997e35f71dad 100644
--- a/docs/reference/esql/functions/floor.asciidoc
+++ b/docs/reference/esql/functions/floor.asciidoc
@@ -1,5 +1,8 @@
[[esql-floor]]
=== `FLOOR`
+[.text-center]
+image::esql/functions/signature/floor.svg[Embedded,opts=inline]
+
Round a number down to the nearest integer.
[source.merge.styled,esql]
@@ -11,6 +14,10 @@ include::{esql-specs}/math.csv-spec[tag=floor]
include::{esql-specs}/math.csv-spec[tag=floor-result]
|===
-NOTE: This is a noop for `long` and `integer`. For `double` this picks the
- the closest `double` value to the integer ala
+NOTE: This is a noop for `long` (including unsigned) and `integer`.
+ For `double` this picks the closest `double` value to the integer ala
{javadoc}/java.base/java/lang/Math.html#floor(double)[Math.floor].
+
+Supported types:
+
+include::types/floor.asciidoc[]
diff --git a/docs/reference/esql/functions/greatest.asciidoc b/docs/reference/esql/functions/greatest.asciidoc
index e4eaedd31289d..9c192662dcaaa 100644
--- a/docs/reference/esql/functions/greatest.asciidoc
+++ b/docs/reference/esql/functions/greatest.asciidoc
@@ -18,3 +18,7 @@ include::{esql-specs}/math.csv-spec[tag=greatest-result]
NOTE: When run on `keyword` or `text` fields, this'll return the last string
in alphabetical order. When run on `boolean` columns this will return
`true` if any values are `true`.
+
+Supported types:
+
+include::types/greatest.asciidoc[]
diff --git a/docs/reference/esql/functions/least.asciidoc b/docs/reference/esql/functions/least.asciidoc
index 366ddb6deb1e0..8c702246fe5e3 100644
--- a/docs/reference/esql/functions/least.asciidoc
+++ b/docs/reference/esql/functions/least.asciidoc
@@ -1,5 +1,7 @@
[[esql-least]]
=== `LEAST`
+[.text-center]
+image::esql/functions/signature/least.svg[Embedded,opts=inline]
Returns the minimum value from many columns. This is similar to <>
except it's intended to run on multiple columns at once.
@@ -16,3 +18,7 @@ include::{esql-specs}/math.csv-spec[tag=least-result]
NOTE: When run on `keyword` or `text` fields, this'll return the first string
in alphabetical order. When run on `boolean` columns this will return
`false` if any values are `false`.
+
+Supported types:
+
+include::types/least.asciidoc[]
diff --git a/docs/reference/esql/functions/left.asciidoc b/docs/reference/esql/functions/left.asciidoc
new file mode 100644
index 0000000000000..42537d9560ebf
--- /dev/null
+++ b/docs/reference/esql/functions/left.asciidoc
@@ -0,0 +1,14 @@
+[[esql-left]]
+=== `LEFT`
+
+Returns the substring that extracts 'length' chars
+from the string, starting from the left.
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/string.csv-spec[tag=left]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/string.csv-spec[tag=left-result]
+|===
diff --git a/docs/reference/esql/functions/log10.asciidoc b/docs/reference/esql/functions/log10.asciidoc
index ee19d5a61d1fa..08a6fb91041c7 100644
--- a/docs/reference/esql/functions/log10.asciidoc
+++ b/docs/reference/esql/functions/log10.asciidoc
@@ -1,5 +1,8 @@
[[esql-log10]]
=== `LOG10`
+[.text-center]
+image::esql/functions/signature/log10.svg[Embedded,opts=inline]
+
Returns the log base 10. The input can be any numeric value, the return value
is always a double.
@@ -13,3 +16,7 @@ include::{esql-specs}/math.csv-spec[tag=log10]
|===
include::{esql-specs}/math.csv-spec[tag=log10-result]
|===
+
+Supported types:
+
+include::types/log10.asciidoc[]
diff --git a/docs/reference/esql/functions/pi.asciidoc b/docs/reference/esql/functions/pi.asciidoc
index 631018fed0055..75e9767d98a33 100644
--- a/docs/reference/esql/functions/pi.asciidoc
+++ b/docs/reference/esql/functions/pi.asciidoc
@@ -1,5 +1,8 @@
[[esql-pi]]
=== `PI`
+[.text-center]
+image::esql/functions/signature/pi.svg[Embedded,opts=inline]
+
The {wikipedia}/Pi[ratio] of a circle's circumference to its diameter.
[source.merge.styled,esql]
diff --git a/docs/reference/esql/functions/signature/acos.svg b/docs/reference/esql/functions/signature/acos.svg
index 03131b22b78f5..6a2e2c04cd20e 100644
--- a/docs/reference/esql/functions/signature/acos.svg
+++ b/docs/reference/esql/functions/signature/acos.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/signature/asin.svg b/docs/reference/esql/functions/signature/asin.svg
index 762533569d089..9792e7316b138 100644
--- a/docs/reference/esql/functions/signature/asin.svg
+++ b/docs/reference/esql/functions/signature/asin.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/signature/atan.svg b/docs/reference/esql/functions/signature/atan.svg
index d3bc94c779e79..184956ff2e126 100644
--- a/docs/reference/esql/functions/signature/atan.svg
+++ b/docs/reference/esql/functions/signature/atan.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/signature/atan2.svg b/docs/reference/esql/functions/signature/atan2.svg
index 1ded29d4311cc..f2295d3d98f16 100644
--- a/docs/reference/esql/functions/signature/atan2.svg
+++ b/docs/reference/esql/functions/signature/atan2.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/signature/ceil.svg b/docs/reference/esql/functions/signature/ceil.svg
new file mode 100644
index 0000000000000..baff44ba0cb70
--- /dev/null
+++ b/docs/reference/esql/functions/signature/ceil.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/signature/cos.svg b/docs/reference/esql/functions/signature/cos.svg
index 9bcc26aea71d8..f06a24726f71a 100644
--- a/docs/reference/esql/functions/signature/cos.svg
+++ b/docs/reference/esql/functions/signature/cos.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/signature/cosh.svg b/docs/reference/esql/functions/signature/cosh.svg
index c6a0483d2241c..54ea9bff84097 100644
--- a/docs/reference/esql/functions/signature/cosh.svg
+++ b/docs/reference/esql/functions/signature/cosh.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/signature/floor.svg b/docs/reference/esql/functions/signature/floor.svg
index 6b224de1f9e77..7e153548bfd82 100644
--- a/docs/reference/esql/functions/signature/floor.svg
+++ b/docs/reference/esql/functions/signature/floor.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/signature/least.svg b/docs/reference/esql/functions/signature/least.svg
index 52507c4c62d4f..ec0ed0efcec62 100644
--- a/docs/reference/esql/functions/signature/least.svg
+++ b/docs/reference/esql/functions/signature/least.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/signature/log10.svg b/docs/reference/esql/functions/signature/log10.svg
index 62993d668f0bd..50edcf6ea943f 100644
--- a/docs/reference/esql/functions/signature/log10.svg
+++ b/docs/reference/esql/functions/signature/log10.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/signature/sin.svg b/docs/reference/esql/functions/signature/sin.svg
index f22b1bc19a259..eb20f3386d441 100644
--- a/docs/reference/esql/functions/signature/sin.svg
+++ b/docs/reference/esql/functions/signature/sin.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/signature/sinh.svg b/docs/reference/esql/functions/signature/sinh.svg
index fb21b2d436bf2..30361aca1fb35 100644
--- a/docs/reference/esql/functions/signature/sinh.svg
+++ b/docs/reference/esql/functions/signature/sinh.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/signature/sqrt.svg b/docs/reference/esql/functions/signature/sqrt.svg
index 55d39a59c8f64..77c657120735f 100644
--- a/docs/reference/esql/functions/signature/sqrt.svg
+++ b/docs/reference/esql/functions/signature/sqrt.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/signature/tan.svg b/docs/reference/esql/functions/signature/tan.svg
index c2ea4c2081a16..8ac6ee37cb52a 100644
--- a/docs/reference/esql/functions/signature/tan.svg
+++ b/docs/reference/esql/functions/signature/tan.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/signature/tanh.svg b/docs/reference/esql/functions/signature/tanh.svg
index 046e09369b1dc..dfe167afc5470 100644
--- a/docs/reference/esql/functions/signature/tanh.svg
+++ b/docs/reference/esql/functions/signature/tanh.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/sin.asciidoc b/docs/reference/esql/functions/sin.asciidoc
index 7c02ded0a2f72..a622a6e496ce9 100644
--- a/docs/reference/esql/functions/sin.asciidoc
+++ b/docs/reference/esql/functions/sin.asciidoc
@@ -1,5 +1,8 @@
[[esql-sin]]
=== `SIN`
+[.text-center]
+image::esql/functions/signature/sin.svg[Embedded,opts=inline]
+
https://en.wikipedia.org/wiki/Sine_and_cosine[Sine] trigonometric function.
[source.merge.styled,esql]
@@ -10,3 +13,7 @@ include::{esql-specs}/floats.csv-spec[tag=sin]
|===
include::{esql-specs}/floats.csv-spec[tag=sin-result]
|===
+
+Supported types:
+
+include::types/sin.asciidoc[]
diff --git a/docs/reference/esql/functions/sinh.asciidoc b/docs/reference/esql/functions/sinh.asciidoc
index 241b4f978349d..054170d3fed27 100644
--- a/docs/reference/esql/functions/sinh.asciidoc
+++ b/docs/reference/esql/functions/sinh.asciidoc
@@ -1,5 +1,8 @@
[[esql-sinh]]
=== `SINH`
+[.text-center]
+image::esql/functions/signature/sinh.svg[Embedded,opts=inline]
+
https://en.wikipedia.org/wiki/Hyperbolic_functions[Sine] hyperbolic function.
[source.merge.styled,esql]
@@ -10,3 +13,7 @@ include::{esql-specs}/floats.csv-spec[tag=sinh]
|===
include::{esql-specs}/floats.csv-spec[tag=sinh-result]
|===
+
+Supported types:
+
+include::types/sinh.asciidoc[]
diff --git a/docs/reference/esql/functions/sqrt.asciidoc b/docs/reference/esql/functions/sqrt.asciidoc
index 189deefa2cf90..a64dd0d422d15 100644
--- a/docs/reference/esql/functions/sqrt.asciidoc
+++ b/docs/reference/esql/functions/sqrt.asciidoc
@@ -1,5 +1,8 @@
[[esql-sqrt]]
=== `SQRT`
+[.text-center]
+image::esql/functions/signature/sqrt.svg[Embedded,opts=inline]
+
Returns the square root of a number. The input can be any numeric value, the return value
is always a double.
@@ -13,3 +16,7 @@ include::{esql-specs}/math.csv-spec[tag=sqrt]
|===
include::{esql-specs}/math.csv-spec[tag=sqrt-result]
|===
+
+Supported types:
+
+include::types/sqrt.asciidoc[]
diff --git a/docs/reference/esql/functions/tan.asciidoc b/docs/reference/esql/functions/tan.asciidoc
index fc64317135a44..8d5a58e7555b1 100644
--- a/docs/reference/esql/functions/tan.asciidoc
+++ b/docs/reference/esql/functions/tan.asciidoc
@@ -1,5 +1,8 @@
[[esql-tan]]
=== `TAN`
+[.text-center]
+image::esql/functions/signature/tan.svg[Embedded,opts=inline]
+
https://en.wikipedia.org/wiki/Sine_and_cosine[Tangent] trigonometric function.
[source.merge.styled,esql]
@@ -10,3 +13,7 @@ include::{esql-specs}/floats.csv-spec[tag=tan]
|===
include::{esql-specs}/floats.csv-spec[tag=tan-result]
|===
+
+Supported types:
+
+include::types/tan.asciidoc[]
diff --git a/docs/reference/esql/functions/tanh.asciidoc b/docs/reference/esql/functions/tanh.asciidoc
index f9fcec10394d6..e5d67d9f19063 100644
--- a/docs/reference/esql/functions/tanh.asciidoc
+++ b/docs/reference/esql/functions/tanh.asciidoc
@@ -1,5 +1,8 @@
[[esql-tanh]]
=== `TANH`
+[.text-center]
+image::esql/functions/signature/tanh.svg[Embedded,opts=inline]
+
https://en.wikipedia.org/wiki/Hyperbolic_functions[Tangent] hyperbolic function.
[source.merge.styled,esql]
@@ -10,3 +13,7 @@ include::{esql-specs}/floats.csv-spec[tag=tanh]
|===
include::{esql-specs}/floats.csv-spec[tag=tanh-result]
|===
+
+Supported types:
+
+include::types/tanh.asciidoc[]
diff --git a/docs/reference/esql/functions/tau.asciidoc b/docs/reference/esql/functions/tau.asciidoc
index f2891baf73db6..c35d07fe74642 100644
--- a/docs/reference/esql/functions/tau.asciidoc
+++ b/docs/reference/esql/functions/tau.asciidoc
@@ -1,5 +1,8 @@
[[esql-tau]]
=== `TAU`
+[.text-center]
+image::esql/functions/signature/tau.svg[Embedded,opts=inline]
+
The https://tauday.com/tau-manifesto[ratio] of a circle's circumference to its radius.
[source.merge.styled,esql]
diff --git a/docs/reference/esql/functions/types/acos.asciidoc b/docs/reference/esql/functions/types/acos.asciidoc
index dd4f6b0725cc8..1df8dd6526f18 100644
--- a/docs/reference/esql/functions/types/acos.asciidoc
+++ b/docs/reference/esql/functions/types/acos.asciidoc
@@ -1,6 +1,6 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
-arg1 | result
+n | result
double | double
integer | double
long | double
diff --git a/docs/reference/esql/functions/types/asin.asciidoc b/docs/reference/esql/functions/types/asin.asciidoc
index dd4f6b0725cc8..1df8dd6526f18 100644
--- a/docs/reference/esql/functions/types/asin.asciidoc
+++ b/docs/reference/esql/functions/types/asin.asciidoc
@@ -1,6 +1,6 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
-arg1 | result
+n | result
double | double
integer | double
long | double
diff --git a/docs/reference/esql/functions/types/atan.asciidoc b/docs/reference/esql/functions/types/atan.asciidoc
index dd4f6b0725cc8..1df8dd6526f18 100644
--- a/docs/reference/esql/functions/types/atan.asciidoc
+++ b/docs/reference/esql/functions/types/atan.asciidoc
@@ -1,6 +1,6 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
-arg1 | result
+n | result
double | double
integer | double
long | double
diff --git a/docs/reference/esql/functions/types/atan2.asciidoc b/docs/reference/esql/functions/types/atan2.asciidoc
index 3b01caac0e4ee..74fffe9056a16 100644
--- a/docs/reference/esql/functions/types/atan2.asciidoc
+++ b/docs/reference/esql/functions/types/atan2.asciidoc
@@ -1,6 +1,6 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
-arg1 | arg2 | result
+y | x | result
double | double | double
double | integer | double
double | long | double
diff --git a/docs/reference/esql/functions/types/case.asciidoc b/docs/reference/esql/functions/types/case.asciidoc
index fed63b005dbc7..269265c5638e1 100644
--- a/docs/reference/esql/functions/types/case.asciidoc
+++ b/docs/reference/esql/functions/types/case.asciidoc
@@ -1,5 +1,5 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
arg1 | arg2... | result
-boolean | keyword | keyword | keyword
+
|===
diff --git a/docs/reference/esql/functions/types/ceil.asciidoc b/docs/reference/esql/functions/types/ceil.asciidoc
new file mode 100644
index 0000000000000..f1831429aa95c
--- /dev/null
+++ b/docs/reference/esql/functions/types/ceil.asciidoc
@@ -0,0 +1,8 @@
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+n | result
+double | double
+integer | integer
+long | long
+unsigned_long | unsigned_long
+|===
diff --git a/docs/reference/esql/functions/types/coalesce.asciidoc b/docs/reference/esql/functions/types/coalesce.asciidoc
index e1ef2d3d60560..cfb032571c5d3 100644
--- a/docs/reference/esql/functions/types/coalesce.asciidoc
+++ b/docs/reference/esql/functions/types/coalesce.asciidoc
@@ -1,49 +1,9 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
arg1 | arg2... | result
-boolean | boolean
boolean | boolean | boolean
-boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean
-integer | integer
integer | integer | integer
-integer | integer | integer | integer
-integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer | integer | integer | integer | integer
-keyword | keyword
keyword | keyword | keyword
-keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword
-long | long
long | long | long
-long | long | long | long
-long | long | long | long | long
-long | long | long | long | long | long
-long | long | long | long | long | long | long
-long | long | long | long | long | long | long | long
-long | long | long | long | long | long | long | long | long
-long | long | long | long | long | long | long | long | long | long
-text | text
text | text | text
-text | text | text | text
-text | text | text | text | text
-text | text | text | text | text | text
-text | text | text | text | text | text | text
-text | text | text | text | text | text | text | text
-text | text | text | text | text | text | text | text | text
-text | text | text | text | text | text | text | text | text | text
|===
diff --git a/docs/reference/esql/functions/types/cos.asciidoc b/docs/reference/esql/functions/types/cos.asciidoc
index dd4f6b0725cc8..1df8dd6526f18 100644
--- a/docs/reference/esql/functions/types/cos.asciidoc
+++ b/docs/reference/esql/functions/types/cos.asciidoc
@@ -1,6 +1,6 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
-arg1 | result
+n | result
double | double
integer | double
long | double
diff --git a/docs/reference/esql/functions/types/cosh.asciidoc b/docs/reference/esql/functions/types/cosh.asciidoc
index dd4f6b0725cc8..1df8dd6526f18 100644
--- a/docs/reference/esql/functions/types/cosh.asciidoc
+++ b/docs/reference/esql/functions/types/cosh.asciidoc
@@ -1,6 +1,6 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
-arg1 | result
+n | result
double | double
integer | double
long | double
diff --git a/docs/reference/esql/functions/types/e.asciidoc b/docs/reference/esql/functions/types/e.asciidoc
index ddb78d7b651fb..5854465d5fb49 100644
--- a/docs/reference/esql/functions/types/e.asciidoc
+++ b/docs/reference/esql/functions/types/e.asciidoc
@@ -1,5 +1,5 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
result
-integer | double
+
|===
diff --git a/docs/reference/esql/functions/types/floor.asciidoc b/docs/reference/esql/functions/types/floor.asciidoc
index 09cb78511d275..54341360fed3f 100644
--- a/docs/reference/esql/functions/types/floor.asciidoc
+++ b/docs/reference/esql/functions/types/floor.asciidoc
@@ -1,5 +1,8 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
-arg1 | result
+n | result
double | double
+integer | integer
+long | long
+unsigned_long | unsigned_long
|===
diff --git a/docs/reference/esql/functions/types/greatest.asciidoc b/docs/reference/esql/functions/types/greatest.asciidoc
index d8dc24ced9169..0ce6e8148d1a0 100644
--- a/docs/reference/esql/functions/types/greatest.asciidoc
+++ b/docs/reference/esql/functions/types/greatest.asciidoc
@@ -1,49 +1,9 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
first | rest... | result
-boolean | boolean
boolean | boolean | boolean
-boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean
-integer | integer
integer | integer | integer
-integer | integer | integer | integer
-integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer | integer | integer | integer | integer
-keyword | keyword
keyword | keyword | keyword
-keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword
-long | long
long | long | long
-long | long | long | long
-long | long | long | long | long
-long | long | long | long | long | long
-long | long | long | long | long | long | long
-long | long | long | long | long | long | long | long
-long | long | long | long | long | long | long | long | long
-long | long | long | long | long | long | long | long | long | long
-text | text
text | text | text
-text | text | text | text
-text | text | text | text | text
-text | text | text | text | text | text
-text | text | text | text | text | text | text
-text | text | text | text | text | text | text | text
-text | text | text | text | text | text | text | text | text
-text | text | text | text | text | text | text | text | text | text
|===
diff --git a/docs/reference/esql/functions/types/least.asciidoc b/docs/reference/esql/functions/types/least.asciidoc
index e1ef2d3d60560..0ce6e8148d1a0 100644
--- a/docs/reference/esql/functions/types/least.asciidoc
+++ b/docs/reference/esql/functions/types/least.asciidoc
@@ -1,49 +1,9 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
-arg1 | arg2... | result
-boolean | boolean
+first | rest... | result
boolean | boolean | boolean
-boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean
-boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean | boolean
-integer | integer
integer | integer | integer
-integer | integer | integer | integer
-integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer | integer | integer | integer
-integer | integer | integer | integer | integer | integer | integer | integer | integer | integer
-keyword | keyword
keyword | keyword | keyword
-keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword
-keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword | keyword
-long | long
long | long | long
-long | long | long | long
-long | long | long | long | long
-long | long | long | long | long | long
-long | long | long | long | long | long | long
-long | long | long | long | long | long | long | long
-long | long | long | long | long | long | long | long | long
-long | long | long | long | long | long | long | long | long | long
-text | text
text | text | text
-text | text | text | text
-text | text | text | text | text
-text | text | text | text | text | text
-text | text | text | text | text | text | text
-text | text | text | text | text | text | text | text
-text | text | text | text | text | text | text | text | text
-text | text | text | text | text | text | text | text | text | text
|===
diff --git a/docs/reference/esql/functions/types/log10.asciidoc b/docs/reference/esql/functions/types/log10.asciidoc
index 09cb78511d275..1df8dd6526f18 100644
--- a/docs/reference/esql/functions/types/log10.asciidoc
+++ b/docs/reference/esql/functions/types/log10.asciidoc
@@ -1,5 +1,8 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
-arg1 | result
+n | result
double | double
+integer | double
+long | double
+unsigned_long | double
|===
diff --git a/docs/reference/esql/functions/types/pi.asciidoc b/docs/reference/esql/functions/types/pi.asciidoc
index ddb78d7b651fb..5854465d5fb49 100644
--- a/docs/reference/esql/functions/types/pi.asciidoc
+++ b/docs/reference/esql/functions/types/pi.asciidoc
@@ -1,5 +1,5 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
result
-integer | double
+
|===
diff --git a/docs/reference/esql/functions/types/sin.asciidoc b/docs/reference/esql/functions/types/sin.asciidoc
index dd4f6b0725cc8..1df8dd6526f18 100644
--- a/docs/reference/esql/functions/types/sin.asciidoc
+++ b/docs/reference/esql/functions/types/sin.asciidoc
@@ -1,6 +1,6 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
-arg1 | result
+n | result
double | double
integer | double
long | double
diff --git a/docs/reference/esql/functions/types/sinh.asciidoc b/docs/reference/esql/functions/types/sinh.asciidoc
index dd4f6b0725cc8..1df8dd6526f18 100644
--- a/docs/reference/esql/functions/types/sinh.asciidoc
+++ b/docs/reference/esql/functions/types/sinh.asciidoc
@@ -1,6 +1,6 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
-arg1 | result
+n | result
double | double
integer | double
long | double
diff --git a/docs/reference/esql/functions/types/sqrt.asciidoc b/docs/reference/esql/functions/types/sqrt.asciidoc
index 09cb78511d275..1df8dd6526f18 100644
--- a/docs/reference/esql/functions/types/sqrt.asciidoc
+++ b/docs/reference/esql/functions/types/sqrt.asciidoc
@@ -1,5 +1,8 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
-arg1 | result
+n | result
double | double
+integer | double
+long | double
+unsigned_long | double
|===
diff --git a/docs/reference/esql/functions/types/tan.asciidoc b/docs/reference/esql/functions/types/tan.asciidoc
index dd4f6b0725cc8..1df8dd6526f18 100644
--- a/docs/reference/esql/functions/types/tan.asciidoc
+++ b/docs/reference/esql/functions/types/tan.asciidoc
@@ -1,6 +1,6 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
-arg1 | result
+n | result
double | double
integer | double
long | double
diff --git a/docs/reference/esql/functions/types/tanh.asciidoc b/docs/reference/esql/functions/types/tanh.asciidoc
index dd4f6b0725cc8..1df8dd6526f18 100644
--- a/docs/reference/esql/functions/types/tanh.asciidoc
+++ b/docs/reference/esql/functions/types/tanh.asciidoc
@@ -1,6 +1,6 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
-arg1 | result
+n | result
double | double
integer | double
long | double
diff --git a/docs/reference/esql/functions/types/tau.asciidoc b/docs/reference/esql/functions/types/tau.asciidoc
index ddb78d7b651fb..5854465d5fb49 100644
--- a/docs/reference/esql/functions/types/tau.asciidoc
+++ b/docs/reference/esql/functions/types/tau.asciidoc
@@ -1,5 +1,5 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
result
-integer | double
+
|===
diff --git a/docs/reference/setup/install/auto-config-output.asciidoc b/docs/reference/setup/install/auto-config-output.asciidoc
deleted file mode 100644
index 99fc097646f00..0000000000000
--- a/docs/reference/setup/install/auto-config-output.asciidoc
+++ /dev/null
@@ -1,23 +0,0 @@
-[role="exclude"]
-
-["source","sh",subs="attributes"]
-----
-The generated password for the elastic built-in superuser is:
-
-
-The enrollment token for Kibana instances, valid for the next 30 minutes:
-
-
-The hex-encoded SHA-256 fingerprint of the generated HTTPS CA DER-encoded certificate:
-
-
-You can complete the following actions at any time:
-Reset the password of the elastic built-in superuser with
-'bin{slash}elasticsearch-reset-password -u elastic'.
-
-Generate an enrollment token for Kibana instances with
-'bin{slash}elasticsearch-create-enrollment-token -s kibana'.
-
-Generate an enrollment token for Elasticsearch nodes with
-'bin{slash}elasticsearch-create-enrollment-token -s node'.
-----
\ No newline at end of file
diff --git a/docs/reference/setup/install/check-running.asciidoc b/docs/reference/setup/install/check-running.asciidoc
index 726de3ed9a0e4..ecbb9895e6c34 100644
--- a/docs/reference/setup/install/check-running.asciidoc
+++ b/docs/reference/setup/install/check-running.asciidoc
@@ -5,7 +5,7 @@ You can test that your {es} node is running by sending an HTTPS request to port
["source","sh",subs="attributes"]
----
-curl --cacert {es-conf}{slash}certs{slash}http_ca.crt -u elastic https://localhost:9200 <1>
+curl --cacert {es-conf}{slash}certs{slash}http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 <1>
----
// NOTCONSOLE
<1> Ensure that you use `https` in your call, or the request will fail.
@@ -13,8 +13,7 @@ curl --cacert {es-conf}{slash}certs{slash}http_ca.crt -u elastic https://localho
`--cacert`::
Path to the generated `http_ca.crt` certificate for the HTTP layer.
-Enter the password for the `elastic` user that was generated during
-installation, which should return a response like this:
+The call returns a response like this:
////
The following hidden request is required before the response. Otherwise, you'll
diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc
index 9867449fedd53..0abba259d46eb 100644
--- a/docs/reference/setup/install/docker.asciidoc
+++ b/docs/reference/setup/install/docker.asciidoc
@@ -1,10 +1,9 @@
[[docker]]
=== Install {es} with Docker
-{es} is also available as Docker images. A list of all published Docker
-images and tags is available at
-https://www.docker.elastic.co[www.docker.elastic.co]. The source files are
-in
+{es} is available as a Docker image. A list of all published Docker images and
+tags is available at https://www.docker.elastic.co[www.docker.elastic.co]. The
+source files are in
https://github.com/elastic/elasticsearch/blob/{branch}/distribution/docker[Github].
include::license.asciidoc[]
@@ -13,19 +12,19 @@ Starting in {es} 8.0, security is enabled by default. With security enabled,
{stack} {security-features} require TLS encryption for the transport networking
layer, or your cluster will fail to start.
-==== Install Docker Desktop or Docker Engine
+==== Install Docker
-Install the appropriate https://docs.docker.com/get-docker/[Docker application]
-for your operating system.
+Visit https://docs.docker.com/get-docker/[Get Docker] to install Docker for your
+environment.
-NOTE: Make sure that Docker is allotted at least 4GiB of memory. In Docker
-Desktop, you configure resource usage on the Advanced tab in Preference (macOS)
-or Settings (Windows).
+IMPORTANT: If using Docker Desktop, make sure to allocate at least 4GB of
+memory. You can adjust memory usage in Docker Desktop by going to **Settings >
+Resources**.
-==== Pull the {es} Docker image
+==== Pull the Docker image
-Obtaining {es} for Docker is as simple as issuing a `docker pull` command
-against the Elastic Docker registry.
+Use the `docker pull` command to pull the {es} image from the Elastic Docker
+registry.
ifeval::["{release-state}"=="unreleased"]
@@ -44,12 +43,11 @@ docker pull {docker-repo}:{version}
endif::[]
[[docker-verify-signature]]
-==== Optional: Verify the {es} Docker image signature
+==== Optional: Verify the image signature
-Although it's optional, we highly recommend verifying the signatures included with your downloaded Docker images to ensure that the images are valid.
+Verify the signatures included in your {es} Docker images to ensure they're valid.
Elastic images are signed with https://docs.sigstore.dev/cosign/overview/[Cosign] which is part of the https://www.sigstore.dev/[Sigstore] project.
-
Cosign supports container signing, verification, and storage in an OCI registry.
ifeval::["{release-state}"=="unreleased"]
@@ -76,7 +74,7 @@ cosign verify --key cosign.pub {docker-repo}:{version} <2>
The command prints the check results and the signature payload in JSON format:
-[source,sh]
+[source,sh,subs="attributes"]
--------------------------------------------
Verification for docker.elastic.co/elasticsearch/elasticsearch:{version} --
The following checks were performed on each of these signatures:
@@ -87,47 +85,35 @@ The following checks were performed on each of these signatures:
endif::[]
-Now that you have verified the {es} Docker image signature, you can start a
-<> or <>
-cluster.
[[docker-cli-run-dev-mode]]
-==== Start a single-node cluster with Docker
+==== Run {es} in Docker
-ifeval::["{release-state}"=="unreleased"]
+Use Docker commands to start a single-node {es} cluster for development or
+testing. You can then run additional Docker commands to add nodes to the test
+cluster.
-WARNING: Version {version} of the {es} Docker image has not yet been released.
+TIP: This setup doesn't run multiple {es} nodes or {kib} by default. To create a
+multi-node cluster with {kib}, use Docker Compose instead. See
+<>.
-endif::[]
-If you're starting a single-node {es} cluster in a Docker container, security
-will be automatically enabled and configured for you. When you start {es} for
-the first time, the following security configuration occurs automatically:
+===== Start a single-node cluster
-* <> are generated
-for the transport and HTTP layers.
-* The Transport Layer Security (TLS) configuration settings are written to
-`elasticsearch.yml`.
-* A password is generated for the `elastic` user.
-* An enrollment token is generated for {kib}.
+ifeval::["{release-state}"=="unreleased"]
-You can then {kibana-ref}/docker.html[start {kib}] and enter the enrollment
-token, which is valid for 30 minutes. This token automatically applies the
-security settings from your {es} cluster, authenticates to {es} with the
-`kibana_system` user, and writes the security configuration to `kibana.yml`.
+WARNING: Version {version} of the {es} Docker image has not yet been released.
-The following commands start a single-node {es} cluster for development or
-testing.
+endif::[]
-. Create a new docker network for {es} and {kib}
+. Create a new docker network.
+
[source,sh]
----
docker network create elastic
----
-. Start {es} in Docker. A password is generated for the `elastic` user and
-output to the terminal, plus an enrollment token for enrolling {kib}.
+. Start an {es} container.
+
--
ifeval::["{release-state}"=="unreleased"]
@@ -140,78 +126,64 @@ endif::[]
ifeval::["{release-state}"!="unreleased"]
[source,sh,subs="attributes"]
----
-docker run --name es01 --net elastic -p 9200:9200 -it {docker-image}
+docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB {docker-image}
----
+TIP: Use the `-m` flag to set a memory limit for the container.
+
+The command prints the `elastic` user password and an enrollment token for {kib}.
+
endif::[]
--
-+
-TIP: You might need to scroll back a bit in the terminal to view the password
-and enrollment token.
-. Copy the generated password and enrollment token and save them in a secure
-location. These values are shown only when you start {es} for the first time.
+. Copy the generated `elastic` password and enrollment token. These credentials
+are only shown when you start {es} for the first time. You can regenerate the
+credentials using the following commands.
+
-[NOTE]
-====
-If you need to reset the password for the `elastic` user or other
-built-in users, run the <> tool.
-This tool is available in the {es} `/bin` directory of the Docker container.
-For example:
+--
+[source,sh,subs="attributes"]
+----
+docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password
+docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana
+----
+
+We recommend storing the `elastic` password as an environment variable in your shell. Example:
[source,sh]
----
-docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password
+export ELASTIC_PASSWORD="your_password"
----
-====
+--
-. Copy the `http_ca.crt` security certificate from your Docker container to
-your local machine.
+. Copy the `http_ca.crt` SSL certificate from the container to your local machine.
+
[source,sh]
----
docker cp es01:/usr/share/elasticsearch/config/certs/http_ca.crt .
----
-. Open a new terminal and verify that you can connect to your {es} cluster by
-making an authenticated call, using the `http_ca.crt` file that you copied from
-your Docker container. Enter the password for the `elastic` user when prompted.
+. Make a REST API call to {es} to ensure the {es} container is running.
+
[source,sh]
----
-curl --cacert http_ca.crt -u elastic https://localhost:9200
+curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200
----
// NOTCONSOLE
-==== Enroll additional nodes
-
-When you start {es} for the first time, the installation process configures a single-node cluster by default. This process also generates an enrollment token
-and prints it to your terminal. If you want a node to join an existing cluster,
-start the new node with the generated enrollment token.
+===== Add more nodes
+. Use an existing node to generate a enrollment token for the new node.
++
--
-.Generating enrollment tokens
-****
-The enrollment token is valid for 30 minutes. If you need to generate a
-new enrollment token, run the
-<> tool on your
-existing node. This tool is available in the {es} `bin` directory of the Docker
-container.
-
-For example, run the following command on the existing `es01` node to
-generate an enrollment token for new {es} nodes:
-
[source,sh]
----
docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s node
----
-****
---
-. In the terminal where you started your first node, copy the generated
-enrollment token for adding new {es} nodes.
+The enrollment token is valid for 30 minutes.
+--
-. On your new node, start {es} and include the generated enrollment token.
+. Start a new {es} container. Include the enrollment token as an environment variable.
+
--
ifeval::["{release-state}"=="unreleased"]
@@ -228,10 +200,16 @@ docker run -e ENROLLMENT_TOKEN="" --name es02 --net elastic -it {docker-i
----
endif::[]
-
-{es} is now configured to join the existing cluster.
--
+. Call the <> to verify the node was added to the cluster.
++
+[source,sh]
+----
+curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200/_cat/nodes
+----
+// NOTCONSOLE
+
===== Setting JVM heap size
If you experience issues where the container where your first node is running
exits when your second node starts, explicitly set values for the JVM heap size.
diff --git a/docs/reference/setup/install/package-security.asciidoc b/docs/reference/setup/install/package-security.asciidoc
index fb613c4786ef2..40bd49d064b43 100644
--- a/docs/reference/setup/install/package-security.asciidoc
+++ b/docs/reference/setup/install/package-security.asciidoc
@@ -10,31 +10,13 @@ the `elastic` built-in superuser.
* Certificates and keys for TLS are generated for the transport and HTTP layer,
and TLS is enabled and configured with these keys and certificates.
-The password and certificate and keys are output to your terminal. For example:
+The password and certificate and keys are output to your terminal.
+
+We recommend storing the `elastic` password as an environment variable in your shell. Example:
[source,sh]
----
- -------Security autoconfiguration information-------
-
-Authentication and authorization are enabled.
-TLS for the transport and HTTP layers is enabled and configured.
-
-The generated password for the elastic built-in superuser is :
-
-If this node should join an existing cluster, you can reconfigure this with
-'/usr/share/elasticsearch/bin/elasticsearch-reconfigure-node --enrollment-token '
-after creating an enrollment token on your existing cluster.
-
-You can complete the following actions at any time:
-
-Reset the password of the elastic built-in superuser with
-'/usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic'.
-
-Generate an enrollment token for Kibana instances with
- '/usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana'.
-
-Generate an enrollment token for Elasticsearch nodes with
-'/usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s node'.
+export ELASTIC_PASSWORD="your_password"
----
===== Reconfigure a node to join an existing cluster
diff --git a/docs/reference/setup/install/targz-start.asciidoc b/docs/reference/setup/install/targz-start.asciidoc
index d9208d7d70e5f..294f0e1541fbc 100644
--- a/docs/reference/setup/install/targz-start.asciidoc
+++ b/docs/reference/setup/install/targz-start.asciidoc
@@ -18,11 +18,14 @@ and TLS is enabled and configured with these keys and certificates.
* An enrollment token is generated for {kib}, which is valid for 30 minutes.
The password for the `elastic` user and the enrollment token for {kib} are
-output to your terminal. For example:
+output to your terminal.
-:slash: /
+We recommend storing the `elastic` password as an environment variable in your shell. Example:
-include::auto-config-output.asciidoc[]
+[source,sh]
+----
+export ELASTIC_PASSWORD="your_password"
+----
If you have password-protected the {es} keystore, you will be prompted
to enter the keystore's password. See <> for more
diff --git a/docs/reference/setup/install/zip-windows-start.asciidoc b/docs/reference/setup/install/zip-windows-start.asciidoc
index 16566d92023d8..29356d398c808 100644
--- a/docs/reference/setup/install/zip-windows-start.asciidoc
+++ b/docs/reference/setup/install/zip-windows-start.asciidoc
@@ -18,11 +18,14 @@ and TLS is enabled and configured with these keys and certificates.
* An enrollment token is generated for {kib}, which is valid for 30 minutes.
The password for the `elastic` user and the enrollment token for {kib} are
-output to your terminal. For example:
+output to your terminal.
-:slash: \
+We recommend storing the `elastic` password as an environment variable in your shell. Example:
-include::auto-config-output.asciidoc[]
+[source,sh]
+----
+$ELASTIC_PASSWORD = "your_password"
+----
If you have password-protected the {es} keystore, you will be prompted to
enter the keystore's password. See <> for more details.
diff --git a/docs/reference/tab-widgets/api-call-widget.asciidoc b/docs/reference/tab-widgets/api-call-widget.asciidoc
deleted file mode 100644
index 37f49f89847cf..0000000000000
--- a/docs/reference/tab-widgets/api-call-widget.asciidoc
+++ /dev/null
@@ -1,40 +0,0 @@
-++++
-
- * Open {@code x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java}
- * and run it. IntelliJ will take a few minutes to compile everything but the test itself
- * should take only a few seconds. This is a fast path to running ESQL's integration tests.
+ * Run the csv tests (see {@code x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java})
+ * from within Intellij or, alternatively, via Gradle:
+ * {@code ./gradlew -p x-pack/plugin/esql test --tests "org.elasticsearch.xpack.esql.CsvTests"}
+ * IntelliJ will take a few minutes to compile everything but the test itself should take only a few seconds.
+ * This is a fast path to running ESQL's integration tests.
*
*
* Pick one of the csv-spec files in {@code x-pack/plugin/esql/qa/testFixtures/src/main/resources/}
@@ -121,6 +123,15 @@
* asciidoc ceremony to make the result look right in the rendered docs.
*
*
+ * Auto-generate a syntax diagram and a table with supported types by running
+ * {@code ./gradlew x-pack:plugin:esql:copyGeneratedDocs}
+ * The generated files can be found here
+ * {@code docs/reference/esql/functions/signature/myfunction.svg}
+ * and here
+ * {@code docs/reference/esql/functions/types/myfunction.asciidoc}
+ * Make sure to commit them and reference them in your doc file.
+ *
+ *
* Build the docs by cloning the docs repo
* and running:
*
{@code
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java
new file mode 100644
index 0000000000000..9c1133769f846
--- /dev/null
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java
@@ -0,0 +1,127 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.string;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.compute.ann.Evaluator;
+import org.elasticsearch.compute.ann.Fixed;
+import org.elasticsearch.compute.operator.EvalOperator;
+import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction;
+import org.elasticsearch.xpack.ql.expression.gen.script.ScriptTemplate;
+import org.elasticsearch.xpack.ql.tree.NodeInfo;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataType;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST;
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND;
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isInteger;
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isString;
+
+/**
+ * left(foo, len) is an alias for substring(foo, 0, len)
+ */
+public class Left extends ScalarFunction implements EvaluatorMapper {
+
+ private final Source source;
+
+ private final Expression str;
+
+ private final Expression length;
+
+ public Left(Source source, Expression str, Expression length) {
+ super(source, Arrays.asList(str, length));
+ this.source = source;
+ this.str = str;
+ this.length = length;
+ }
+
+ @Evaluator(warnExceptions = IllegalArgumentException.class)
+ static BytesRef process(@Fixed BytesRef out, BytesRef str, int length) {
+ out.bytes = str.bytes;
+ out.offset = str.offset;
+ out.length = str.length;
+ int curLenStart = 0;
+ UnicodeUtil.UTF8CodePoint cp = new UnicodeUtil.UTF8CodePoint();
+ for (int i = 0; i < length && curLenStart < out.length; i++, curLenStart += cp.numBytes) {
+ UnicodeUtil.codePointAt(out.bytes, out.offset + curLenStart, cp);
+ }
+ out.length = Math.min(curLenStart, out.length);
+ return out;
+ }
+
+ @Override
+ public Supplier toEvaluator(
+ Function> toEvaluator
+ ) {
+
+ Supplier strSupplier = toEvaluator.apply(str);
+ Supplier lengthSupplier = toEvaluator.apply(length);
+ return () -> {
+ BytesRef out = new BytesRef();
+ return new LeftEvaluator(source, out, strSupplier.get(), lengthSupplier.get());
+ };
+ }
+
+ @Override
+ public Expression replaceChildren(List newChildren) {
+ return new Left(source(), newChildren.get(0), newChildren.get(1));
+ }
+
+ @Override
+ protected NodeInfo extends Expression> info() {
+ return NodeInfo.create(this, Left::new, str, length);
+ }
+
+ @Override
+ public DataType dataType() {
+ return DataTypes.KEYWORD;
+ }
+
+ @Override
+ protected TypeResolution resolveType() {
+ if (childrenResolved() == false) {
+ return new TypeResolution("Unresolved children");
+ }
+
+ TypeResolution resolution = isString(str, sourceText(), FIRST);
+ if (resolution.unresolved()) {
+ return resolution;
+ }
+
+ resolution = isInteger(length, sourceText(), SECOND);
+ if (resolution.unresolved()) {
+ return resolution;
+ }
+
+ return TypeResolution.TYPE_RESOLVED;
+ }
+
+ @Override
+ public boolean foldable() {
+ return str.foldable() && length.foldable();
+ }
+
+ @Override
+ public Object fold() {
+ return EvaluatorMapper.super.fold();
+ }
+
+ @Override
+ public ScriptTemplate asScript() {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java
index 599cc5d3b75a8..ff3b75e60245e 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java
@@ -52,6 +52,7 @@
import org.elasticsearch.xpack.esql.expression.function.scalar.math.Atan;
import org.elasticsearch.xpack.esql.expression.function.scalar.math.Atan2;
import org.elasticsearch.xpack.esql.expression.function.scalar.math.AutoBucket;
+import org.elasticsearch.xpack.esql.expression.function.scalar.math.Ceil;
import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cos;
import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cosh;
import org.elasticsearch.xpack.esql.expression.function.scalar.math.E;
@@ -81,6 +82,7 @@
import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce;
import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat;
import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim;
+import org.elasticsearch.xpack.esql.expression.function.scalar.string.Left;
import org.elasticsearch.xpack.esql.expression.function.scalar.string.Length;
import org.elasticsearch.xpack.esql.expression.function.scalar.string.RTrim;
import org.elasticsearch.xpack.esql.expression.function.scalar.string.Split;
@@ -294,6 +296,7 @@ public static List namedTypeEntries() {
of(ESQL_UNARY_SCLR_CLS, Acos.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar),
of(ESQL_UNARY_SCLR_CLS, Asin.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar),
of(ESQL_UNARY_SCLR_CLS, Atan.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar),
+ of(ESQL_UNARY_SCLR_CLS, Ceil.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar),
of(ESQL_UNARY_SCLR_CLS, Cos.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar),
of(ESQL_UNARY_SCLR_CLS, Cosh.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar),
of(ESQL_UNARY_SCLR_CLS, Floor.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar),
@@ -341,6 +344,7 @@ public static List namedTypeEntries() {
of(ScalarFunction.class, Pow.class, PlanNamedTypes::writePow, PlanNamedTypes::readPow),
of(ScalarFunction.class, StartsWith.class, PlanNamedTypes::writeStartsWith, PlanNamedTypes::readStartsWith),
of(ScalarFunction.class, Substring.class, PlanNamedTypes::writeSubstring, PlanNamedTypes::readSubstring),
+ of(ScalarFunction.class, Left.class, PlanNamedTypes::writeLeft, PlanNamedTypes::readLeft),
of(ScalarFunction.class, Split.class, PlanNamedTypes::writeSplit, PlanNamedTypes::readSplit),
of(ScalarFunction.class, Tau.class, PlanNamedTypes::writeNoArgScalar, PlanNamedTypes::readNoArgScalar),
// ArithmeticOperations
@@ -1033,6 +1037,7 @@ static void writeBinaryLogic(PlanStreamOutput out, BinaryLogic binaryLogic) thro
entry(name(Acos.class), Acos::new),
entry(name(Asin.class), Asin::new),
entry(name(Atan.class), Atan::new),
+ entry(name(Ceil.class), Ceil::new),
entry(name(Cos.class), Cos::new),
entry(name(Cosh.class), Cosh::new),
entry(name(Floor.class), Floor::new),
@@ -1270,6 +1275,17 @@ static void writeSubstring(PlanStreamOutput out, Substring substring) throws IOE
out.writeOptionalWriteable(fields.size() == 3 ? o -> out.writeExpression(fields.get(2)) : null);
}
+ static Left readLeft(PlanStreamInput in) throws IOException {
+ return new Left(Source.EMPTY, in.readExpression(), in.readExpression());
+ }
+
+ static void writeLeft(PlanStreamOutput out, Left left) throws IOException {
+ List fields = left.children();
+ assert fields.size() == 2;
+ out.writeExpression(fields.get(0));
+ out.writeExpression(fields.get(1));
+ }
+
static Split readSplit(PlanStreamInput in) throws IOException {
return new Split(Source.EMPTY, in.readExpression(), in.readExpression());
}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
index 87bbdfde39d03..4dec26888dc71 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
@@ -60,7 +60,11 @@
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.function.DoubleBinaryOperator;
+import java.util.function.DoubleFunction;
import java.util.function.DoubleUnaryOperator;
+import java.util.function.Function;
+import java.util.function.IntFunction;
+import java.util.function.LongFunction;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
@@ -222,34 +226,32 @@ public String toString() {
}
/**
- * Generate positive test cases for binary functions that operate on an {@code numeric}
+ * Generate positive test cases for unary functions that operate on an {@code numeric}
* fields by casting them to {@link DataTypes#DOUBLE}s.
*/
public static List forUnaryCastingToDouble(String name, String argName, DoubleUnaryOperator expected) {
+ String read = "Attribute[channel=0]";
+ String eval = name + "[" + argName + "=";
List suppliers = new ArrayList<>();
- for (DataType type : EsqlDataTypes.types()) {
- if (type.isNumeric() == false || EsqlDataTypes.isRepresentable(type) == false) {
- continue;
- }
- for (Map.Entry> supplier : RANDOM_VALUE_SUPPLIERS.get(type)) {
- suppliers.add(new TestCaseSupplier(supplier.getKey(), List.of(type), () -> {
- Number value = (Number) supplier.getValue().get();
- TypedData typed = new TypedData(
- // TODO there has to be a better way to handle unsigned long
- value instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : value,
- type,
- "value"
- );
- String evalName = castToDoubleEvaluator("Attribute[channel=0]", type);
- return new TestCase(
- List.of(typed),
- name + "[" + argName + "=" + evalName + "]",
- DataTypes.DOUBLE,
- equalTo(expected.applyAsDouble(value.doubleValue()))
- );
- }));
- }
- }
+ forUnaryInt(
+ suppliers,
+ eval + castToDoubleEvaluator(read, DataTypes.INTEGER) + "]",
+ DataTypes.DOUBLE,
+ i -> expected.applyAsDouble(i)
+ );
+ forUnaryLong(
+ suppliers,
+ eval + castToDoubleEvaluator(read, DataTypes.LONG) + "]",
+ DataTypes.DOUBLE,
+ l -> expected.applyAsDouble(l)
+ );
+ forUnaryUnsignedLong(
+ suppliers,
+ eval + castToDoubleEvaluator(read, DataTypes.UNSIGNED_LONG) + "]",
+ DataTypes.DOUBLE,
+ ul -> expected.applyAsDouble(ul.doubleValue())
+ );
+ forUnaryDouble(suppliers, eval + read + "]", DataTypes.DOUBLE, i -> expected.applyAsDouble(i));
return suppliers;
}
@@ -305,6 +307,81 @@ public static List forBinaryCastingToDouble(
return suppliers;
}
+ /**
+ * Generate positive test cases for a unary function operating on an {@link DataTypes#INTEGER}.
+ */
+ public static void forUnaryInt(
+ List suppliers,
+ String expectedEvaluatorToString,
+ DataType expectedType,
+ IntFunction expectedValue
+ ) {
+ unaryNumeric(suppliers, expectedEvaluatorToString, DataTypes.INTEGER, expectedType, n -> expectedValue.apply(n.intValue()));
+ }
+
+ /**
+ * Generate positive test cases for a unary function operating on a {@link DataTypes#LONG}.
+ */
+ public static void forUnaryLong(
+ List suppliers,
+ String expectedEvaluatorToString,
+ DataType expectedType,
+ LongFunction expectedValue
+ ) {
+ unaryNumeric(suppliers, expectedEvaluatorToString, DataTypes.LONG, expectedType, n -> expectedValue.apply(n.longValue()));
+ }
+
+ /**
+ * Generate positive test cases for a unary function operating on an {@link DataTypes#UNSIGNED_LONG}.
+ */
+ public static void forUnaryUnsignedLong(
+ List suppliers,
+ String expectedEvaluatorToString,
+ DataType expectedType,
+ Function expectedValue
+ ) {
+ unaryNumeric(
+ suppliers,
+ expectedEvaluatorToString,
+ DataTypes.UNSIGNED_LONG,
+ expectedType,
+ n -> expectedValue.apply((BigInteger) n)
+ );
+ }
+
+ /**
+ * Generate positive test cases for a unary function operating on a {@link DataTypes#DOUBLE}.
+ */
+ public static void forUnaryDouble(
+ List suppliers,
+ String expectedEvaluatorToString,
+ DataType expectedType,
+ DoubleFunction expectedValue
+ ) {
+ unaryNumeric(suppliers, expectedEvaluatorToString, DataTypes.DOUBLE, expectedType, n -> expectedValue.apply(n.doubleValue()));
+ }
+
+ private static void unaryNumeric(
+ List suppliers,
+ String expectedEvaluatorToString,
+ DataType inputType,
+ DataType expectedOutputType,
+ Function expected
+ ) {
+ for (Map.Entry> supplier : RANDOM_VALUE_SUPPLIERS.get(inputType)) {
+ suppliers.add(new TestCaseSupplier(supplier.getKey(), List.of(inputType), () -> {
+ Number value = (Number) supplier.getValue().get();
+ TypedData typed = new TypedData(
+ // TODO there has to be a better way to handle unsigned long
+ value instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : value,
+ inputType,
+ "value"
+ );
+ return new TestCase(List.of(typed), expectedEvaluatorToString, expectedOutputType, equalTo(expected.apply(value)));
+ }));
+ }
+ }
+
private static final Map>>> RANDOM_VALUE_SUPPLIERS = Map.ofEntries(
Map.entry(
DataTypes.DOUBLE,
@@ -768,19 +845,6 @@ private static Stream representable() {
return EsqlDataTypes.types().stream().filter(EsqlDataTypes::isRepresentable);
}
- @AfterClass
- public static void renderSignature() throws IOException {
- FunctionDefinition definition = definition();
- if (definition == null) {
- LogManager.getLogger(getTestClass()).info("Skipping rendering signature because the function isn't registered");
- return;
- }
-
- String rendered = RailRoadDiagram.functionSignature(definition);
- LogManager.getLogger(getTestClass()).info("Writing function signature");
- writeToTempDir("signature", rendered, "svg");
- }
-
/**
* Unique signatures encountered by this test.
*
@@ -822,14 +886,18 @@ public static void renderTypesTable() throws IOException {
return;
}
+ List definedSignature = ShowFunctions.signature(definition);
StringBuilder header = new StringBuilder();
- for (String arg : ShowFunctions.signature(definition)) {
+ for (String arg : definedSignature) {
header.append(arg).append(" | ");
}
header.append("result");
List table = new ArrayList<>();
for (Map.Entry, DataType> sig : signatures.entrySet()) {
+ if (sig.getKey().size() != definedSignature.size()) {
+ continue;
+ }
StringBuilder b = new StringBuilder();
for (DataType arg : sig.getKey()) {
b.append(arg.typeName()).append(" | ");
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java
new file mode 100644
index 0000000000000..69c2a2817c6bc
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.math;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataType;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+
+import java.util.List;
+import java.util.function.Supplier;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class CeilTests extends AbstractScalarFunctionTestCase {
+ public CeilTests(@Name("TestCase") Supplier testCaseSupplier) {
+ this.testCase = testCaseSupplier.get();
+ }
+
+ @ParametersFactory
+ public static Iterable parameters() {
+ return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("large double value", () -> {
+ double arg = 1 / randomDouble();
+ return new TestCase(
+ List.of(new TypedData(arg, DataTypes.DOUBLE, "arg")),
+ "CeilDoubleEvaluator[val=Attribute[channel=0]]",
+ DataTypes.DOUBLE,
+ equalTo(Math.ceil(arg))
+ );
+ }), new TestCaseSupplier("integer value", () -> {
+ int arg = randomInt();
+ return new TestCase(
+ List.of(new TypedData(arg, DataTypes.INTEGER, "arg")),
+ "Attribute[channel=0]",
+ DataTypes.INTEGER,
+ equalTo(arg)
+ );
+ }), new TestCaseSupplier("long value", () -> {
+ long arg = randomLong();
+ return new TestCase(List.of(new TypedData(arg, DataTypes.LONG, "arg")), "Attribute[channel=0]", DataTypes.LONG, equalTo(arg));
+ }), new TestCaseSupplier("unsigned long value", () -> {
+ long arg = randomLong();
+ return new TestCase(
+ List.of(new TypedData(arg, DataTypes.UNSIGNED_LONG, "arg")),
+ "Attribute[channel=0]",
+ DataTypes.UNSIGNED_LONG,
+ equalTo(arg)
+ );
+ })));
+ }
+
+ @Override
+ protected DataType expectedType(List argTypes) {
+ return argTypes.get(0);
+ }
+
+ @Override
+ protected List argSpec() {
+ return List.of(required(numerics()));
+ }
+
+ @Override
+ protected Expression build(Source source, List args) {
+ return new Ceil(source, args.get(0));
+ }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java
index 845d74cbeed84..530fcc177a0ac 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java
@@ -10,43 +10,30 @@
import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
-import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
import org.elasticsearch.xpack.ql.expression.Expression;
import org.elasticsearch.xpack.ql.tree.Source;
-import org.elasticsearch.xpack.ql.type.DataType;
import org.elasticsearch.xpack.ql.type.DataTypes;
+import org.elasticsearch.xpack.ql.util.NumericUtils;
+import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;
-import static org.hamcrest.Matchers.equalTo;
-
-public class FloorTests extends AbstractScalarFunctionTestCase {
+public class FloorTests extends AbstractFunctionTestCase {
public FloorTests(@Name("TestCase") Supplier testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable parameters() {
- return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("large double value", () -> {
- double arg = 1 / randomDouble();
- return new TestCase(
- List.of(new TypedData(arg, DataTypes.DOUBLE, "arg")),
- "FloorDoubleEvaluator[val=Attribute[channel=0]]",
- DataTypes.DOUBLE,
- equalTo(Math.floor(arg))
- );
- })));
- }
-
- @Override
- protected DataType expectedType(List argTypes) {
- return argTypes.get(0);
- }
-
- @Override
- protected List argSpec() {
- return List.of(required(numerics()));
+ String read = "Attribute[channel=0]";
+ List suppliers = new ArrayList<>();
+ TestCaseSupplier.forUnaryInt(suppliers, read, DataTypes.INTEGER, i -> i);
+ TestCaseSupplier.forUnaryLong(suppliers, read, DataTypes.LONG, l -> l);
+ TestCaseSupplier.forUnaryUnsignedLong(suppliers, read, DataTypes.UNSIGNED_LONG, ul -> NumericUtils.asLongUnsigned(ul));
+ TestCaseSupplier.forUnaryDouble(suppliers, "FloorDoubleEvaluator[val=" + read + "]", DataTypes.DOUBLE, Math::floor);
+ return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers)));
}
@Override
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java
index a09d7c944cb5e..5222fc605a6bd 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java
@@ -10,56 +10,38 @@
import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
-import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
import org.elasticsearch.xpack.ql.expression.Expression;
import org.elasticsearch.xpack.ql.tree.Source;
-import org.elasticsearch.xpack.ql.type.DataType;
import org.elasticsearch.xpack.ql.type.DataTypes;
+import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;
-import static org.hamcrest.Matchers.equalTo;
-
-public class Log10Tests extends AbstractScalarFunctionTestCase {
+public class Log10Tests extends AbstractFunctionTestCase {
public Log10Tests(@Name("TestCase") Supplier testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable parameters() {
- return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Log10 of Double", () -> {
- // TODO: include larger values here
- double arg = randomDouble();
- return new TestCase(
- List.of(new TypedData(arg, DataTypes.DOUBLE, "arg")),
- "Log10DoubleEvaluator[val=Attribute[channel=0]]",
- DataTypes.DOUBLE,
- equalTo(Math.log10(arg))
- );
- }), new TestCaseSupplier("Log10(negative)", () -> {
- double arg = randomIntBetween(Integer.MIN_VALUE, -1); // it's inclusive
- return new TestCase(
- List.of(new TypedData(arg, DataTypes.DOUBLE, "arg")),
- "Log10DoubleEvaluator[val=Attribute[channel=0]]",
- DataTypes.DOUBLE,
- equalTo(Double.NaN)
- );
- })));
+ String read = "Attribute[channel=0]";
+ List suppliers = new ArrayList<>();
+ TestCaseSupplier.forUnaryInt(suppliers, "Log10IntEvaluator[val=" + read + "]", DataTypes.DOUBLE, Math::log10);
+ TestCaseSupplier.forUnaryLong(suppliers, "Log10LongEvaluator[val=" + read + "]", DataTypes.DOUBLE, Math::log10);
+ TestCaseSupplier.forUnaryUnsignedLong(
+ suppliers,
+ "Log10UnsignedLongEvaluator[val=" + read + "]",
+ DataTypes.DOUBLE,
+ ul -> Math.log10(ul.doubleValue())
+ );
+ TestCaseSupplier.forUnaryDouble(suppliers, "Log10DoubleEvaluator[val=" + read + "]", DataTypes.DOUBLE, Math::log10);
+ return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
}
@Override
protected Expression build(Source source, List args) {
return new Log10(source, args.get(0));
}
-
- @Override
- protected List argSpec() {
- return List.of(required(numerics()));
- }
-
- @Override
- protected DataType expectedType(List argTypes) {
- return DataTypes.DOUBLE;
- }
}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java
index c6549443ad880..e328f38f1b64c 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java
@@ -10,53 +10,38 @@
import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
-import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
import org.elasticsearch.xpack.ql.expression.Expression;
import org.elasticsearch.xpack.ql.tree.Source;
-import org.elasticsearch.xpack.ql.type.DataType;
import org.elasticsearch.xpack.ql.type.DataTypes;
-import org.hamcrest.Matcher;
+import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;
-import static org.hamcrest.Matchers.equalTo;
-
-public class SqrtTests extends AbstractScalarFunctionTestCase {
+public class SqrtTests extends AbstractFunctionTestCase {
public SqrtTests(@Name("TestCase") Supplier testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable parameters() {
- return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Sqrt of Double", () -> {
- // TODO: include larger values here
- double arg = randomDouble();
- return new TestCase(
- List.of(new TypedData(arg, DataTypes.DOUBLE, "arg")),
- "SqrtDoubleEvaluator[val=Attribute[channel=0]]",
- DataTypes.DOUBLE,
- equalTo(Math.sqrt(arg))
- );
- })));
- }
-
- private Matcher resultsMatcher(List typedData) {
- return equalTo(Math.sqrt((Double) typedData.get(0).data()));
+ String read = "Attribute[channel=0]";
+ List suppliers = new ArrayList<>();
+ TestCaseSupplier.forUnaryInt(suppliers, "SqrtIntEvaluator[val=" + read + "]", DataTypes.DOUBLE, Math::sqrt);
+ TestCaseSupplier.forUnaryLong(suppliers, "SqrtLongEvaluator[val=" + read + "]", DataTypes.DOUBLE, Math::sqrt);
+ TestCaseSupplier.forUnaryUnsignedLong(
+ suppliers,
+ "SqrtUnsignedLongEvaluator[val=" + read + "]",
+ DataTypes.DOUBLE,
+ ul -> Math.sqrt(ul.doubleValue())
+ );
+ TestCaseSupplier.forUnaryDouble(suppliers, "SqrtDoubleEvaluator[val=" + read + "]", DataTypes.DOUBLE, Math::sqrt);
+ return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
}
@Override
protected Expression build(Source source, List args) {
return new Sqrt(source, args.get(0));
}
-
- @Override
- protected List argSpec() {
- return List.of(required(numerics()));
- }
-
- @Override
- protected DataType expectedType(List argTypes) {
- return DataTypes.DOUBLE;
- }
}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java
new file mode 100644
index 0000000000000..1119fcc80aa39
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java
@@ -0,0 +1,110 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.string;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.Literal;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataType;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+import org.hamcrest.Matcher;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.compute.data.BlockUtils.toJavaObject;
+import static org.hamcrest.Matchers.equalTo;
+
+public class LeftTests extends AbstractScalarFunctionTestCase {
+ public LeftTests(@Name("TestCase") Supplier testCaseSupplier) {
+ this.testCase = testCaseSupplier.get();
+ }
+
+ @ParametersFactory
+ public static Iterable parameters() {
+ List suppliers = new ArrayList<>();
+ suppliers.add(new TestCaseSupplier("long", () -> {
+ int length = between(1, 10);
+ String text = randomAlphaOfLength(10);
+ return new TestCase(
+ List.of(new TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"), new TypedData(length, DataTypes.INTEGER, "length")),
+ "LeftEvaluator[out=[], str=Attribute[channel=0], length=Attribute[channel=1]]",
+ DataTypes.KEYWORD,
+ equalTo(new BytesRef(text.substring(0, length)))
+ );
+ }));
+ suppliers.add(new TestCaseSupplier("short", () -> {
+ int length = between(2, 10);
+ String text = randomAlphaOfLength(1);
+ return new TestCase(
+ List.of(new TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"), new TypedData(length, DataTypes.INTEGER, "length")),
+ "LeftEvaluator[out=[], str=Attribute[channel=0], length=Attribute[channel=1]]",
+ DataTypes.KEYWORD,
+ equalTo(new BytesRef(text))
+ );
+ }));
+ return parameterSuppliersFromTypedData(suppliers);
+ }
+
+ @Override
+ protected Expression build(Source source, List args) {
+ return new Left(source, args.get(0), args.get(1));
+ }
+
+ @Override
+ protected List argSpec() {
+ return List.of(required(strings()), required(integers()));
+ }
+
+ @Override
+ protected DataType expectedType(List argTypes) {
+ return DataTypes.KEYWORD;
+ }
+
+ public Matcher resultsMatcher(List typedData) {
+ String str = ((BytesRef) typedData.get(0).data()).utf8ToString();
+ int length = (Integer) typedData.get(1).data();
+ return equalTo(new BytesRef(str.substring(0, length)));
+ }
+
+ public void testReasonableLength() {
+ assertThat(process("a fox call", 5), equalTo("a fox"));
+ }
+
+ public void testMassiveLength() {
+ assertThat(process("a fox call", 10), equalTo("a fox call"));
+ }
+
+ public void testNegativeLength() {
+ assertThat(process("a fox call", -1), equalTo(""));
+ }
+
+ public void testUnicode() {
+ final String s = "a\ud83c\udf09tiger";
+ assert s.codePointCount(0, s.length()) == 7;
+ assertThat(process(s, 2), equalTo("a\ud83c\udf09"));
+ }
+
+ private String process(String str, int length) {
+ Block result = evaluator(
+ new Left(Source.EMPTY, field("str", DataTypes.KEYWORD), new Literal(Source.EMPTY, length, DataTypes.INTEGER))
+ ).get().eval(row(List.of(new BytesRef(str))));
+ if (null == result) {
+ return null;
+ }
+ BytesRef resultByteRef = ((BytesRef) toJavaObject(result, 0));
+ return resultByteRef == null ? null : resultByteRef.utf8ToString();
+ }
+}
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/AbstractProfilingPersistenceManager.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/AbstractProfilingPersistenceManager.java
index 520ace3e1970f..06946183cc97c 100644
--- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/AbstractProfilingPersistenceManager.java
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/AbstractProfilingPersistenceManager.java
@@ -21,22 +21,14 @@
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
-import org.elasticsearch.cluster.health.ClusterHealthStatus;
-import org.elasticsearch.cluster.health.ClusterIndexHealth;
-import org.elasticsearch.cluster.metadata.IndexMetadata;
-import org.elasticsearch.cluster.metadata.MappingMetadata;
-import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.gateway.GatewayService;
-import org.elasticsearch.index.Index;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.ClientHelper;
import java.io.Closeable;
-import java.util.List;
-import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;
@@ -44,22 +36,26 @@
import static org.elasticsearch.core.Strings.format;
import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
-public abstract class AbstractProfilingPersistenceManager
- implements
- ClusterStateListener,
- Closeable {
+abstract class AbstractProfilingPersistenceManager implements ClusterStateListener, Closeable {
protected final Logger logger = LogManager.getLogger(getClass());
private final AtomicBoolean inProgress = new AtomicBoolean(false);
private final ClusterService clusterService;
protected final ThreadPool threadPool;
protected final Client client;
+ private final IndexStateResolver indexStateResolver;
private volatile boolean templatesEnabled;
- public AbstractProfilingPersistenceManager(ThreadPool threadPool, Client client, ClusterService clusterService) {
+ AbstractProfilingPersistenceManager(
+ ThreadPool threadPool,
+ Client client,
+ ClusterService clusterService,
+ IndexStateResolver indexStateResolver
+ ) {
this.threadPool = threadPool;
this.client = client;
this.clusterService = clusterService;
+ this.indexStateResolver = indexStateResolver;
}
public void initialize() {
@@ -95,7 +91,7 @@ public final void clusterChanged(ClusterChangedEvent event) {
return;
}
- if (isAllResourcesCreated(event, clusterService.getSettings()) == false) {
+ if (areAllIndexTemplatesCreated(event, clusterService.getSettings()) == false) {
logger.trace("Skipping index creation; not all required resources are present yet");
return;
}
@@ -109,27 +105,21 @@ public final void clusterChanged(ClusterChangedEvent event) {
try (var refs = new RefCountingRunnable(() -> inProgress.set(false))) {
ClusterState clusterState = event.state();
for (T index : getManagedIndices()) {
- IndexState state = getIndexState(clusterState, index);
+ IndexState state = indexStateResolver.getIndexState(clusterState, index);
if (state.getStatus().actionable) {
onIndexState(clusterState, state, ActionListener.releasing(refs.acquire()));
+ } else if (state.getStatus() == IndexStatus.TOO_OLD) {
+ logger.info("Aborting index creation as index [{}] is considered too old.", index);
+ return;
}
}
}
}
- protected boolean isAllResourcesCreated(ClusterChangedEvent event, Settings settings) {
+ protected boolean areAllIndexTemplatesCreated(ClusterChangedEvent event, Settings settings) {
return ProfilingIndexTemplateRegistry.isAllResourcesCreated(event.state(), settings);
}
- /**
- * Extracts the appropriate index metadata for a given index from the cluster state.
- *
- * @param state Current cluster state. Never null.
- * @param index An index for which to retrieve index metadata. Never null.
- * @return The corresponding index metadata or null if there are none.
- */
- protected abstract IndexMetadata indexMetadata(ClusterState state, T index);
-
/**
* @return An iterable of all indices that are managed by this instance.
*/
@@ -148,78 +138,6 @@ protected abstract void onIndexState(
ActionListener super ActionResponse> listener
);
- private IndexState getIndexState(ClusterState state, T index) {
- IndexMetadata metadata = indexMetadata(state, index);
- if (metadata == null) {
- return new IndexState<>(index, null, Status.NEEDS_CREATION);
- }
- if (metadata.getState() == IndexMetadata.State.CLOSE) {
- logger.warn(
- "Index [{}] is closed. This is likely to prevent Universal Profiling from functioning correctly",
- metadata.getIndex()
- );
- return new IndexState<>(index, metadata.getIndex(), Status.CLOSED);
- }
- final IndexRoutingTable routingTable = state.getRoutingTable().index(metadata.getIndex());
- ClusterHealthStatus indexHealth = new ClusterIndexHealth(metadata, routingTable).getStatus();
- if (indexHealth == ClusterHealthStatus.RED) {
- logger.trace("Index [{}] health status is RED, any pending mapping upgrades will wait until this changes", metadata.getIndex());
- return new IndexState<>(index, metadata.getIndex(), Status.UNHEALTHY);
- }
- MappingMetadata mapping = metadata.mapping();
- if (mapping != null) {
- @SuppressWarnings("unchecked")
- Map meta = (Map) mapping.sourceAsMap().get("_meta");
- int currentIndexVersion;
- int currentTemplateVersion;
- if (meta == null) {
- logger.debug("Missing _meta field in mapping of index [{}], assuming initial version.", metadata.getIndex());
- currentIndexVersion = 1;
- currentTemplateVersion = 1;
- } else {
- // we are extra defensive and treat any unexpected values as an unhealthy index which we won't touch.
- currentIndexVersion = getVersionField(metadata.getIndex(), meta, "index-version");
- currentTemplateVersion = getVersionField(metadata.getIndex(), meta, "index-template-version");
- if (currentIndexVersion == -1 || currentTemplateVersion == -1) {
- return new IndexState<>(index, metadata.getIndex(), Status.UNHEALTHY);
- }
- }
- if (index.getVersion() > currentIndexVersion) {
- return new IndexState<>(index, metadata.getIndex(), Status.NEEDS_VERSION_BUMP);
- } else if (getIndexTemplateVersion() > currentTemplateVersion) {
- // if there are no migrations we can consider the index up-to-date even if the index template version does not match.
- List pendingMigrations = index.getMigrations(currentTemplateVersion);
- if (pendingMigrations.isEmpty()) {
- logger.trace(
- "Index [{}] with index template version [{}] (current is [{}]) is up-to-date (no pending migrations).",
- metadata.getIndex(),
- currentTemplateVersion,
- getIndexTemplateVersion()
- );
- return new IndexState<>(index, metadata.getIndex(), Status.UP_TO_DATE);
- }
- logger.trace(
- "Index [{}] with index template version [{}] (current is [{}]) has [{}] pending migrations.",
- metadata.getIndex(),
- currentTemplateVersion,
- getIndexTemplateVersion(),
- pendingMigrations.size()
- );
- return new IndexState<>(index, metadata.getIndex(), Status.NEEDS_MAPPINGS_UPDATE, pendingMigrations);
- } else {
- return new IndexState<>(index, metadata.getIndex(), Status.UP_TO_DATE);
- }
- } else {
- logger.warn("No mapping found for existing index [{}]. Index cannot be migrated.", metadata.getIndex());
- return new IndexState<>(index, metadata.getIndex(), Status.UNHEALTHY);
- }
- }
-
- // overridable for testing
- protected int getIndexTemplateVersion() {
- return ProfilingIndexTemplateRegistry.INDEX_TEMPLATE_VERSION;
- }
-
protected final void applyMigrations(IndexState indexState, ActionListener super ActionResponse> listener) {
String writeIndex = indexState.getWriteIndex().getName();
try (var refs = new RefCountingRunnable(() -> listener.onResponse(null))) {
@@ -282,80 +200,4 @@ public void onFailure(Exception ex) {
}, consumer);
});
}
-
- private int getVersionField(Index index, Map meta, String fieldName) {
- Object value = meta.get(fieldName);
- if (value instanceof Integer) {
- return (int) value;
- }
- if (value == null) {
- logger.warn("Metadata version field [{}] of index [{}] is empty.", fieldName, index);
- return -1;
- }
- logger.warn("Metadata version field [{}] of index [{}] is [{}] (expected an integer).", fieldName, index, value);
- return -1;
- }
-
- protected static final class IndexState {
- private final T index;
- private final Index writeIndex;
- private final Status status;
- private final List pendingMigrations;
-
- IndexState(T index, Index writeIndex, Status status) {
- this(index, writeIndex, status, null);
- }
-
- IndexState(T index, Index writeIndex, Status status, List pendingMigrations) {
- this.index = index;
- this.writeIndex = writeIndex;
- this.status = status;
- this.pendingMigrations = pendingMigrations;
- }
-
- public T getIndex() {
- return index;
- }
-
- public Index getWriteIndex() {
- return writeIndex;
- }
-
- public Status getStatus() {
- return status;
- }
-
- public List getPendingMigrations() {
- return pendingMigrations;
- }
- }
-
- enum Status {
- CLOSED(false),
- UNHEALTHY(false),
- NEEDS_CREATION(true),
- NEEDS_VERSION_BUMP(true),
- UP_TO_DATE(false),
- NEEDS_MAPPINGS_UPDATE(true);
-
- /**
- * Whether a status is for informational purposes only or whether it should be acted upon and may change cluster state.
- */
- private final boolean actionable;
-
- Status(boolean actionable) {
- this.actionable = actionable;
- }
- }
-
- /**
- * An index that is used by Universal Profiling.
- */
- interface ProfilingIndexAbstraction {
- String getName();
-
- int getVersion();
-
- List getMigrations(int currentIndexTemplateVersion);
- }
}
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java
index d95758b44c04a..31540cffef010 100644
--- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java
@@ -33,18 +33,21 @@ public static class Response extends ActionResponse implements ToXContentObject
private boolean profilingEnabled;
private boolean resourceManagementEnabled;
private boolean resourcesCreated;
+ private boolean pre891Data;
public Response(StreamInput in) throws IOException {
super(in);
profilingEnabled = in.readBoolean();
resourceManagementEnabled = in.readBoolean();
resourcesCreated = in.readBoolean();
+ pre891Data = in.readBoolean();
}
- public Response(boolean profilingEnabled, boolean resourceManagementEnabled, boolean resourcesCreated) {
+ public Response(boolean profilingEnabled, boolean resourceManagementEnabled, boolean resourcesCreated, boolean pre891Data) {
this.profilingEnabled = profilingEnabled;
this.resourceManagementEnabled = resourceManagementEnabled;
this.resourcesCreated = resourcesCreated;
+ this.pre891Data = pre891Data;
}
@Override
@@ -52,7 +55,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
builder.startObject();
builder.startObject("profiling").field("enabled", profilingEnabled).endObject();
builder.startObject("resource_management").field("enabled", resourceManagementEnabled).endObject();
- builder.startObject("resources").field("created", resourcesCreated).endObject();
+ builder.startObject("resources").field("created", resourcesCreated).field("pre_8_9_1_data", pre891Data).endObject();
builder.endObject();
return builder;
}
@@ -62,6 +65,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(profilingEnabled);
out.writeBoolean(resourceManagementEnabled);
out.writeBoolean(resourcesCreated);
+ out.writeBoolean(pre891Data);
}
@Override
@@ -71,12 +75,13 @@ public boolean equals(Object o) {
Response response = (Response) o;
return profilingEnabled == response.profilingEnabled
&& resourceManagementEnabled == response.resourceManagementEnabled
- && resourcesCreated == response.resourcesCreated;
+ && resourcesCreated == response.resourcesCreated
+ && pre891Data == response.pre891Data;
}
@Override
public int hashCode() {
- return Objects.hash(profilingEnabled, resourceManagementEnabled, resourcesCreated);
+ return Objects.hash(profilingEnabled, resourceManagementEnabled, resourcesCreated, pre891Data);
}
@Override
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/IndexState.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/IndexState.java
new file mode 100644
index 0000000000000..c34858acf5986
--- /dev/null
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/IndexState.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.profiling;
+
+import org.elasticsearch.index.Index;
+
+import java.util.List;
+
+final class IndexState<T extends ProfilingIndexAbstraction> {
+ private final T index;
+ private final Index writeIndex;
+ private final IndexStatus status;
+ private final List<Migration> pendingMigrations;
+
+ IndexState(T index, Index writeIndex, IndexStatus status) {
+ this(index, writeIndex, status, null);
+ }
+
+ IndexState(T index, Index writeIndex, IndexStatus status, List<Migration> pendingMigrations) {
+ this.index = index;
+ this.writeIndex = writeIndex;
+ this.status = status;
+ this.pendingMigrations = pendingMigrations;
+ }
+
+ public T getIndex() {
+ return index;
+ }
+
+ public Index getWriteIndex() {
+ return writeIndex;
+ }
+
+ public IndexStatus getStatus() {
+ return status;
+ }
+
+ public List<Migration> getPendingMigrations() {
+ return pendingMigrations;
+ }
+}
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/IndexStateResolver.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/IndexStateResolver.java
new file mode 100644
index 0000000000000..748424386457f
--- /dev/null
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/IndexStateResolver.java
@@ -0,0 +1,129 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.profiling;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.cluster.health.ClusterIndexHealth;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.MappingMetadata;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexVersion;
+
+import java.util.List;
+import java.util.Map;
+
+class IndexStateResolver {
+ private static final Logger logger = LogManager.getLogger(IndexStateResolver.class);
+
+ private volatile boolean checkOutdatedIndices;
+
+ IndexStateResolver(boolean checkOutdatedIndices) {
+ this.checkOutdatedIndices = checkOutdatedIndices;
+ }
+
+ public void setCheckOutdatedIndices(boolean checkOutdatedIndices) {
+ this.checkOutdatedIndices = checkOutdatedIndices;
+ }
+
+ public <T extends ProfilingIndexAbstraction> IndexState<T> getIndexState(ClusterState state, T index) {
+ IndexMetadata metadata = index.indexMetadata(state);
+ if (metadata == null) {
+ return new IndexState<>(index, null, IndexStatus.NEEDS_CREATION);
+ }
+ if (metadata.getState() == IndexMetadata.State.CLOSE) {
+ logger.warn(
+ "Index [{}] is closed. This is likely to prevent Universal Profiling from functioning correctly",
+ metadata.getIndex()
+ );
+ return new IndexState<>(index, metadata.getIndex(), IndexStatus.CLOSED);
+ }
+ final IndexRoutingTable routingTable = state.getRoutingTable().index(metadata.getIndex());
+ ClusterHealthStatus indexHealth = new ClusterIndexHealth(metadata, routingTable).getStatus();
+ if (indexHealth == ClusterHealthStatus.RED) {
+ logger.trace("Index [{}] health status is RED, any pending mapping upgrades will wait until this changes", metadata.getIndex());
+ return new IndexState<>(index, metadata.getIndex(), IndexStatus.UNHEALTHY);
+ }
+ if (checkOutdatedIndices && metadata.getCreationVersion().before(IndexVersion.V_8_9_1)) {
+ logger.trace(
+ "Index [{}] has been created before version 8.9.1 and must be deleted before proceeding with the upgrade.",
+ metadata.getIndex()
+ );
+ return new IndexState<>(index, metadata.getIndex(), IndexStatus.TOO_OLD);
+ }
+ MappingMetadata mapping = metadata.mapping();
+ if (mapping != null) {
+ @SuppressWarnings("unchecked")
+ Map<String, Object> meta = (Map<String, Object>) mapping.sourceAsMap().get("_meta");
+ int currentIndexVersion;
+ int currentTemplateVersion;
+ if (meta == null) {
+ logger.debug("Missing _meta field in mapping of index [{}], assuming initial version.", metadata.getIndex());
+ currentIndexVersion = 1;
+ currentTemplateVersion = 1;
+ } else {
+ // we are extra defensive and treat any unexpected values as an unhealthy index which we won't touch.
+ currentIndexVersion = getVersionField(metadata.getIndex(), meta, "index-version");
+ currentTemplateVersion = getVersionField(metadata.getIndex(), meta, "index-template-version");
+ if (currentIndexVersion == -1 || currentTemplateVersion == -1) {
+ return new IndexState<>(index, metadata.getIndex(), IndexStatus.UNHEALTHY);
+ }
+ }
+ if (index.getVersion() > currentIndexVersion) {
+ return new IndexState<>(index, metadata.getIndex(), IndexStatus.NEEDS_VERSION_BUMP);
+ } else if (getIndexTemplateVersion() > currentTemplateVersion) {
+ // if there are no migrations we can consider the index up-to-date even if the index template version does not match.
+ List<Migration> pendingMigrations = index.getMigrations(currentTemplateVersion);
+ if (pendingMigrations.isEmpty()) {
+ logger.trace(
+ "Index [{}] with index template version [{}] (current is [{}]) is up-to-date (no pending migrations).",
+ metadata.getIndex(),
+ currentTemplateVersion,
+ getIndexTemplateVersion()
+ );
+ return new IndexState<>(index, metadata.getIndex(), IndexStatus.UP_TO_DATE);
+ }
+ logger.trace(
+ "Index [{}] with index template version [{}] (current is [{}]) has [{}] pending migrations.",
+ metadata.getIndex(),
+ currentTemplateVersion,
+ getIndexTemplateVersion(),
+ pendingMigrations.size()
+ );
+ return new IndexState<>(index, metadata.getIndex(), IndexStatus.NEEDS_MAPPINGS_UPDATE, pendingMigrations);
+ } else {
+ return new IndexState<>(index, metadata.getIndex(), IndexStatus.UP_TO_DATE);
+ }
+ } else {
+ logger.warn("No mapping found for existing index [{}]. Index cannot be migrated.", metadata.getIndex());
+ return new IndexState<>(index, metadata.getIndex(), IndexStatus.UNHEALTHY);
+ }
+ }
+
+ private int getVersionField(Index index, Map<String, Object> meta, String fieldName) {
+ Object value = meta.get(fieldName);
+ if (value instanceof Integer) {
+ return (int) value;
+ }
+ if (value == null) {
+ logger.warn("Metadata version field [{}] of index [{}] is empty.", fieldName, index);
+ return -1;
+ }
+ logger.warn("Metadata version field [{}] of index [{}] is [{}] (expected an integer).", fieldName, index, value);
+ return -1;
+ }
+
+ // overridable for testing
+ protected int getIndexTemplateVersion() {
+ return ProfilingIndexTemplateRegistry.INDEX_TEMPLATE_VERSION;
+ }
+
+}
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/IndexStatus.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/IndexStatus.java
new file mode 100644
index 0000000000000..389c0de80cc5f
--- /dev/null
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/IndexStatus.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.profiling;
+
+enum IndexStatus {
+ CLOSED(false),
+ UNHEALTHY(false),
+ TOO_OLD(false),
+ NEEDS_CREATION(true),
+ NEEDS_VERSION_BUMP(true),
+ UP_TO_DATE(false),
+ NEEDS_MAPPINGS_UPDATE(true);
+
+ /**
+ * Whether a status is for informational purposes only or whether it should be acted upon and may change cluster state.
+ */
+ public final boolean actionable;
+
+ IndexStatus(boolean actionable) {
+ this.actionable = actionable;
+ }
+}
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingDataStreamManager.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingDataStreamManager.java
index 37959df0638ba..722a7d1dbac63 100644
--- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingDataStreamManager.java
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingDataStreamManager.java
@@ -36,7 +36,7 @@
/**
* Creates all data streams that are required for using Elastic Universal Profiling.
*/
-public class ProfilingDataStreamManager extends AbstractProfilingPersistenceManager<ProfilingDataStreamManager.ProfilingDataStream> {
+class ProfilingDataStreamManager extends AbstractProfilingPersistenceManager<ProfilingDataStreamManager.ProfilingDataStream> {
public static final List<ProfilingDataStream> PROFILING_DATASTREAMS;
static {
@@ -51,8 +51,8 @@ public class ProfilingDataStreamManager extends AbstractProfilingPersistenceMana
PROFILING_DATASTREAMS = Collections.unmodifiableList(dataStreams);
}
- public ProfilingDataStreamManager(ThreadPool threadPool, Client client, ClusterService clusterService) {
- super(threadPool, client, clusterService);
+ ProfilingDataStreamManager(ThreadPool threadPool, Client client, ClusterService clusterService, IndexStateResolver indexStateResolver) {
+ super(threadPool, client, clusterService, indexStateResolver);
}
@Override
@@ -61,7 +61,7 @@ protected void onIndexState(
IndexState<ProfilingDataStream> indexState,
ActionListener<? super ActionResponse> listener
) {
- Status status = indexState.getStatus();
+ IndexStatus status = indexState.getStatus();
switch (status) {
case NEEDS_CREATION -> createDataStream(indexState.getIndex(), listener);
case NEEDS_VERSION_BUMP -> rolloverDataStream(indexState.getIndex(), listener);
@@ -74,22 +74,6 @@ protected void onIndexState(
}
}
- protected IndexMetadata indexMetadata(ClusterState state, ProfilingDataStream dataStream) {
- Map<String, DataStream> dataStreams = state.metadata().dataStreams();
- if (dataStreams == null) {
- return null;
- }
- DataStream ds = dataStreams.get(dataStream.getName());
- if (ds == null) {
- return null;
- }
- Index writeIndex = ds.getWriteIndex();
- if (writeIndex == null) {
- return null;
- }
- return state.metadata().index(writeIndex);
- }
-
@Override
protected Iterable<ProfilingDataStream> getManagedIndices() {
return PROFILING_DATASTREAMS;
@@ -185,7 +169,7 @@ public void onFailure(Exception e) {
/**
* A datastream that is used by Universal Profiling.
*/
- static class ProfilingDataStream implements AbstractProfilingPersistenceManager.ProfilingIndexAbstraction {
+ static class ProfilingDataStream implements ProfilingIndexAbstraction {
private final String name;
private final int version;
private final List migrations;
@@ -226,6 +210,23 @@ public List<Migration> getMigrations(int currentIndexTemplateVersion) {
: Collections.emptyList();
}
+ @Override
+ public IndexMetadata indexMetadata(ClusterState state) {
+ Map<String, DataStream> dataStreams = state.metadata().dataStreams();
+ if (dataStreams == null) {
+ return null;
+ }
+ DataStream ds = dataStreams.get(this.getName());
+ if (ds == null) {
+ return null;
+ }
+ Index writeIndex = ds.getWriteIndex();
+ if (writeIndex == null) {
+ return null;
+ }
+ return state.metadata().index(writeIndex);
+ }
+
@Override
public String toString() {
return getName();
@@ -248,4 +249,22 @@ public int hashCode() {
return Objects.hash(name, version);
}
}
+
+ public static boolean isAllResourcesCreated(ClusterState state, IndexStateResolver indexStateResolver) {
+ for (ProfilingDataStream profilingDataStream : PROFILING_DATASTREAMS) {
+ if (indexStateResolver.getIndexState(state, profilingDataStream).getStatus() != IndexStatus.UP_TO_DATE) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public static boolean isAnyResourceTooOld(ClusterState state, IndexStateResolver indexStateResolver) {
+ for (ProfilingDataStream profilingDataStream : PROFILING_DATASTREAMS) {
+ if (indexStateResolver.getIndexState(state, profilingDataStream).getStatus() == IndexStatus.TOO_OLD) {
+ return true;
+ }
+ }
+ return false;
+ }
}
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexAbstraction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexAbstraction.java
new file mode 100644
index 0000000000000..e89010cca353c
--- /dev/null
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexAbstraction.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.profiling;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+
+import java.util.List;
+
+/**
+ * An index that is used by Universal Profiling.
+ */
+interface ProfilingIndexAbstraction {
+ String getName();
+
+ int getVersion();
+
+ List<Migration> getMigrations(int currentIndexTemplateVersion);
+
+ /**
+ * Extracts the appropriate index metadata for a given index from the cluster state.
+ *
+ * @param state Current cluster state. Never null.
+ * @return The corresponding index metadata or null if there are none.
+ */
+ IndexMetadata indexMetadata(ClusterState state);
+}
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexManager.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexManager.java
index fe5188ce7d020..746159c23dda0 100644
--- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexManager.java
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexManager.java
@@ -39,7 +39,7 @@
/**
* Creates all indices that are required for using Elastic Universal Profiling.
*/
-public class ProfilingIndexManager extends AbstractProfilingPersistenceManager<ProfilingIndexManager.ProfilingIndex> {
+class ProfilingIndexManager extends AbstractProfilingPersistenceManager<ProfilingIndexManager.ProfilingIndex> {
// For testing
public static final List<ProfilingIndex> PROFILING_INDICES = List.of(
ProfilingIndex.regular(
@@ -68,8 +68,8 @@ public class ProfilingIndexManager extends AbstractProfilingPersistenceManager
indexState,
ActionListener<? super ActionResponse> listener
) {
- Status status = indexState.getStatus();
+ IndexStatus status = indexState.getStatus();
switch (status) {
case NEEDS_CREATION -> createIndex(clusterState, indexState.getIndex(), listener);
case NEEDS_VERSION_BUMP -> bumpVersion(clusterState, indexState.getIndex(), listener);
@@ -91,38 +91,6 @@ protected void onIndexState(
}
}
- @Override
- protected IndexMetadata indexMetadata(ClusterState state, ProfilingIndex index) {
- Map<String, IndexMetadata> indicesMetadata = state.metadata().indices();
- if (indicesMetadata == null) {
- return null;
- }
- IndexMetadata metadata = indicesMetadata.get(index.toString());
- // prioritize the most recent generation from the current version
- if (metadata == null && index.isKvIndex()) {
- metadata = indicesMetadata.entrySet()
- .stream()
- .filter(e -> index.isMatchWithoutGeneration(e.getKey()))
- // use the most recent index to make sure we use the most recent version info from the _meta field
- .max(Comparator.comparingLong(e -> e.getValue().getCreationDate()))
- .map(Map.Entry::getValue)
- .orElse(null);
- }
-
- // attempt to find an index from an earlier generation
- if (metadata == null) {
- metadata = indicesMetadata.entrySet()
- .stream()
- .filter(e -> index.isMatchWithoutVersion(e.getKey()))
- // use the most recent index to make sure we use the most recent version info from the _meta field
- .max(Comparator.comparingLong(e -> e.getValue().getCreationDate()))
- .map(Map.Entry::getValue)
- .orElse(null);
- }
-
- return metadata;
- }
-
private void bumpVersion(ClusterState state, ProfilingIndex index, ActionListener super ActionResponse> listener) {
if (index.getOnVersionBump() == OnVersionBump.DELETE_OLD) {
Map<String, IndexMetadata> indicesMetadata = state.metadata().indices();
@@ -377,6 +345,38 @@ public List<Migration> getMigrations(int currentIndexTemplateVersion) {
: Collections.emptyList();
}
+ @Override
+ public IndexMetadata indexMetadata(ClusterState state) {
+ Map<String, IndexMetadata> indicesMetadata = state.metadata().indices();
+ if (indicesMetadata == null) {
+ return null;
+ }
+ IndexMetadata metadata = indicesMetadata.get(this.toString());
+ // prioritize the most recent generation from the current version
+ if (metadata == null && isKvIndex()) {
+ metadata = indicesMetadata.entrySet()
+ .stream()
+ .filter(e -> isMatchWithoutGeneration(e.getKey()))
+ // use the most recent index to make sure we use the most recent version info from the _meta field
+ .max(Comparator.comparingLong(e -> e.getValue().getCreationDate()))
+ .map(Map.Entry::getValue)
+ .orElse(null);
+ }
+
+ // attempt to find an index from an earlier generation
+ if (metadata == null) {
+ metadata = indicesMetadata.entrySet()
+ .stream()
+ .filter(e -> isMatchWithoutVersion(e.getKey()))
+ // use the most recent index to make sure we use the most recent version info from the _meta field
+ .max(Comparator.comparingLong(e -> e.getValue().getCreationDate()))
+ .map(Map.Entry::getValue)
+ .orElse(null);
+ }
+
+ return metadata;
+ }
+
public OnVersionBump getOnVersionBump() {
return onVersionBump;
}
@@ -410,4 +410,22 @@ public int hashCode() {
return Objects.hash(namePrefix, version, generation, onVersionBump);
}
}
+
+ public static boolean isAllResourcesCreated(ClusterState state, IndexStateResolver indexStateResolver) {
+ for (ProfilingIndex profilingIndex : PROFILING_INDICES) {
+ if (indexStateResolver.getIndexState(state, profilingIndex).getStatus() != IndexStatus.UP_TO_DATE) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public static boolean isAnyResourceTooOld(ClusterState state, IndexStateResolver indexStateResolver) {
+ for (ProfilingIndex profilingIndex : PROFILING_INDICES) {
+ if (indexStateResolver.getIndexState(state, profilingIndex).getStatus() == IndexStatus.TOO_OLD) {
+ return true;
+ }
+ }
+ return false;
+ }
}
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java
index 2d5d633bdccfb..49e436ea4251b 100644
--- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java
@@ -55,14 +55,22 @@ public class ProfilingPlugin extends Plugin implements ActionPlugin {
Setting.Property.NodeScope,
Setting.Property.Dynamic
);
+
+ // *Internal* setting meant as an escape hatch if we need to skip the check for outdated indices for some reason.
+ public static final Setting<Boolean> PROFILING_CHECK_OUTDATED_INDICES = Setting.boolSetting(
+ "xpack.profiling.check_outdated_indices",
+ true,
+ Setting.Property.NodeScope,
+ Setting.Property.Dynamic
+ );
public static final String PROFILING_THREAD_POOL_NAME = "profiling";
private final Settings settings;
private final boolean enabled;
private final SetOnce<ProfilingIndexTemplateRegistry> registry = new SetOnce<>();
-
private final SetOnce<ProfilingIndexManager> indexManager = new SetOnce<>();
private final SetOnce<ProfilingDataStreamManager> dataStreamManager = new SetOnce<>();
+ private final SetOnce<IndexStateResolver> indexStateResolver = new SetOnce<>();
public ProfilingPlugin(Settings settings) {
this.settings = settings;
@@ -88,8 +96,11 @@ public Collection createComponents(
) {
logger.info("Profiling is {}", enabled ? "enabled" : "disabled");
registry.set(new ProfilingIndexTemplateRegistry(settings, clusterService, threadPool, client, xContentRegistry));
- indexManager.set(new ProfilingIndexManager(threadPool, client, clusterService));
- dataStreamManager.set(new ProfilingDataStreamManager(threadPool, client, clusterService));
+ indexStateResolver.set(new IndexStateResolver(PROFILING_CHECK_OUTDATED_INDICES.get(settings)));
+ clusterService.getClusterSettings().addSettingsUpdateConsumer(PROFILING_CHECK_OUTDATED_INDICES, this::updateCheckOutdatedIndices);
+
+ indexManager.set(new ProfilingIndexManager(threadPool, client, clusterService, indexStateResolver.get()));
+ dataStreamManager.set(new ProfilingDataStreamManager(threadPool, client, clusterService, indexStateResolver.get()));
// set initial value
updateTemplatesEnabled(PROFILING_TEMPLATES_ENABLED.get(settings));
clusterService.getClusterSettings().addSettingsUpdateConsumer(PROFILING_TEMPLATES_ENABLED, this::updateTemplatesEnabled);
@@ -103,6 +114,13 @@ public Collection createComponents(
}
}
+ public void updateCheckOutdatedIndices(boolean newValue) {
+ if (newValue == false) {
+ logger.info("profiling will ignore outdated indices");
+ }
+ indexStateResolver.get().setCheckOutdatedIndices(newValue);
+ }
+
public void updateTemplatesEnabled(boolean newValue) {
if (newValue == false) {
logger.info("profiling index templates will not be installed or reinstalled");
@@ -134,6 +152,7 @@ public List getRestHandlers(
public List<Setting<?>> getSettings() {
return List.of(
PROFILING_TEMPLATES_ENABLED,
+ PROFILING_CHECK_OUTDATED_INDICES,
TransportGetStackTracesAction.PROFILING_MAX_STACKTRACE_QUERY_SLICES,
TransportGetStackTracesAction.PROFILING_MAX_DETAIL_QUERY_SLICES,
TransportGetStackTracesAction.PROFILING_QUERY_REALTIME
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java
index 2ea1799bc60e6..abac8971596a1 100644
--- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java
@@ -24,7 +24,6 @@
import org.elasticsearch.xpack.core.XPackSettings;
public class TransportGetStatusAction extends TransportMasterNodeAction<GetStatusAction.Request, GetStatusAction.Response> {
-
@Inject
public TransportGetStatusAction(
TransportService transportService,
@@ -53,10 +52,20 @@ protected void masterOperation(
ClusterState state,
ActionListener listener
) {
+ IndexStateResolver indexStateResolver = new IndexStateResolver(getValue(state, ProfilingPlugin.PROFILING_CHECK_OUTDATED_INDICES));
+
boolean pluginEnabled = getValue(state, XPackSettings.PROFILING_ENABLED);
boolean resourceManagementEnabled = getValue(state, ProfilingPlugin.PROFILING_TEMPLATES_ENABLED);
- boolean resourcesCreated = ProfilingIndexTemplateRegistry.isAllResourcesCreated(state, clusterService.getSettings());
- listener.onResponse(new GetStatusAction.Response(pluginEnabled, resourceManagementEnabled, resourcesCreated));
+
+ boolean templatesCreated = ProfilingIndexTemplateRegistry.isAllResourcesCreated(state, clusterService.getSettings());
+ boolean indicesCreated = ProfilingIndexManager.isAllResourcesCreated(state, indexStateResolver);
+ boolean dataStreamsCreated = ProfilingDataStreamManager.isAllResourcesCreated(state, indexStateResolver);
+ boolean resourcesCreated = templatesCreated && indicesCreated && dataStreamsCreated;
+
+ boolean indicesPre891 = ProfilingIndexManager.isAnyResourceTooOld(state, indexStateResolver);
+ boolean dataStreamsPre891 = ProfilingDataStreamManager.isAnyResourceTooOld(state, indexStateResolver);
+ boolean anyPre891Data = indicesPre891 || dataStreamsPre891;
+ listener.onResponse(new GetStatusAction.Response(pluginEnabled, resourceManagementEnabled, resourcesCreated, anyPre891Data));
}
private boolean getValue(ClusterState state, Setting<Boolean> setting) {
diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingDataStreamManagerTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingDataStreamManagerTests.java
index 90acff05798de..0b762b5eb45da 100644
--- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingDataStreamManagerTests.java
+++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingDataStreamManagerTests.java
@@ -48,6 +48,7 @@
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
+import org.hamcrest.Matchers;
import org.junit.After;
import org.junit.Before;
@@ -60,7 +61,9 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
@@ -73,6 +76,7 @@ public class ProfilingDataStreamManagerTests extends ESTestCase {
private VerifyingClient client;
private List<ProfilingDataStreamManager.ProfilingDataStream> managedDataStreams;
private int indexTemplateVersion;
+ private IndexStateResolver indexStateResolver;
@Before
public void createRegistryAndClient() {
@@ -82,16 +86,17 @@ public void createRegistryAndClient() {
clusterService = ClusterServiceUtils.createClusterService(threadPool);
managedDataStreams = ProfilingDataStreamManager.PROFILING_DATASTREAMS;
indexTemplateVersion = ProfilingIndexTemplateRegistry.INDEX_TEMPLATE_VERSION;
- datastreamManager = new ProfilingDataStreamManager(threadPool, client, clusterService) {
- @Override
- protected boolean isAllResourcesCreated(ClusterChangedEvent event, Settings settings) {
- return templatesCreated.get();
- }
-
+ indexStateResolver = new IndexStateResolver(true) {
@Override
protected int getIndexTemplateVersion() {
return indexTemplateVersion;
}
+ };
+ datastreamManager = new ProfilingDataStreamManager(threadPool, client, clusterService, indexStateResolver) {
+ @Override
+ protected boolean areAllIndexTemplatesCreated(ClusterChangedEvent event, Settings settings) {
+ return templatesCreated.get();
+ }
@Override
protected Iterable<ProfilingDataStreamManager.ProfilingDataStream> getManagedIndices() {
@@ -161,6 +166,7 @@ public void testThatRedIndexIsNotTouched() throws Exception {
List.of(existingDataStream.withVersion(0)),
nodes,
IndexMetadata.State.OPEN,
+ IndexVersion.current(),
false
);
@@ -174,6 +180,62 @@ public void testThatRedIndexIsNotTouched() throws Exception {
calledTimes.set(0);
}
+ public void testThatOutdatedDataStreamIsDetectedIfCheckEnabled() throws Exception {
+ DiscoveryNode node = DiscoveryNodeUtils.create("node");
+ DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
+ templatesCreated.set(true);
+
+ ProfilingDataStreamManager.ProfilingDataStream existingDataStream = randomFrom(ProfilingDataStreamManager.PROFILING_DATASTREAMS);
+ ClusterChangedEvent event = createClusterChangedEvent(
+ List.of(existingDataStream),
+ nodes,
+ IndexMetadata.State.OPEN,
+ // This is an outdated version that requires indices to be deleted upon migration
+ IndexVersion.V_8_8_2,
+ true
+ );
+
+ AtomicInteger calledTimes = new AtomicInteger(0);
+
+ client.setVerifier((action, request, listener) -> verifyDataStreamInstalled(calledTimes, action, request, listener));
+ datastreamManager.clusterChanged(event);
+ // should not create this index because the one that has changed is too old. Depending on the point at which the index is
+ // evaluated, other indices may have already been created.
+ assertBusy(
+ () -> assertThat(
+ calledTimes.get(),
+ allOf(greaterThanOrEqualTo(0), Matchers.lessThan(ProfilingDataStreamManager.PROFILING_DATASTREAMS.size()))
+ )
+ );
+ calledTimes.set(0);
+ }
+
+ public void testThatOutdatedDataStreamIsIgnoredIfCheckDisabled() throws Exception {
+ // disable the check
+ indexStateResolver.setCheckOutdatedIndices(false);
+
+ DiscoveryNode node = DiscoveryNodeUtils.create("node");
+ DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
+ templatesCreated.set(true);
+
+ ProfilingDataStreamManager.ProfilingDataStream existingDataStream = randomFrom(ProfilingDataStreamManager.PROFILING_DATASTREAMS);
+ ClusterChangedEvent event = createClusterChangedEvent(
+ List.of(existingDataStream),
+ nodes,
+ IndexMetadata.State.OPEN,
+ IndexVersion.V_8_8_2,
+ true
+ );
+
+ AtomicInteger calledTimes = new AtomicInteger(0);
+
+ client.setVerifier((action, request, listener) -> verifyDataStreamInstalled(calledTimes, action, request, listener));
+ datastreamManager.clusterChanged(event);
+ // should create all indices but consider the current one up-to-date
+ assertBusy(() -> assertThat(calledTimes.get(), equalTo(ProfilingDataStreamManager.PROFILING_DATASTREAMS.size() - 1)));
+ calledTimes.set(0);
+ }
+
public void testThatClosedIndexIsNotTouched() throws Exception {
DiscoveryNode node = DiscoveryNodeUtils.create("node");
DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
@@ -185,6 +247,7 @@ public void testThatClosedIndexIsNotTouched() throws Exception {
List.of(existingDataStream.withVersion(0)),
nodes,
IndexMetadata.State.CLOSE,
+ IndexVersion.current(),
true
);
@@ -379,16 +442,17 @@ private ClusterChangedEvent createClusterChangedEvent(
Iterable<ProfilingDataStreamManager.ProfilingDataStream> existingDataStreams,
DiscoveryNodes nodes
) {
- return createClusterChangedEvent(existingDataStreams, nodes, IndexMetadata.State.OPEN, true);
+ return createClusterChangedEvent(existingDataStreams, nodes, IndexMetadata.State.OPEN, IndexVersion.current(), true);
}
private ClusterChangedEvent createClusterChangedEvent(
Iterable<ProfilingDataStreamManager.ProfilingDataStream> existingDataStreams,
DiscoveryNodes nodes,
IndexMetadata.State state,
+ IndexVersion indexVersion,
boolean allShardsAssigned
) {
- ClusterState cs = createClusterState(Settings.EMPTY, existingDataStreams, nodes, state, allShardsAssigned);
+ ClusterState cs = createClusterState(Settings.EMPTY, existingDataStreams, nodes, state, indexVersion, allShardsAssigned);
ClusterChangedEvent realEvent = new ClusterChangedEvent(
"created-from-test",
cs,
@@ -405,6 +469,7 @@ private ClusterState createClusterState(
Iterable<ProfilingDataStreamManager.ProfilingDataStream> existingDataStreams,
DiscoveryNodes nodes,
IndexMetadata.State state,
+ IndexVersion indexVersion,
boolean allShardsAssigned
) {
Metadata.Builder metadataBuilder = Metadata.builder();
@@ -427,7 +492,7 @@ private ClusterState createClusterState(
metadataBuilder.put(ds);
IndexMetadata.Builder builder = new IndexMetadata.Builder(writeIndexName);
builder.state(state);
- builder.settings(indexSettings(IndexVersion.current(), 1, 1).put(IndexMetadata.SETTING_INDEX_UUID, writeIndex.getUUID()));
+ builder.settings(indexSettings(indexVersion, 1, 1).put(IndexMetadata.SETTING_INDEX_UUID, writeIndex.getUUID()));
builder.putMapping(
new MappingMetadata(
MapperService.SINGLE_MAPPING_NAME,
diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingIndexManagerTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingIndexManagerTests.java
index 8600a4fcc70b3..3efd1d4c041f5 100644
--- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingIndexManagerTests.java
+++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingIndexManagerTests.java
@@ -49,6 +49,7 @@
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
+import org.hamcrest.Matchers;
import org.junit.After;
import org.junit.Before;
@@ -60,7 +61,9 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
@@ -73,6 +76,7 @@ public class ProfilingIndexManagerTests extends ESTestCase {
private VerifyingClient client;
private List managedIndices;
private int indexTemplateVersion;
+ private IndexStateResolver indexStateResolver;
@Before
public void createRegistryAndClient() {
@@ -82,9 +86,15 @@ public void createRegistryAndClient() {
clusterService = ClusterServiceUtils.createClusterService(threadPool);
managedIndices = ProfilingIndexManager.PROFILING_INDICES;
indexTemplateVersion = ProfilingIndexTemplateRegistry.INDEX_TEMPLATE_VERSION;
- indexManager = new ProfilingIndexManager(threadPool, client, clusterService) {
+ indexStateResolver = new IndexStateResolver(true) {
@Override
- protected boolean isAllResourcesCreated(ClusterChangedEvent event, Settings settings) {
+ protected int getIndexTemplateVersion() {
+ return indexTemplateVersion;
+ }
+ };
+ indexManager = new ProfilingIndexManager(threadPool, client, clusterService, indexStateResolver) {
+ @Override
+ protected boolean areAllIndexTemplatesCreated(ClusterChangedEvent event, Settings settings) {
return templatesCreated.get();
}
@@ -92,11 +102,6 @@ protected boolean isAllResourcesCreated(ClusterChangedEvent event, Settings sett
protected Iterable getManagedIndices() {
return managedIndices;
}
-
- @Override
- protected int getIndexTemplateVersion() {
- return indexTemplateVersion;
- }
};
indexManager.setTemplatesEnabled(true);
}
@@ -161,6 +166,7 @@ public void testThatRedIndexIsNotTouched() throws Exception {
List.of(existingIndex.withVersion(0)),
nodes,
IndexMetadata.State.OPEN,
+ IndexVersion.current(),
false
);
@@ -174,6 +180,62 @@ public void testThatRedIndexIsNotTouched() throws Exception {
calledTimes.set(0);
}
+ public void testThatOutdatedIndexIsDetectedIfCheckEnabled() throws Exception {
+ DiscoveryNode node = DiscoveryNodeUtils.create("node");
+ DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
+ templatesCreated.set(true);
+
+ ProfilingIndexManager.ProfilingIndex existingIndex = randomFrom(ProfilingIndexManager.PROFILING_INDICES);
+ ClusterChangedEvent event = createClusterChangedEvent(
+ List.of(existingIndex.withVersion(0)),
+ nodes,
+ IndexMetadata.State.OPEN,
+ // This is an outdated version that requires indices to be deleted upon migration
+ IndexVersion.V_8_8_2,
+ true
+ );
+
+ AtomicInteger calledTimes = new AtomicInteger(0);
+
+ client.setVerifier((action, request, listener) -> verifyIndexInstalled(calledTimes, action, request, listener));
+ indexManager.clusterChanged(event);
+ // should not create this index because the one that has changed is too old. Depending on the point at which the index is
+ // evaluated, other indices may have already been created.
+ assertBusy(
+ () -> assertThat(
+ calledTimes.get(),
+ allOf(greaterThanOrEqualTo(0), Matchers.lessThan(ProfilingIndexManager.PROFILING_INDICES.size()))
+ )
+ );
+ calledTimes.set(0);
+ }
+
+ public void testThatOutdatedIndexIsIgnoredIfCheckDisabled() throws Exception {
+ // disable the check
+ indexStateResolver.setCheckOutdatedIndices(false);
+
+ DiscoveryNode node = DiscoveryNodeUtils.create("node");
+ DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
+ templatesCreated.set(true);
+
+ ProfilingIndexManager.ProfilingIndex existingIndex = randomFrom(ProfilingIndexManager.PROFILING_INDICES);
+ ClusterChangedEvent event = createClusterChangedEvent(
+ List.of(existingIndex),
+ nodes,
+ IndexMetadata.State.OPEN,
+ IndexVersion.V_8_8_2,
+ true
+ );
+
+ AtomicInteger calledTimes = new AtomicInteger(0);
+
+ client.setVerifier((action, request, listener) -> verifyIndexInstalled(calledTimes, action, request, listener));
+ indexManager.clusterChanged(event);
+ // should create all indices but consider the current one up-to-date
+ assertBusy(() -> assertThat(calledTimes.get(), equalTo(ProfilingIndexManager.PROFILING_INDICES.size() - 1)));
+ calledTimes.set(0);
+ }
+
public void testThatClosedIndexIsNotTouched() throws Exception {
DiscoveryNode node = DiscoveryNodeUtils.create("node");
DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
@@ -185,6 +247,7 @@ public void testThatClosedIndexIsNotTouched() throws Exception {
List.of(existingIndex.withVersion(0)),
nodes,
IndexMetadata.State.CLOSE,
+ IndexVersion.current(),
true
);
@@ -401,16 +464,17 @@ private ClusterChangedEvent createClusterChangedEvent(
Iterable existingIndices,
DiscoveryNodes nodes
) {
- return createClusterChangedEvent(existingIndices, nodes, IndexMetadata.State.OPEN, true);
+ return createClusterChangedEvent(existingIndices, nodes, IndexMetadata.State.OPEN, IndexVersion.current(), true);
}
private ClusterChangedEvent createClusterChangedEvent(
Iterable existingIndices,
DiscoveryNodes nodes,
IndexMetadata.State state,
+ IndexVersion indexVersion,
boolean allShardsAssigned
) {
- ClusterState cs = createClusterState(Settings.EMPTY, existingIndices, nodes, state, allShardsAssigned);
+ ClusterState cs = createClusterState(Settings.EMPTY, existingIndices, nodes, state, indexVersion, allShardsAssigned);
ClusterChangedEvent realEvent = new ClusterChangedEvent(
"created-from-test",
cs,
@@ -427,6 +491,7 @@ private ClusterState createClusterState(
Iterable existingIndices,
DiscoveryNodes nodes,
IndexMetadata.State state,
+ IndexVersion indexVersion,
boolean allShardsAssigned
) {
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
@@ -436,7 +501,7 @@ private ClusterState createClusterState(
Index index = new Index(indexName, indexName);
IndexMetadata.Builder builder = new IndexMetadata.Builder(indexName);
builder.state(state);
- builder.settings(indexSettings(IndexVersion.current(), 1, 1).put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()));
+ builder.settings(indexSettings(indexVersion, 1, 1).put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()));
builder.putMapping(
new MappingMetadata(
MapperService.SINGLE_MAPPING_NAME,
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java
index 7b7585cacf733..0cc37f7ed3945 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java
@@ -14,7 +14,6 @@
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
-import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
@@ -1309,12 +1308,11 @@ private InternalAggregation doQuery(
indexWriter.close();
DirectoryReader indexReader = DirectoryReader.open(directory);
- IndexSearcher indexSearcher = newIndexSearcher(indexReader);
- try (AggregationContext context = createAggregationContext(indexSearcher, query, fieldType)) {
+ try (AggregationContext context = createAggregationContext(indexReader, query, fieldType)) {
Aggregator aggregator = createAggregator(aggBuilder, context);
aggregator.preCollection();
- indexSearcher.search(query, aggregator.asCollector());
+ context.searcher().search(query, aggregator.asCollector());
aggregator.postCollection();
return aggregator.buildTopLevel();
} finally {
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java
index 5d8aa420a9d74..d680752efc498 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java
@@ -11,7 +11,6 @@
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.tests.index.RandomIndexWriter;
import org.elasticsearch.action.index.IndexRequest;
@@ -89,7 +88,6 @@ public void testMissingFields() throws IOException {
indexWriter.close();
DirectoryReader indexReader = DirectoryReader.open(directory);
- IndexSearcher indexSearcher = newIndexSearcher(indexReader);
DateFieldMapper.DateFieldType timestampFieldType = new DateFieldMapper.DateFieldType(timestampField);
MappedFieldType valueFieldType = new NumberFieldMapper.NumberFieldType(valueField, NumberFieldMapper.NumberType.LONG);
@@ -108,7 +106,7 @@ public void testMissingFields() throws IOException {
metricAgg.forEach(compositeBuilder::subAggregation);
CompositeAggregation composite = searchAndReduce(
- indexSearcher,
+ indexReader,
new AggTestConfig(compositeBuilder, timestampFieldType, valueFieldType)
);
indexReader.close();
@@ -148,7 +146,6 @@ public void testCorrectFields() throws IOException {
indexWriter.close();
DirectoryReader indexReader = DirectoryReader.open(directory);
- IndexSearcher indexSearcher = newIndexSearcher(indexReader);
DateFieldMapper.DateFieldType timestampFieldType = new DateFieldMapper.DateFieldType(timestampField);
MappedFieldType valueFieldType = new NumberFieldMapper.NumberFieldType(valueField, NumberFieldMapper.NumberType.LONG);
@@ -169,7 +166,7 @@ public void testCorrectFields() throws IOException {
metricAgg.forEach(compositeBuilder::subAggregation);
CompositeAggregation composite = searchAndReduce(
- indexSearcher,
+ indexReader,
new AggTestConfig(compositeBuilder, timestampFieldType, valueFieldType)
);
indexReader.close();
@@ -206,7 +203,6 @@ public void testNumericTerms() throws IOException {
indexWriter.close();
DirectoryReader indexReader = DirectoryReader.open(directory);
- IndexSearcher indexSearcher = newIndexSearcher(indexReader);
MappedFieldType valueFieldType = new NumberFieldMapper.NumberFieldType(valueField, NumberFieldMapper.NumberType.LONG);
@@ -221,7 +217,7 @@ public void testNumericTerms() throws IOException {
List metricAgg = createAggregationBuilders(singletonList(metricConfig));
metricAgg.forEach(compositeBuilder::subAggregation);
- CompositeAggregation composite = searchAndReduce(indexSearcher, new AggTestConfig(compositeBuilder, valueFieldType));
+ CompositeAggregation composite = searchAndReduce(indexReader, new AggTestConfig(compositeBuilder, valueFieldType));
indexReader.close();
directory.close();
@@ -259,7 +255,6 @@ public void testEmptyCounts() throws IOException {
indexWriter.close();
DirectoryReader indexReader = DirectoryReader.open(directory);
- IndexSearcher indexSearcher = newIndexSearcher(indexReader);
DateFieldMapper.DateFieldType timestampFieldType = new DateFieldMapper.DateFieldType(timestampField);
MappedFieldType valueFieldType = new NumberFieldMapper.NumberFieldType(valueField, NumberFieldMapper.NumberType.LONG);
@@ -279,7 +274,7 @@ public void testEmptyCounts() throws IOException {
metricAgg.forEach(compositeBuilder::subAggregation);
CompositeAggregation composite = searchAndReduce(
- indexSearcher,
+ indexReader,
new AggTestConfig(compositeBuilder, timestampFieldType, valueFieldType)
);
indexReader.close();
@@ -449,7 +444,6 @@ public void testMissingBuckets() throws IOException {
indexWriter.close();
DirectoryReader indexReader = DirectoryReader.open(directory);
- IndexSearcher indexSearcher = newIndexSearcher(indexReader);
MappedFieldType valueFieldType = new NumberFieldMapper.NumberFieldType(valueField, NumberFieldMapper.NumberType.LONG);
MappedFieldType metricFieldType = new NumberFieldMapper.NumberFieldType(metricField, NumberFieldMapper.NumberType.LONG);
@@ -465,10 +459,7 @@ public void testMissingBuckets() throws IOException {
List metricAgg = createAggregationBuilders(singletonList(metricConfig));
metricAgg.forEach(compositeBuilder::subAggregation);
- CompositeAggregation composite = searchAndReduce(
- indexSearcher,
- new AggTestConfig(compositeBuilder, valueFieldType, metricFieldType)
- );
+ CompositeAggregation composite = searchAndReduce(indexReader, new AggTestConfig(compositeBuilder, valueFieldType, metricFieldType));
indexReader.close();
directory.close();
@@ -516,7 +507,6 @@ public void testTimezone() throws IOException {
indexWriter.close();
DirectoryReader indexReader = DirectoryReader.open(directory);
- IndexSearcher indexSearcher = newIndexSearcher(indexReader);
DateFieldMapper.DateFieldType timestampFieldType = new DateFieldMapper.DateFieldType(timestampField);
MappedFieldType valueFieldType = new NumberFieldMapper.NumberFieldType(valueField, NumberFieldMapper.NumberType.LONG);
@@ -539,7 +529,7 @@ public void testTimezone() throws IOException {
metricAgg.forEach(compositeBuilder::subAggregation);
CompositeAggregation composite = searchAndReduce(
- indexSearcher,
+ indexReader,
new AggTestConfig(compositeBuilder, timestampFieldType, valueFieldType)
);
indexReader.close();
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java
index c0e1053d008e8..d2b1a1d7e6772 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java
@@ -13,7 +13,6 @@
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.apache.lucene.tests.analysis.MockAnalyzer;
@@ -682,7 +681,6 @@ private void executeTestCase(
Map fieldTypeLookup = createFieldTypes(config);
Directory dir = index(docs, fieldTypeLookup);
IndexReader reader = DirectoryReader.open(dir);
- IndexSearcher searcher = newSearcher(reader);
String dateHistoField = config.getGroupConfig().getDateHistogram().getField();
final ThreadPool threadPool = new TestThreadPool(getTestName());
@@ -691,7 +689,7 @@ private void executeTestCase(
final SyncRollupIndexer action = new SyncRollupIndexer(
threadPool,
job,
- searcher,
+ reader,
fieldTypeLookup.values().toArray(new MappedFieldType[0]),
fieldTypeLookup.get(dateHistoField)
);
@@ -792,7 +790,7 @@ private Directory index(List