diff --git a/Vagrantfile b/Vagrantfile index 0e94a849350e3..c8eba2bca4a6d 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -69,7 +69,7 @@ Vagrant.configure(2) do |config| config.vm.box = 'elastic/debian-8-x86_64' deb_common config, box, extra: <<-SHELL # this sometimes gets a bad ip, and doesn't appear to be needed - rm /etc/apt/sources.list.d/http_debian_net_debian.list + rm -f /etc/apt/sources.list.d/http_debian_net_debian.list SHELL end end @@ -256,10 +256,6 @@ def linux_common(config, touch /is_vagrant_vm # for consistency between linux and windows SHELL - config.vm.provision 'jdk-11', type: 'shell', inline: <<-SHELL - curl -sSL https://download.oracle.com/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz | tar xz -C /opt/ - SHELL - # This prevents leftovers from previous tests using the # same VM from messing up the current test config.vm.provision 'clean es installs in tmp', run: 'always', type: 'shell', inline: <<-SHELL @@ -355,11 +351,10 @@ def sh_install_deps(config, return 1 } cat \<\ /etc/profile.d/java_home.sh -if [ -z "\\\$JAVA_HOME" ]; then - export JAVA_HOME=/opt/jdk-11.0.2 +if [ ! -z "\\\$JAVA_HOME" ]; then + export SYSTEM_JAVA_HOME=\\\$JAVA_HOME + unset JAVA_HOME fi -export SYSTEM_JAVA_HOME=\\\$JAVA_HOME -unset JAVA_HOME JAVA ensure tar ensure curl @@ -416,16 +411,9 @@ def windows_common(config, name) $ps_prompt | Out-File $PsHome/Microsoft.PowerShell_profile.ps1 SHELL - config.vm.provision 'windows-jdk-11', type: 'shell', inline: <<-SHELL - New-Item -ItemType Directory -Force -Path "C:/java" - Invoke-WebRequest "https://download.oracle.com/java/GA/jdk11/9/GPL/openjdk-11.0.2_windows-x64_bin.zip" -OutFile "C:/java/jdk-11.zip" - Expand-Archive -Path "C:/java/jdk-11.zip" -DestinationPath "C:/java/" - SHELL - config.vm.provision 'set env variables', type: 'shell', inline: <<-SHELL $ErrorActionPreference = "Stop" [Environment]::SetEnvironmentVariable("PACKAGING_ARCHIVES", "C:/project/build/packaging/archives", "Machine") - [Environment]::SetEnvironmentVariable("SYSTEM_JAVA_HOME", "C:\java\jdk-11.0.2", "Machine") [Environment]::SetEnvironmentVariable("PACKAGING_TESTS", "C:/project/build/packaging/tests", "Machine") [Environment]::SetEnvironmentVariable("JAVA_HOME", $null, "Machine") SHELL diff --git a/build.gradle b/build.gradle index bb75439bcae4e..8794a1f930523 100644 --- a/build.gradle +++ b/build.gradle @@ -619,21 +619,6 @@ allprojects { } } -subprojects { - // Common config when running with a FIPS-140 runtime JVM - if (project.ext.has("inFipsJvm") && project.ext.inFipsJvm) { - tasks.withType(Test) { - systemProperty 'javax.net.ssl.trustStorePassword', 'password' - systemProperty 'javax.net.ssl.keyStorePassword', 'password' - } - project.pluginManager.withPlugin("elasticsearch.testclusters") { - project.testClusters.all { - systemProperty 'javax.net.ssl.trustStorePassword', 'password' - systemProperty 'javax.net.ssl.keyStorePassword', 'password' - } - } - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 57a35052a3e80..51300ffc628c9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -116,6 +116,22 @@ class BuildPlugin implements Plugin { configureTestTasks(project) configurePrecommit(project) configureDependenciesInfo(project) + + // Common config when running with a FIPS-140 runtime JVM + // Need to do it here to support external plugins + if 
(project.ext.inFipsJvm) { + project.tasks.withType(Test) { + systemProperty 'javax.net.ssl.trustStorePassword', 'password' + systemProperty 'javax.net.ssl.keyStorePassword', 'password' + } + project.pluginManager.withPlugin("elasticsearch.testclusters") { + project.testClusters.all { + systemProperty 'javax.net.ssl.trustStorePassword', 'password' + systemProperty 'javax.net.ssl.keyStorePassword', 'password' + } + } + } + } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy index 110f2fc7e8461..af5d328dc0cad 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy @@ -27,15 +27,15 @@ import org.gradle.api.tasks.Input public class BatsOverVagrantTask extends VagrantCommandTask { @Input - String remoteCommand + Object remoteCommand BatsOverVagrantTask() { command = 'ssh' } - void setRemoteCommand(String remoteCommand) { + void setRemoteCommand(Object remoteCommand) { this.remoteCommand = Objects.requireNonNull(remoteCommand) - setArgs(['--command', remoteCommand]) + setArgs((Iterable) ['--command', remoteCommand]) } @Override diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 8eb200cd19b45..71c9d53467502 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -1,11 +1,18 @@ package org.elasticsearch.gradle.vagrant import org.apache.tools.ant.taskdefs.condition.Os +import org.elasticsearch.gradle.BwcVersions import org.elasticsearch.gradle.FileContentsTask +import org.elasticsearch.gradle.Jdk +import org.elasticsearch.gradle.JdkDownloadPlugin import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.BwcVersions -import org.gradle.api.* +import org.gradle.api.GradleException +import org.gradle.api.InvalidUserDataException +import org.gradle.api.NamedDomainObjectContainer +import org.gradle.api.Plugin +import org.gradle.api.Project +import org.gradle.api.Task import org.gradle.api.artifacts.dsl.RepositoryHandler import org.gradle.api.execution.TaskExecutionAdapter import org.gradle.api.internal.artifacts.dependencies.DefaultProjectDependency @@ -15,6 +22,8 @@ import org.gradle.api.tasks.Exec import org.gradle.api.tasks.StopExecutionException import org.gradle.api.tasks.TaskState +import java.nio.file.Paths + import static java.util.Collections.unmodifiableList class VagrantTestPlugin implements Plugin { @@ -85,8 +94,33 @@ class VagrantTestPlugin implements Plugin { /** extra env vars to pass to vagrant for box configuration **/ Map vagrantBoxEnvVars = [:] + private static final String SYSTEM_JDK_VERSION = "11.0.2+9" + private static final String GRADLE_JDK_VERSION = "12.0.1+12@69cfe15208a647278a19ef0990eea691" + private Jdk linuxSystemJdk; + private Jdk linuxGradleJdk; + private Jdk windowsSystemJdk; + private Jdk windowsGradleJdk; + @Override void apply(Project project) { + project.pluginManager.apply(JdkDownloadPlugin.class) + NamedDomainObjectContainer jdksContainer = (NamedDomainObjectContainer) project.getExtensions().getByName("jdks"); + linuxSystemJdk = jdksContainer.create("linux_system") { + version = 
SYSTEM_JDK_VERSION + platform = "linux" + } + linuxGradleJdk = jdksContainer.create("linux_gradle") { + version = GRADLE_JDK_VERSION + platform = "linux" + } + windowsSystemJdk = jdksContainer.create("windows_system") { + version = SYSTEM_JDK_VERSION + platform = "windows" + } + windowsGradleJdk = jdksContainer.create("windows_gradle") { + version = GRADLE_JDK_VERSION + platform = "windows" + } collectAvailableBoxes(project) @@ -264,7 +298,7 @@ class VagrantTestPlugin implements Plugin { } } - private static void createPrepareVagrantTestEnvTask(Project project) { + private void createPrepareVagrantTestEnvTask(Project project) { File packagingDir = new File(project.buildDir, PACKAGING_CONFIGURATION) File archivesDir = new File(packagingDir, 'archives') @@ -280,7 +314,7 @@ class VagrantTestPlugin implements Plugin { } Task createLinuxRunnerScript = project.tasks.create('createLinuxRunnerScript', FileContentsTask) { - dependsOn copyPackagingTests + dependsOn copyPackagingTests, linuxGradleJdk, linuxSystemJdk file "${testsDir}/run-tests.sh" contents """\ if [ "\$#" -eq 0 ]; then @@ -288,11 +322,15 @@ class VagrantTestPlugin implements Plugin { else test_args=( "\$@" ) fi - "\$SYSTEM_JAVA_HOME"/bin/java -cp "\$PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner "\${test_args[@]}" + + if [ -z "\$SYSTEM_JAVA_HOME" ]; then + export SYSTEM_JAVA_HOME="${-> convertPath(project, linuxSystemJdk.toString()) }" + fi + "${-> convertPath(project, linuxGradleJdk.toString()) }"/bin/java -cp "\$PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner "\${test_args[@]}" """ } Task createWindowsRunnerScript = project.tasks.create('createWindowsRunnerScript', FileContentsTask) { - dependsOn copyPackagingTests + dependsOn copyPackagingTests, windowsGradleJdk, windowsSystemJdk file "${testsDir}/run-tests.ps1" // the use of $args rather than param() here is deliberate because the syntax for array (multivalued) parameters is likely // a little trappy for those unfamiliar with powershell @@ -302,7 +340,8 @@ class VagrantTestPlugin implements Plugin { } else { \$testArgs = \$args } - & "\$Env:SYSTEM_JAVA_HOME"/bin/java -cp "\$Env:PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner @testArgs + \$Env:SYSTEM_JAVA_HOME = "${-> convertPath(project, windowsSystemJdk.toString()) }" + & "${-> convertPath(project, windowsGradleJdk.toString()) }"/bin/java -cp "\$Env:PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner @testArgs exit \$LASTEXITCODE """ } @@ -539,10 +578,10 @@ class VagrantTestPlugin implements Plugin { if (LINUX_BOXES.contains(box)) { Task batsPackagingTest = project.tasks.create("vagrant${boxTask}#batsPackagingTest", BatsOverVagrantTask) { - remoteCommand BATS_TEST_COMMAND + remoteCommand "export SYSTEM_JAVA_HOME=\"${-> convertPath(project, linuxSystemJdk.toString())}\"; " + BATS_TEST_COMMAND boxName box environmentVars vagrantEnvVars - dependsOn up, setupPackagingTest + dependsOn up, setupPackagingTest, linuxSystemJdk finalizedBy halt } @@ -617,4 +656,9 @@ class VagrantTestPlugin implements Plugin { } } } + + // convert the given path from an elasticsearch repo path to a VM path + private String convertPath(Project project, String path) { + return "/elasticsearch/" + project.rootDir.toPath().relativize(Paths.get(path)); + } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java index 8ec979420c0e4..e73a9d1e585e3 100644 --- 
a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java @@ -28,11 +28,13 @@ import org.gradle.api.file.FileTree; import org.gradle.api.specs.Spec; import org.gradle.api.tasks.CacheableTask; +import org.gradle.api.tasks.Classpath; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFile; import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Optional; -import org.gradle.api.tasks.OutputDirectory; +import org.gradle.api.tasks.OutputFile; import org.gradle.api.tasks.PathSensitive; import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.SkipWhenEmpty; @@ -45,6 +47,7 @@ import java.net.URISyntaxException; import java.net.URL; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; import java.util.Arrays; import java.util.Collections; import java.util.Set; @@ -113,7 +116,7 @@ public void setJavaHome(String javaHome) { this.javaHome = javaHome; } - @OutputDirectory + @Internal public File getJarExpandDir() { return new File( new File(getProject().getBuildDir(), "precommit/thirdPartyAudit"), @@ -121,6 +124,11 @@ public File getJarExpandDir() { ); } + @OutputFile + public File getSuccessMarker() { + return new File(getProject().getBuildDir(), "markers/" + getName()); + } + public void ignoreMissingClasses(String... classesOrPackages) { if (classesOrPackages.length == 0) { missingClassExcludes = null; @@ -157,8 +165,7 @@ public Set getMissingClassExcludes() { return missingClassExcludes; } - @InputFiles - @PathSensitive(PathSensitivity.NAME_ONLY) + @Classpath @SkipWhenEmpty public Set getJarsToScan() { // These are SelfResolvingDependency, and some of them backed by file collections, like the Gradle API files, @@ -241,6 +248,10 @@ public void runThirdPartyAudit() throws IOException { } assertNoJarHell(jdkJarHellClasses); + + // Mark successful third party audit check + getSuccessMarker().getParentFile().mkdirs(); + Files.write(getSuccessMarker().toPath(), new byte[]{}); } private void logForbiddenAPIsOutput(String forbiddenApisOutput) { diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java new file mode 100644 index 0000000000000..d3101868e84b6 --- /dev/null +++ b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gradle; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; + +/** + * An outputstream to a File that is lazily opened on the first write. + */ +class LazyFileOutputStream extends OutputStream { + private OutputStream delegate; + + LazyFileOutputStream(File file) { + // use an initial dummy delegate to avoid doing a conditional on every write + this.delegate = new OutputStream() { + private void bootstrap() throws IOException { + file.getParentFile().mkdirs(); + delegate = new FileOutputStream(file); + } + @Override + public void write(int b) throws IOException { + bootstrap(); + delegate.write(b); + } + @Override + public void write(byte b[], int off, int len) throws IOException { + bootstrap(); + delegate.write(b, off, len); + } + }; + } + + @Override + public void write(int b) throws IOException { + delegate.write(b); + } + + @Override + public void write(byte b[], int off, int len) throws IOException { + delegate.write(b, off, len); + } + + @Override + public void close() throws IOException { + delegate.close(); + } +} diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java index 8dd59170039eb..c71b7ba183562 100644 --- a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java +++ b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java @@ -3,14 +3,22 @@ import org.gradle.api.Action; import org.gradle.api.GradleException; import org.gradle.api.Project; +import org.gradle.api.logging.Logger; import org.gradle.api.tasks.Exec; +import org.gradle.api.tasks.Internal; import org.gradle.process.BaseExecSpec; import org.gradle.process.ExecResult; import org.gradle.process.ExecSpec; import org.gradle.process.JavaExecSpec; import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.util.function.Consumer; import java.util.function.Function; /** @@ -19,35 +27,56 @@ @SuppressWarnings("unchecked") public class LoggedExec extends Exec { + private Consumer outputLogger; + public LoggedExec() { - ByteArrayOutputStream output = new ByteArrayOutputStream(); - ByteArrayOutputStream error = new ByteArrayOutputStream(); + if (getLogger().isInfoEnabled() == false) { - setStandardOutput(output); - setErrorOutput(error); setIgnoreExitValue(true); - doLast((unused) -> { - if (getExecResult().getExitValue() != 0) { - try { - getLogger().error("Standard output:"); - getLogger().error(output.toString("UTF-8")); - getLogger().error("Standard error:"); - getLogger().error(error.toString("UTF-8")); - } catch (UnsupportedEncodingException e) { - throw new GradleException("Failed to read exec output", e); - } - throw new GradleException( - String.format( - "Process '%s %s' finished with non-zero exit value %d", - getExecutable(), - getArgs(), - getExecResult().getExitValue() - ) - ); + setSpoolOutput(false); + doLast(task -> { + if (getExecResult().getExitValue() != 0) { + try { + getLogger().error("Output for " + getExecutable() + ":"); + outputLogger.accept(getLogger()); + } catch (Exception e) { + throw new GradleException("Failed to read exec output", e); + } + throw new GradleException( + String.format( + "Process '%s %s' finished with non-zero exit value %d", + getExecutable(), + 
getArgs(), + getExecResult().getExitValue() + ) + ); + } + }); + } + } + + @Internal + public void setSpoolOutput(boolean spoolOutput) { + final OutputStream out; + if (spoolOutput) { + File spoolFile = new File(getProject().getBuildDir() + "/buffered-output/" + this.getName()); + out = new LazyFileOutputStream(spoolFile); + outputLogger = logger -> { + try { + // the file may not exist if the command never output anything + if (Files.exists(spoolFile.toPath())) { + Files.lines(spoolFile.toPath()).forEach(logger::error); } + } catch (IOException e) { + throw new RuntimeException("could not log", e); } - ); + }; + } else { + out = new ByteArrayOutputStream(); + outputLogger = logger -> logger.error(((ByteArrayOutputStream) out).toString(StandardCharsets.UTF_8)); } + setStandardOutput(out); + setErrorOutput(out); } public static ExecResult exec(Project project, Action action) { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java index 99afd0bcbe0ae..7968f4f57cf90 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java @@ -21,7 +21,6 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; -import org.gradle.testkit.runner.GradleRunner; public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTestCase { @@ -29,25 +28,19 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe public static final String PROJECT_NAME = "elasticsearch-build-resources"; public void testUpToDateWithSourcesConfigured() { - GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + getGradleRunner(PROJECT_NAME) .withArguments("clean", "-s") - .withPluginClasspath() .build(); - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + BuildResult result = getGradleRunner(PROJECT_NAME) .withArguments("buildResources", "-s", "-i") - .withPluginClasspath() .build(); assertTaskSuccessful(result, ":buildResources"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); - result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + result = getGradleRunner(PROJECT_NAME) .withArguments("buildResources", "-s", "-i") - .withPluginClasspath() .build(); assertTaskUpToDate(result, ":buildResources"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); @@ -55,10 +48,8 @@ public void testUpToDateWithSourcesConfigured() { } public void testImplicitTaskDependencyCopy() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + BuildResult result = getGradleRunner(PROJECT_NAME) .withArguments("clean", "sampleCopyAll", "-s", "-i") - .withPluginClasspath() .build(); assertTaskSuccessful(result, ":buildResources"); @@ -69,10 +60,8 @@ public void testImplicitTaskDependencyCopy() { } public void testImplicitTaskDependencyInputFileOfOther() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + BuildResult result = getGradleRunner(PROJECT_NAME) .withArguments("clean", "sample", "-s", "-i") - .withPluginClasspath() .build(); 
assertTaskSuccessful(result, ":sample"); @@ -81,11 +70,12 @@ public void testImplicitTaskDependencyInputFileOfOther() { } public void testIncorrectUsage() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) - .withArguments("noConfigAfterExecution", "-s", "-i") - .withPluginClasspath() - .buildAndFail(); - assertOutputContains("buildResources can't be configured after the task ran"); + assertOutputContains( + getGradleRunner(PROJECT_NAME) + .withArguments("noConfigAfterExecution", "-s", "-i") + .buildAndFail() + .getOutput(), + "buildResources can't be configured after the task ran" + ); } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java index e5624a15d92df..d45028d844542 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java @@ -2,7 +2,6 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; -import org.gradle.testkit.runner.GradleRunner; /* * Licensed to Elasticsearch under one or more contributor @@ -25,10 +24,8 @@ public class JarHellTaskIT extends GradleIntegrationTestCase { public void testJarHellDetected() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("jarHell")) + BuildResult result = getGradleRunner("jarHell") .withArguments("clean", "precommit", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath()) - .withPluginClasspath() .buildAndFail(); assertTaskFailed(result, ":jarHell"); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index f7a0382cec775..46a9194780c2a 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -4,8 +4,12 @@ import org.gradle.testkit.runner.BuildTask; import org.gradle.testkit.runner.GradleRunner; import org.gradle.testkit.runner.TaskOutcome; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; import java.io.File; +import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.List; @@ -16,6 +20,9 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase { + @Rule + public TemporaryFolder testkitTmpDir = new TemporaryFolder(); + protected File getProjectDir(String name) { File root = new File("src/testKit/"); if (root.exists() == false) { @@ -26,9 +33,16 @@ protected File getProjectDir(String name) { } protected GradleRunner getGradleRunner(String sampleProject) { + File testkit; + try { + testkit = testkitTmpDir.newFolder(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } return GradleRunner.create() .withProjectDir(getProjectDir(sampleProject)) - .withPluginClasspath(); + .withPluginClasspath() + .withTestKitDir(testkit); } protected File getBuildDir(String name) { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index 84b13340c35cf..c9086d1459afd 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ 
b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -21,12 +21,21 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; +import org.junit.Before; import org.junit.Ignore; + import java.util.Arrays; public class TestClustersPluginIT extends GradleIntegrationTestCase { + private GradleRunner runner; + + @Before + public void setUp() throws Exception { + runner = getGradleRunner("testclusters"); + } + public void testListClusters() { BuildResult result = getTestClustersRunner("listTestClusters").build(); @@ -190,10 +199,7 @@ private GradleRunner getTestClustersRunner(String... tasks) { arguments[tasks.length] = "-s"; arguments[tasks.length + 1] = "-i"; arguments[tasks.length + 2] = "-Dlocal.repo.path=" + getLocalTestRepoPath(); - return GradleRunner.create() - .withProjectDir(getProjectDir("testclusters")) - .withArguments(arguments) - .withPluginClasspath(); + return runner.withArguments(arguments); } private void assertStartedAndStoppedOnce(BuildResult result, String nodeName) { diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 8f7911574979d..471cb3a705cf5 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -21,7 +21,7 @@ slf4j = 1.6.2 jna = 4.5.1 netty = 4.1.35.Final -joda = 2.10.1 +joda = 2.10.2 # when updating this version, you need to ensure compatibility with: # - plugins/ingest-attachment (transitive dependency, check the upstream POM) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java index c9a34fe5c98d9..6ea3cede0e3f1 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java @@ -47,6 +47,8 @@ public class ModelSizeStats implements ToXContentObject { * Field Names */ public static final ParseField MODEL_BYTES_FIELD = new ParseField("model_bytes"); + public static final ParseField MODEL_BYTES_EXCEEDED_FIELD = new ParseField("model_bytes_exceeded"); + public static final ParseField MODEL_BYTES_MEMORY_LIMIT_FIELD = new ParseField("model_bytes_memory_limit"); public static final ParseField TOTAL_BY_FIELD_COUNT_FIELD = new ParseField("total_by_field_count"); public static final ParseField TOTAL_OVER_FIELD_COUNT_FIELD = new ParseField("total_over_field_count"); public static final ParseField TOTAL_PARTITION_FIELD_COUNT_FIELD = new ParseField("total_partition_field_count"); @@ -61,6 +63,8 @@ public class ModelSizeStats implements ToXContentObject { static { PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); PARSER.declareLong(Builder::setModelBytes, MODEL_BYTES_FIELD); + PARSER.declareLong(Builder::setModelBytesExceeded, MODEL_BYTES_EXCEEDED_FIELD); + PARSER.declareLong(Builder::setModelBytesMemoryLimit, MODEL_BYTES_MEMORY_LIMIT_FIELD); PARSER.declareLong(Builder::setBucketAllocationFailuresCount, BUCKET_ALLOCATION_FAILURES_COUNT_FIELD); PARSER.declareLong(Builder::setTotalByFieldCount, TOTAL_BY_FIELD_COUNT_FIELD); PARSER.declareLong(Builder::setTotalOverFieldCount, TOTAL_OVER_FIELD_COUNT_FIELD); @@ -97,6 +101,8 @@ public String toString() { private final String jobId; private final long modelBytes; + private final Long modelBytesExceeded; + private final Long 
modelBytesMemoryLimit; private final long totalByFieldCount; private final long totalOverFieldCount; private final long totalPartitionFieldCount; @@ -105,11 +111,13 @@ public String toString() { private final Date timestamp; private final Date logTime; - private ModelSizeStats(String jobId, long modelBytes, long totalByFieldCount, long totalOverFieldCount, - long totalPartitionFieldCount, long bucketAllocationFailuresCount, MemoryStatus memoryStatus, - Date timestamp, Date logTime) { + private ModelSizeStats(String jobId, long modelBytes, Long modelBytesExceeded, Long modelBytesMemoryLimit, long totalByFieldCount, + long totalOverFieldCount, long totalPartitionFieldCount, long bucketAllocationFailuresCount, + MemoryStatus memoryStatus, Date timestamp, Date logTime) { this.jobId = jobId; this.modelBytes = modelBytes; + this.modelBytesExceeded = modelBytesExceeded; + this.modelBytesMemoryLimit = modelBytesMemoryLimit; this.totalByFieldCount = totalByFieldCount; this.totalOverFieldCount = totalOverFieldCount; this.totalPartitionFieldCount = totalPartitionFieldCount; @@ -126,6 +134,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Job.ID.getPreferredName(), jobId); builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); builder.field(MODEL_BYTES_FIELD.getPreferredName(), modelBytes); + if (modelBytesExceeded != null) { + builder.field(MODEL_BYTES_EXCEEDED_FIELD.getPreferredName(), modelBytesExceeded); + } + if (modelBytesMemoryLimit != null) { + builder.field(MODEL_BYTES_MEMORY_LIMIT_FIELD.getPreferredName(), modelBytesMemoryLimit); + } builder.field(TOTAL_BY_FIELD_COUNT_FIELD.getPreferredName(), totalByFieldCount); builder.field(TOTAL_OVER_FIELD_COUNT_FIELD.getPreferredName(), totalOverFieldCount); builder.field(TOTAL_PARTITION_FIELD_COUNT_FIELD.getPreferredName(), totalPartitionFieldCount); @@ -148,6 +162,14 @@ public long getModelBytes() { return modelBytes; } + public Long getModelBytesExceeded() { + return modelBytesExceeded; + } + + public Long getModelBytesMemoryLimit() { + return modelBytesMemoryLimit; + } + public long getTotalByFieldCount() { return totalByFieldCount; } @@ -188,8 +210,8 @@ public Date getLogTime() { @Override public int hashCode() { - return Objects.hash(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount, - this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); + return Objects.hash(jobId, modelBytes, modelBytesExceeded, modelBytesMemoryLimit, totalByFieldCount, totalOverFieldCount, + totalPartitionFieldCount, this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); } /** @@ -207,7 +229,8 @@ public boolean equals(Object other) { ModelSizeStats that = (ModelSizeStats) other; - return this.modelBytes == that.modelBytes && this.totalByFieldCount == that.totalByFieldCount + return this.modelBytes == that.modelBytes && Objects.equals(this.modelBytesExceeded, that.modelBytesExceeded) + && Objects.equals(this.modelBytesMemoryLimit, that.modelBytesMemoryLimit) && this.totalByFieldCount == that.totalByFieldCount && this.totalOverFieldCount == that.totalOverFieldCount && this.totalPartitionFieldCount == that.totalPartitionFieldCount && this.bucketAllocationFailuresCount == that.bucketAllocationFailuresCount && Objects.equals(this.memoryStatus, that.memoryStatus) && Objects.equals(this.timestamp, that.timestamp) @@ -219,6 +242,8 @@ public static class Builder { private final String jobId; private long modelBytes; + private Long modelBytesExceeded; + 
private Long modelBytesMemoryLimit; private long totalByFieldCount; private long totalOverFieldCount; private long totalPartitionFieldCount; @@ -236,6 +261,8 @@ public Builder(String jobId) { public Builder(ModelSizeStats modelSizeStats) { this.jobId = modelSizeStats.jobId; this.modelBytes = modelSizeStats.modelBytes; + this.modelBytesExceeded = modelSizeStats.modelBytesExceeded; + this.modelBytesMemoryLimit = modelSizeStats.modelBytesMemoryLimit; this.totalByFieldCount = modelSizeStats.totalByFieldCount; this.totalOverFieldCount = modelSizeStats.totalOverFieldCount; this.totalPartitionFieldCount = modelSizeStats.totalPartitionFieldCount; @@ -250,6 +277,16 @@ public Builder setModelBytes(long modelBytes) { return this; } + public Builder setModelBytesExceeded(long modelBytesExceeded) { + this.modelBytesExceeded = modelBytesExceeded; + return this; + } + + public Builder setModelBytesMemoryLimit(long modelBytesMemoryLimit) { + this.modelBytesMemoryLimit = modelBytesMemoryLimit; + return this; + } + public Builder setTotalByFieldCount(long totalByFieldCount) { this.totalByFieldCount = totalByFieldCount; return this; @@ -287,8 +324,8 @@ public Builder setLogTime(Date logTime) { } public ModelSizeStats build() { - return new ModelSizeStats(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount, - bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); + return new ModelSizeStats(jobId, modelBytes, modelBytesExceeded, modelBytesMemoryLimit, totalByFieldCount, totalOverFieldCount, + totalPartitionFieldCount, bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index 1bd49154ee548..40cd6f454cdab 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -72,6 +72,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.oneOf; public class DataFrameTransformIT extends ESRestHighLevelClientTestCase { @@ -264,7 +265,8 @@ public void testStartStop() throws IOException { GetDataFrameTransformStatsResponse statsResponse = execute(new GetDataFrameTransformStatsRequest(id), client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync); assertThat(statsResponse.getTransformsStateAndStats(), hasSize(1)); - assertEquals(IndexerState.STARTED, statsResponse.getTransformsStateAndStats().get(0).getTransformState().getIndexerState()); + IndexerState indexerState = statsResponse.getTransformsStateAndStats().get(0).getTransformState().getIndexerState(); + assertThat(indexerState, is(oneOf(IndexerState.STARTED, IndexerState.INDEXING))); StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id, Boolean.TRUE, null); StopDataFrameTransformResponse stopResponse = diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java index 092bc254f50fa..34ca5cd2aa448 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java @@ -150,11 +150,15 @@ private void addCategoriesIndexRequests(BulkRequest bulkRequest) { private void addModelSnapshotIndexRequests(BulkRequest bulkRequest) { { + // Index a number of model snapshots, one of which contains the new model_size_stats fields + // 'model_bytes_exceeded' and 'model_bytes_memory_limit' that were introduced in 7.2.0. + // We want to verify that we can parse the snapshots whether or not these fields are present. IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX); indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"timestamp\":1541587919000, " + "\"description\":\"State persisted due to job close at 2018-11-07T10:51:59+0000\", \"snapshot_id\":\"1541587919\"," + "\"snapshot_doc_count\":1, \"model_size_stats\":{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"model_size_stats\"," + - "\"model_bytes\":51722, \"total_by_field_count\":3, \"total_over_field_count\":0, \"total_partition_field_count\":2," + + "\"model_bytes\":51722, \"model_bytes_exceeded\":10762, \"model_bytes_memory_limit\":40960, \"total_by_field_count\":3, " + + "\"total_over_field_count\":0, \"total_partition_field_count\":2," + "\"bucket_allocation_failures_count\":0, \"memory_status\":\"ok\", \"log_time\":1541587919000," + " \"timestamp\":1519930800000},\"latest_record_time_stamp\":1519931700000, \"latest_result_time_stamp\":1519930800000," + " \"retain\":false }", XContentType.JSON); @@ -223,6 +227,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -241,6 +247,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -259,6 +267,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(2).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(2).getModelSizeStats().getJobId(), equalTo(JOB_ID)); 
assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -288,6 +298,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(2).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(2).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L)); + assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -306,6 +318,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -324,6 +338,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -353,6 +369,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); 
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -383,6 +401,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -402,6 +422,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -430,6 +452,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -470,6 +494,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); 
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -488,6 +514,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -517,6 +545,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MainResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MainResponseTests.java index 24925e819a443..4a5cd2056655a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MainResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MainResponseTests.java @@ -38,7 +38,7 @@ protected org.elasticsearch.action.main.MainResponse createServerTestInstance() ClusterName clusterName = new ClusterName(randomAlphaOfLength(10)); String nodeName = randomAlphaOfLength(10); final String date = new Date(randomNonNegativeLong()).toString(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_1, Version.CURRENT); + Version version = VersionUtils.randomIndexCompatibleVersion(random()); Build build = new Build( Build.Flavor.UNKNOWN, Build.Type.UNKNOWN, randomAlphaOfLength(8), date, randomBoolean(), version.toString() diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java index 4a12a75f2b17d..8c43feb545a26 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java @@ -31,6 +31,8 @@ public class ModelSizeStatsTests extends AbstractXContentTestCase - return tasks.create(name: "$name", type: Exec) { + return tasks.create(name: "$name", type: LoggedExec) { dependsOn checkoutBwcBranch, writeBuildMetadata + spoolOutput = true workingDir = checkoutDir doFirst { // Execution time so that the checkouts are available diff --git a/docs/painless/index.asciidoc b/docs/painless/index.asciidoc index 92e0a33bf1347..c41899bbd98da 100644 --- a/docs/painless/index.asciidoc +++ b/docs/painless/index.asciidoc @@ -3,7 +3,7 @@ include::../Versions.asciidoc[] -include::painless-getting-started.asciidoc[] +include::painless-guide.asciidoc[] include::painless-lang-spec.asciidoc[] diff --git a/docs/painless/painless-contexts.asciidoc b/docs/painless/painless-contexts.asciidoc index 7c342a3da7a5a..ccc9e3ac4db24 100644 --- a/docs/painless/painless-contexts.asciidoc +++ b/docs/painless/painless-contexts.asciidoc @@ -54,6 +54,4 @@ specialized code may define new ways to use a Painless script. | {xpack-ref}/transform-script.html[Elasticsearch Documentation] |==== -include::painless-contexts/painless-context-examples.asciidoc[] - include::painless-contexts/index.asciidoc[] diff --git a/docs/painless/painless-contexts/index.asciidoc b/docs/painless/painless-contexts/index.asciidoc index 0c8c21c06a9be..11b4c9993374e 100644 --- a/docs/painless/painless-contexts/index.asciidoc +++ b/docs/painless/painless-contexts/index.asciidoc @@ -1,3 +1,5 @@ +include::painless-context-examples.asciidoc[] + include::painless-ingest-processor-context.asciidoc[] include::painless-update-context.asciidoc[] diff --git a/docs/painless/painless-contexts/painless-analysis-predicate-context.asciidoc b/docs/painless/painless-contexts/painless-analysis-predicate-context.asciidoc index 07914b671e781..3edb1080611d2 100644 --- a/docs/painless/painless-contexts/painless-analysis-predicate-context.asciidoc +++ b/docs/painless/painless-contexts/painless-analysis-predicate-context.asciidoc @@ -40,4 +40,4 @@ analysis chain matches a predicate. *API* -The standard <> is available. \ No newline at end of file +The standard <> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc index 5a5306016945d..f6e2a4b7a5a91 100644 --- a/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc +++ b/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc @@ -18,7 +18,7 @@ numeric:: ==== API -The standard <> is available. +The standard <> is available. ==== Example diff --git a/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc index 69fbce1d0828f..2d854c880cdcd 100644 --- a/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc +++ b/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc @@ -19,6 +19,10 @@ boolean:: ==== API +The standard <> is available. + +==== Example + To run this example, first follow the steps in <>. 
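Stepping back to the `ModelSizeStats` changes shown above: the two new fields, `model_bytes_exceeded` and `model_bytes_memory_limit`, are carried as boxed `Long`s so they stay `null` when a stored document predates 7.2.0, which is exactly what the snapshot assertions above check. The following minimal sketch is not part of the change; the job id `my-job` and the byte values are invented, and it simply exercises the builder methods added in this diff.

```java
import org.elasticsearch.client.ml.job.process.ModelSizeStats;

public class ModelSizeStatsSketch {
    public static void main(String[] args) {
        // Pre-7.2.0 style stats: the new fields are never set on the builder.
        ModelSizeStats legacy = new ModelSizeStats.Builder("my-job") // hypothetical job id
            .setModelBytes(51722L)
            .build();
        System.out.println(legacy.getModelBytesExceeded());     // null, field absent
        System.out.println(legacy.getModelBytesMemoryLimit());  // null, field absent

        // 7.2.0+ style stats: both new fields populated.
        ModelSizeStats current = new ModelSizeStats.Builder("my-job")
            .setModelBytes(51722L)
            .setModelBytesExceeded(10762L)
            .setModelBytesMemoryLimit(40960L)
            .build();
        System.out.println(current.getModelBytesExceeded());    // 10762
        System.out.println(current.getModelBytesMemoryLimit()); // 40960
    }
}
```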
diff --git a/docs/painless/painless-contexts/painless-field-context.asciidoc b/docs/painless/painless-contexts/painless-field-context.asciidoc index 15a9f4255232c..5a95e88c68460 100644 --- a/docs/painless/painless-contexts/painless-field-context.asciidoc +++ b/docs/painless/painless-contexts/painless-field-context.asciidoc @@ -25,7 +25,8 @@ a customized value for each document in the results of a query. *API* -The standard <> is available. +Both the standard <> and +<> are available. *Example* diff --git a/docs/painless/painless-contexts/painless-filter-context.asciidoc b/docs/painless/painless-contexts/painless-filter-context.asciidoc index bf4741cfc02fc..eea810f616291 100644 --- a/docs/painless/painless-contexts/painless-filter-context.asciidoc +++ b/docs/painless/painless-contexts/painless-filter-context.asciidoc @@ -23,7 +23,7 @@ query to include and exclude documents. *API* -The standard <> is available. +The standard <> is available. *Example* diff --git a/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc b/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc index 546057ab1a0b8..858949deb5602 100644 --- a/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc +++ b/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc @@ -38,7 +38,8 @@ void:: *API* -The standard <> is available. +Both the standard <> and +<> are available. *Example* diff --git a/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc index 5cc9ad8ecbb93..2d5edf6ab4cd8 100644 --- a/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc +++ b/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc @@ -24,4 +24,4 @@ optional as part of a full metric aggregation. *API* -The standard <> is available. +The standard <> is available. diff --git a/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc index 8c0fddfa33961..78ebac79c65ee 100644 --- a/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc +++ b/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc @@ -29,4 +29,4 @@ full metric aggregation. *API* -The standard <> is available. +The standard <> is available. diff --git a/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc index a34308aa93887..485d4da8439d8 100644 --- a/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc +++ b/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc @@ -44,4 +44,4 @@ part of a full metric aggregation. *API* -The standard <> is available. +The standard <> is available. diff --git a/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc index b492207ef4468..ba6b6dabdc924 100644 --- a/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc +++ b/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc @@ -25,4 +25,4 @@ specified) and is optional as part of a full metric aggregation. *API* -The standard <> is available. +The standard <> is available. 
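As an aside on the build-tooling part of this diff: `LoggedExec` now spools command output through the new `LazyFileOutputStream`, whose whole point is that the spool file is only created once the wrapped command actually writes something. A tiny sketch of that behaviour follows; it is illustrative only, lives in the same `org.elasticsearch.gradle` package because the class is package-private, and the spool path is made up.

```java
package org.elasticsearch.gradle;

import java.io.File;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

public class LazyFileOutputStreamSketch {
    public static void main(String[] args) throws Exception {
        File spoolFile = new File("build/buffered-output/demo.log"); // hypothetical path
        OutputStream out = new LazyFileOutputStream(spoolFile);

        // Nothing has been opened or created on disk yet.
        System.out.println(spoolFile.exists()); // false

        // The first write bootstraps the real FileOutputStream, creating parent dirs and the file.
        out.write("some task output\n".getBytes(StandardCharsets.UTF_8));
        System.out.println(spoolFile.exists()); // true

        out.close();
    }
}
```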
diff --git a/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc b/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc index cd476481381a6..896e882c7837d 100644 --- a/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc +++ b/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc @@ -25,7 +25,7 @@ results. *API* -The standard <> is available. +The standard <> is available. *Example* diff --git a/docs/painless/painless-contexts/painless-reindex-context.asciidoc b/docs/painless/painless-contexts/painless-reindex-context.asciidoc index ae5445183a6ad..54791f2fa50db 100644 --- a/docs/painless/painless-contexts/painless-reindex-context.asciidoc +++ b/docs/painless/painless-contexts/painless-reindex-context.asciidoc @@ -65,4 +65,4 @@ reindexed into a target index. *API* -The standard <> is available. \ No newline at end of file +The standard <> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-score-context.asciidoc b/docs/painless/painless-contexts/painless-score-context.asciidoc index 2bec9021c1720..e5d3c538b4512 100644 --- a/docs/painless/painless-contexts/painless-score-context.asciidoc +++ b/docs/painless/painless-contexts/painless-score-context.asciidoc @@ -26,7 +26,8 @@ score to documents returned from a query. *API* -The standard <> is available. +Both the standard <> and +<> are available. *Example* diff --git a/docs/painless/painless-contexts/painless-similarity-context.asciidoc b/docs/painless/painless-contexts/painless-similarity-context.asciidoc index 98eff19a1943e..e48da21195dd7 100644 --- a/docs/painless/painless-contexts/painless-similarity-context.asciidoc +++ b/docs/painless/painless-contexts/painless-similarity-context.asciidoc @@ -56,4 +56,4 @@ uses synonyms. *API* -The standard <> is available. \ No newline at end of file +The standard <> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-sort-context.asciidoc b/docs/painless/painless-contexts/painless-sort-context.asciidoc index 64c17ad07a664..4a7743dc48800 100644 --- a/docs/painless/painless-contexts/painless-sort-context.asciidoc +++ b/docs/painless/painless-contexts/painless-sort-context.asciidoc @@ -25,7 +25,7 @@ Use a Painless script to *API* -The standard <> is available. +The standard <> is available. *Example* diff --git a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc index ba42105f2e901..c9e72ac5b9288 100644 --- a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc +++ b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc @@ -51,7 +51,7 @@ result of query. *API* -The standard <> is available. +The standard <> is available. *Example* diff --git a/docs/painless/painless-contexts/painless-update-context.asciidoc b/docs/painless/painless-contexts/painless-update-context.asciidoc index 6ed8c2f7c13a3..a83bf47de1f78 100644 --- a/docs/painless/painless-contexts/painless-update-context.asciidoc +++ b/docs/painless/painless-contexts/painless-update-context.asciidoc @@ -52,7 +52,7 @@ add, modify, or delete fields within a single document. *API* -The standard <> is available. +The standard <> is available. 
*Example* diff --git a/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc b/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc index 91ab51561ef88..8e4924d426b0c 100644 --- a/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc @@ -14,7 +14,7 @@ include::painless-watcher-context-variables.asciidoc[] *API* -The standard <> is available. +The standard <> is available. *Example* diff --git a/docs/painless/painless-contexts/painless-watcher-context-variables.asciidoc b/docs/painless/painless-contexts/painless-watcher-context-variables.asciidoc index addfd11cab92e..71009d819a42d 100644 --- a/docs/painless/painless-contexts/painless-watcher-context-variables.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-context-variables.asciidoc @@ -33,8 +33,7 @@ The following variables are available in all watcher contexts. *API* - -The standard <> is available. +The standard <> is available. To run this example, first follow the steps in <>. diff --git a/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc b/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc index 92012720aa69e..ec0ac6519a44f 100644 --- a/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc @@ -14,7 +14,7 @@ include::painless-watcher-context-variables.asciidoc[] *API* -The standard <> is available. +The standard <> is available. *Example* diff --git a/docs/painless/painless-contexts/painless-weight-context.asciidoc b/docs/painless/painless-contexts/painless-weight-context.asciidoc index 9b4a47bc113b4..44438a1225ea6 100644 --- a/docs/painless/painless-contexts/painless-weight-context.asciidoc +++ b/docs/painless/painless-contexts/painless-weight-context.asciidoc @@ -39,4 +39,4 @@ Queries that contain multiple terms calculate a separate weight for each term. *API* -The standard <> is available. +The standard <> is available. diff --git a/docs/painless/painless-description.asciidoc b/docs/painless/painless-guide.asciidoc similarity index 56% rename from docs/painless/painless-description.asciidoc rename to docs/painless/painless-guide.asciidoc index dfaf66ca26d4b..5e926498088ab 100644 --- a/docs/painless/painless-description.asciidoc +++ b/docs/painless/painless-guide.asciidoc @@ -1,11 +1,14 @@ +[[painless-guide]] +== Painless Guide + _Painless_ is a simple, secure scripting language designed specifically for use with Elasticsearch. It is the default scripting language for Elasticsearch and -can safely be used for inline and stored scripts. For a detailed description of -the Painless syntax and language features, see the -{painless}/painless-lang-spec.html[Painless Language Specification]. +can safely be used for inline and stored scripts. For a jump start into +Painless, see <>. For a +detailed description of the Painless syntax and language features, see the +<>. -[[painless-features]] -You can use Painless anywhere scripts can be used in Elasticsearch. Painless +You can use Painless anywhere scripts are used in Elasticsearch. Painless provides: * Fast performance: Painless scripts https://benchmarks.elastic.co/index.html#search_qps_scripts[ @@ -18,7 +21,9 @@ complete list of available classes and methods. * Optional typing: Variables and parameters can use explicit types or the dynamic `def` type. 
-* Syntax: Extends Java's syntax to provide http://groovy-lang.org/index.html[ -Groovy-style] scripting language features that make scripts easier to write. +* Syntax: Extends a subset of Java's syntax to provide additional scripting +language features. * Optimizations: Designed specifically for Elasticsearch scripting. + +include::painless-guide/index.asciidoc[] \ No newline at end of file diff --git a/docs/painless/painless-guide/index.asciidoc b/docs/painless/painless-guide/index.asciidoc new file mode 100644 index 0000000000000..b45406a4e7273 --- /dev/null +++ b/docs/painless/painless-guide/index.asciidoc @@ -0,0 +1,7 @@ +include::painless-walkthrough.asciidoc[] + +include::painless-method-dispatch.asciidoc[] + +include::painless-debugging.asciidoc[] + +include::painless-execute-script.asciidoc[] diff --git a/docs/painless/painless-debugging.asciidoc b/docs/painless/painless-guide/painless-debugging.asciidoc similarity index 100% rename from docs/painless/painless-debugging.asciidoc rename to docs/painless/painless-guide/painless-debugging.asciidoc diff --git a/docs/painless/painless-execute-script.asciidoc b/docs/painless/painless-guide/painless-execute-script.asciidoc similarity index 100% rename from docs/painless/painless-execute-script.asciidoc rename to docs/painless/painless-guide/painless-execute-script.asciidoc diff --git a/docs/painless/painless-guide/painless-method-dispatch.asciidoc b/docs/painless/painless-guide/painless-method-dispatch.asciidoc new file mode 100644 index 0000000000000..0f7d0423174b5 --- /dev/null +++ b/docs/painless/painless-guide/painless-method-dispatch.asciidoc @@ -0,0 +1,30 @@ +[[modules-scripting-painless-dispatch]] +=== How Painless dispatches functions + +Painless uses receiver, name, and https://en.wikipedia.org/wiki/Arity[arity] +for method dispatch. For example, `s.foo(a, b)` is resolved by first getting +the class of `s` and then looking up the method `foo` with two parameters. This +is different from Groovy which uses the +https://en.wikipedia.org/wiki/Multiple_dispatch[runtime types] of the +parameters and Java which uses the compile time types of the parameters. + +The consequence of this is that Painless doesn't support overloaded methods like +Java, leading to some trouble when it whitelists classes from the Java +standard library. For example, in Java and Groovy, `Matcher` has two methods: +`group(int)` and `group(String)`. Painless can't whitelist both of these methods +because they have the same name and the same number of parameters. So instead it +has `group(int)` and `namedGroup(String)`. + +We have a few justifications for this different way of dispatching methods: + +1. It makes operating on `def` types simpler and, presumably, faster. Using +receiver, name, and arity means that when Painless sees a call on a `def` object it +can dispatch the appropriate method without having to do expensive comparisons +of the types of the parameters. The same is true for invocations with `def` +typed parameters. +2. It keeps things consistent. It would be genuinely weird for Painless to +behave like Groovy if any `def` typed parameters were involved and Java +otherwise. It'd be slow for it to behave like Groovy all the time. +3. It keeps Painless maintainable. Adding the Java or Groovy like method +dispatch *feels* like it'd add a ton of complexity which'd make maintenance and +other improvements much more difficult.
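To make the dispatch rule above concrete, here is a minimal illustrative Painless sketch (not part of this change) that uses the renamed `namedGroup(String)` accessor alongside `group(int)`. It assumes `script.painless.regex.enabled: true` so that the regex literal compiles; the pattern and input string are invented for the example.

[source,painless]
----
Matcher m = /(?<year>[0-9]{4})-(?<month>[0-9]{2})/.matcher('2019-05');
if (m.matches()) {
  // group(int) and namedGroup(String) carry different names because Painless
  // resolves a call from the receiver class, the method name, and the arity alone.
  return m.namedGroup('month') + ' of year ' + m.group(1);
}
return 'no match';
----

Because dispatch ignores parameter types, the same two calls work unchanged if `m` is declared as `def` instead of `Matcher`.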
diff --git a/docs/painless/painless-getting-started.asciidoc b/docs/painless/painless-guide/painless-walkthrough.asciidoc similarity index 83% rename from docs/painless/painless-getting-started.asciidoc rename to docs/painless/painless-guide/painless-walkthrough.asciidoc index f562033471e31..70089a08726d2 100644 --- a/docs/painless/painless-getting-started.asciidoc +++ b/docs/painless/painless-guide/painless-walkthrough.asciidoc @@ -1,10 +1,5 @@ -[[painless-getting-started]] -== Getting Started with Painless - -include::painless-description.asciidoc[] - -[[painless-examples]] -=== Painless Examples +[[painless-walkthrough]] +=== A Brief Painless Walkthrough To illustrate how Painless works, let's load some hockey stats into an Elasticsearch index: @@ -121,7 +116,7 @@ GET hockey/_search [float] -===== Missing values +==== Missing values `doc['field'].value` throws an exception if the field is missing in a document. @@ -198,7 +193,7 @@ POST hockey/_update/1 ==== Dates Date fields are exposed as -`ReadableDateTime`, so they support methods like `getYear`, `getDayOfWeek` +`ZonedDateTime`, so they support methods like `getYear`, `getDayOfWeek` or e.g. getting milliseconds since epoch with `getMillis`. To use these in a script, leave out the `get` prefix and continue with lowercasing the rest of the method name. For example, the following returns every hockey @@ -365,38 +360,3 @@ Note: all of the `_update_by_query` examples above could really do with a {ref}/query-dsl-script-query.html[script query] it wouldn't be as efficient as using any other query because script queries aren't able to use the inverted index to limit the documents that they have to check. - -[[modules-scripting-painless-dispatch]] -=== How painless dispatches functions - -Painless uses receiver, name, and https://en.wikipedia.org/wiki/Arity[arity] -for method dispatch. For example, `s.foo(a, b)` is resolved by first getting -the class of `s` and then looking up the method `foo` with two parameters. This -is different from Groovy which uses the -https://en.wikipedia.org/wiki/Multiple_dispatch[runtime types] of the -parameters and Java which uses the compile time types of the parameters. - -The consequence of this that Painless doesn't support overloaded methods like -Java, leading to some trouble when it whitelists classes from the Java -standard library. For example, in Java and Groovy, `Matcher` has two methods: -`group(int)` and `group(String)`. Painless can't whitelist both of these methods -because they have the same name and the same number of parameters. So instead it -has `group(int)` and `namedGroup(String)`. - -We have a few justifications for this different way of dispatching methods: - -1. It makes operating on `def` types simpler and, presumably, faster. Using -receiver, name, and arity means that when Painless sees a call on a `def` object it -can dispatch the appropriate method without having to do expensive comparisons -of the types of the parameters. The same is true for invocations with `def` -typed parameters. -2. It keeps things consistent. It would be genuinely weird for Painless to -behave like Groovy if any `def` typed parameters were involved and Java -otherwise. It'd be slow for it to behave like Groovy all the time. -3. It keeps Painless maintainable. Adding the Java or Groovy like method -dispatch *feels* like it'd add a ton of complexity which'd make maintenance and -other improvements much more difficult. 
- -include::painless-debugging.asciidoc[] - -include::painless-execute-script.asciidoc[] diff --git a/docs/painless/painless-lang-spec.asciidoc b/docs/painless/painless-lang-spec.asciidoc index d50f3db2dc0d3..2f108c73732eb 100644 --- a/docs/painless/painless-lang-spec.asciidoc +++ b/docs/painless/painless-lang-spec.asciidoc @@ -17,38 +17,4 @@ into Java Virtual Machine (JVM) byte code and executed against a standard JVM. This specification uses ANTLR4 grammar notation to describe the allowed syntax. However, the actual Painless grammar is more compact than what is shown here. -include::painless-comments.asciidoc[] - -include::painless-keywords.asciidoc[] - -include::painless-literals.asciidoc[] - -include::painless-identifiers.asciidoc[] - -include::painless-variables.asciidoc[] - -include::painless-types.asciidoc[] - -include::painless-casting.asciidoc[] - -include::painless-operators.asciidoc[] - -include::painless-operators-general.asciidoc[] - -include::painless-operators-numeric.asciidoc[] - -include::painless-operators-boolean.asciidoc[] - -include::painless-operators-reference.asciidoc[] - -include::painless-operators-array.asciidoc[] - -include::painless-statements.asciidoc[] - -include::painless-scripts.asciidoc[] - -include::painless-functions.asciidoc[] - -include::painless-lambdas.asciidoc[] - -include::painless-regexes.asciidoc[] +include::painless-lang-spec/index.asciidoc[] \ No newline at end of file diff --git a/docs/painless/painless-lang-spec/index.asciidoc b/docs/painless/painless-lang-spec/index.asciidoc new file mode 100644 index 0000000000000..e75264ff3e4e1 --- /dev/null +++ b/docs/painless/painless-lang-spec/index.asciidoc @@ -0,0 +1,35 @@ +include::painless-comments.asciidoc[] + +include::painless-keywords.asciidoc[] + +include::painless-literals.asciidoc[] + +include::painless-identifiers.asciidoc[] + +include::painless-variables.asciidoc[] + +include::painless-types.asciidoc[] + +include::painless-casting.asciidoc[] + +include::painless-operators.asciidoc[] + +include::painless-operators-general.asciidoc[] + +include::painless-operators-numeric.asciidoc[] + +include::painless-operators-boolean.asciidoc[] + +include::painless-operators-reference.asciidoc[] + +include::painless-operators-array.asciidoc[] + +include::painless-statements.asciidoc[] + +include::painless-scripts.asciidoc[] + +include::painless-functions.asciidoc[] + +include::painless-lambdas.asciidoc[] + +include::painless-regexes.asciidoc[] diff --git a/docs/painless/painless-casting.asciidoc b/docs/painless/painless-lang-spec/painless-casting.asciidoc similarity index 100% rename from docs/painless/painless-casting.asciidoc rename to docs/painless/painless-lang-spec/painless-casting.asciidoc diff --git a/docs/painless/painless-comments.asciidoc b/docs/painless/painless-lang-spec/painless-comments.asciidoc similarity index 100% rename from docs/painless/painless-comments.asciidoc rename to docs/painless/painless-lang-spec/painless-comments.asciidoc diff --git a/docs/painless/painless-functions.asciidoc b/docs/painless/painless-lang-spec/painless-functions.asciidoc similarity index 100% rename from docs/painless/painless-functions.asciidoc rename to docs/painless/painless-lang-spec/painless-functions.asciidoc diff --git a/docs/painless/painless-identifiers.asciidoc b/docs/painless/painless-lang-spec/painless-identifiers.asciidoc similarity index 100% rename from docs/painless/painless-identifiers.asciidoc rename to docs/painless/painless-lang-spec/painless-identifiers.asciidoc diff --git 
a/docs/painless/painless-keywords.asciidoc b/docs/painless/painless-lang-spec/painless-keywords.asciidoc similarity index 100% rename from docs/painless/painless-keywords.asciidoc rename to docs/painless/painless-lang-spec/painless-keywords.asciidoc diff --git a/docs/painless/painless-lambdas.asciidoc b/docs/painless/painless-lang-spec/painless-lambdas.asciidoc similarity index 100% rename from docs/painless/painless-lambdas.asciidoc rename to docs/painless/painless-lang-spec/painless-lambdas.asciidoc diff --git a/docs/painless/painless-literals.asciidoc b/docs/painless/painless-lang-spec/painless-literals.asciidoc similarity index 100% rename from docs/painless/painless-literals.asciidoc rename to docs/painless/painless-lang-spec/painless-literals.asciidoc diff --git a/docs/painless/painless-operators-array.asciidoc b/docs/painless/painless-lang-spec/painless-operators-array.asciidoc similarity index 100% rename from docs/painless/painless-operators-array.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-array.asciidoc diff --git a/docs/painless/painless-operators-boolean.asciidoc b/docs/painless/painless-lang-spec/painless-operators-boolean.asciidoc similarity index 100% rename from docs/painless/painless-operators-boolean.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-boolean.asciidoc diff --git a/docs/painless/painless-operators-general.asciidoc b/docs/painless/painless-lang-spec/painless-operators-general.asciidoc similarity index 100% rename from docs/painless/painless-operators-general.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-general.asciidoc diff --git a/docs/painless/painless-operators-numeric.asciidoc b/docs/painless/painless-lang-spec/painless-operators-numeric.asciidoc similarity index 100% rename from docs/painless/painless-operators-numeric.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-numeric.asciidoc diff --git a/docs/painless/painless-operators-reference.asciidoc b/docs/painless/painless-lang-spec/painless-operators-reference.asciidoc similarity index 100% rename from docs/painless/painless-operators-reference.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-reference.asciidoc diff --git a/docs/painless/painless-operators.asciidoc b/docs/painless/painless-lang-spec/painless-operators.asciidoc similarity index 100% rename from docs/painless/painless-operators.asciidoc rename to docs/painless/painless-lang-spec/painless-operators.asciidoc diff --git a/docs/painless/painless-regexes.asciidoc b/docs/painless/painless-lang-spec/painless-regexes.asciidoc similarity index 100% rename from docs/painless/painless-regexes.asciidoc rename to docs/painless/painless-lang-spec/painless-regexes.asciidoc diff --git a/docs/painless/painless-scripts.asciidoc b/docs/painless/painless-lang-spec/painless-scripts.asciidoc similarity index 100% rename from docs/painless/painless-scripts.asciidoc rename to docs/painless/painless-lang-spec/painless-scripts.asciidoc diff --git a/docs/painless/painless-statements.asciidoc b/docs/painless/painless-lang-spec/painless-statements.asciidoc similarity index 100% rename from docs/painless/painless-statements.asciidoc rename to docs/painless/painless-lang-spec/painless-statements.asciidoc diff --git a/docs/painless/painless-types.asciidoc b/docs/painless/painless-lang-spec/painless-types.asciidoc similarity index 100% rename from docs/painless/painless-types.asciidoc rename to docs/painless/painless-lang-spec/painless-types.asciidoc diff 
--git a/docs/painless/painless-variables.asciidoc b/docs/painless/painless-lang-spec/painless-variables.asciidoc similarity index 100% rename from docs/painless/painless-variables.asciidoc rename to docs/painless/painless-lang-spec/painless-variables.asciidoc diff --git a/docs/painless/painless-xref.asciidoc b/docs/painless/painless-xref.asciidoc deleted file mode 100644 index 86407b3e697d6..0000000000000 --- a/docs/painless/painless-xref.asciidoc +++ /dev/null @@ -1,2 +0,0 @@ -Ready to start scripting with Painless? See {painless}/painless-getting-started.html[Getting Started with Painless] in the guide to the -{painless}/painless.html[Painless Scripting Language]. \ No newline at end of file diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index d1ea1fad88515..03854fae2f61f 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -59,7 +59,7 @@ ml_autodetect (default distro only) ml_datafeed (default distro only) ml_utility (default distro only) refresh -rollup_indexing (default distro only)` +rollup_indexing (default distro only) search security-token-key (default distro only) snapshot diff --git a/docs/reference/commands/index.asciidoc b/docs/reference/commands/index.asciidoc index a13ea58c27d3e..e778366aa58b9 100644 --- a/docs/reference/commands/index.asciidoc +++ b/docs/reference/commands/index.asciidoc @@ -9,7 +9,6 @@ tasks from the command line: * <> * <> -* <> * <> * <> * <> @@ -21,7 +20,6 @@ tasks from the command line: include::certgen.asciidoc[] include::certutil.asciidoc[] -include::migrate-tool.asciidoc[] include::node-tool.asciidoc[] include::saml-metadata.asciidoc[] include::setup-passwords.asciidoc[] diff --git a/docs/reference/commands/migrate-tool.asciidoc b/docs/reference/commands/migrate-tool.asciidoc deleted file mode 100644 index a1903ac69dacf..0000000000000 --- a/docs/reference/commands/migrate-tool.asciidoc +++ /dev/null @@ -1,110 +0,0 @@ -[role="xpack"] -[testenv="gold+"] -[[migrate-tool]] -== elasticsearch-migrate - -The `elasticsearch-migrate` command migrates existing file-based users and roles -to the native realm. From 5.0 onward, you should use the `native` realm to -manage roles and local users. - - -[float] -=== Synopsis - -[source,shell] --------------------------------------------------- -bin/elasticsearch-migrate -(native (-U, --url ) -[-h, --help] [-E ] -[-n, --users ] [-r, --roles ] -[-u, --username ] [-p, --password ] -[-s, --silent] [-v, --verbose]) --------------------------------------------------- - -[float] -=== Description - -NOTE: When migrating from Shield 2.x, the `elasticsearch-migrate` tool should be -run prior to upgrading to ensure all roles can be migrated as some may be in a -deprecated format that {xpack} cannot read. The `migrate` tool is available in -Shield 2.4.0 and higher. - -The `elasticsearch-migrate` tool loads the existing file-based users and roles -and calls the user and roles APIs to add them to the native realm. You can -migrate all users and roles, or specify the ones you want to migrate. Users and -roles that already exist in the `native` realm are not replaced or -overridden. If the names you specify with the `--users` and `--roles` options -don't exist in the `file` realm, they are skipped. - -[float] -[[migrate-tool-options]] -=== Parameters -The `native` subcommand supports the following options: - -`-E `:: -Configures a setting. - -`-h, --help`:: -Returns all of the command parameters. 
- -`-n`, `--users`:: -Comma-separated list of the users that you want to migrate. If this parameter is -not specified, all users are migrated. - -`-p`, `--password`:: -Password to use for authentication with {es}. -//TBD: What is the default if this isn't specified? - -`-r`, `--roles`:: -Comma-separated list of the roles that you want to migrate. If this parameter is -not specified, all roles are migrated. - -`-s, --silent`:: Shows minimal output. - -`-U`, `--url`:: -Endpoint URL of the {es} cluster to which you want to migrate the -file-based users and roles. This parameter is required. - -`-u`, `--username`:: -Username to use for authentication with {es}. -//TBD: What is the default if this isn't specified? - -`-v, --verbose`:: Shows verbose output. - -[float] -=== Examples - -Run the `elasticsearch-migrate` tool when {xpack} is installed. For example: - -[source, sh] ----------------------------------------------------------------------- -$ bin/elasticsearch-migrate native -U http://localhost:9200 -u elastic --p x-pack-test-password -n lee,foo -r role1,role2,role3,role4,foo -starting migration of users and roles... -importing users from [/home/es/config/shield/users]... -found existing users: [test_user, joe3, joe2] -migrating user [lee] -{"user":{"created":true}} -no user [foo] found, skipping -importing roles from [/home/es/config/shield/roles.yml]... -found existing roles: [marvel_user, role_query_fields, admin_role, role3, admin, -remote_marvel_agent, power_user, role_new_format_name_array, role_run_as, -logstash, role_fields, role_run_as1, role_new_format, kibana4_server, user, -transport_client, role1.ab, role_query] -migrating role [role1] -{"role":{"created":true}} -migrating role [role2] -{"role":{"created":true}} -role [role3] already exists, skipping -no role [foo] found, skipping -users and roles imported. ----------------------------------------------------------------------- - -Additionally, the `-E` flag can be used to specify additional settings. For example -to specify a different configuration directory, the command would look like: - -[source, sh] ----------------------------------------------------------------------- -$ bin/elasticsearch-migrate native -U http://localhost:9200 -u elastic --p x-pack-test-password -E path.conf=/etc/elasticsearch ----------------------------------------------------------------------- diff --git a/docs/reference/commands/node-tool.asciidoc b/docs/reference/commands/node-tool.asciidoc index f070d11aa8fb0..ed810a4dac014 100644 --- a/docs/reference/commands/node-tool.asciidoc +++ b/docs/reference/commands/node-tool.asciidoc @@ -4,14 +4,15 @@ The `elasticsearch-node` command enables you to perform certain unsafe operations on a node that are only possible while it is shut down. This command allows you to adjust the <> of a node and may be able to -recover some data after a disaster. +recover some data after a disaster or start a node even if it is incompatible +with the data on disk. 
[float] === Synopsis [source,shell] -------------------------------------------------- -bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster +bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster|override-version [--ordinal ] [-E ] [-h, --help] ([-s, --silent] | [-v, --verbose]) -------------------------------------------------- @@ -19,7 +20,7 @@ bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster [float] === Description -This tool has three modes: +This tool has four modes: * `elasticsearch-node repurpose` can be used to delete unwanted data from a node if it used to be a <> or a @@ -36,6 +37,11 @@ This tool has three modes: cluster bootstrapping was not possible, it also enables you to move nodes into a brand-new cluster. +* `elasticsearch-node override-version` enables you to start up a node + even if the data in the data path was written by an incompatible version of + {es}. This may sometimes allow you to downgrade to an earlier version of + {es}. + [[node-tool-repurpose]] [float] ==== Changing the role of a node @@ -109,6 +115,25 @@ way forward that does not risk data loss, but it may be possible to use the `elasticsearch-node` tool to construct a new cluster that contains some of the data from the failed cluster. +[[node-tool-override-version]] +[float] +==== Bypassing version checks + +The data that {es} writes to disk is designed to be read by the current version +and a limited set of future versions. It cannot generally be read by older +versions, nor by versions that are more than one major version newer. The data +stored on disk includes the version of the node that wrote it, and {es} checks +that it is compatible with this version when starting up. + +In rare circumstances it may be desirable to bypass this check and start up an +{es} node using data that was written by an incompatible version. This may not +work if the format of the stored data has changed, and it is a risky process +because it is possible for the format to change in ways that {es} may +misinterpret, silently leading to data loss. + +To bypass this check, you can use the `elasticsearch-node override-version` +tool to overwrite the version number stored in the data path with the current +version, causing {es} to believe that it is compatible with the on-disk data. [[node-tool-unsafe-bootstrap]] [float] @@ -262,6 +287,9 @@ one-node cluster. `detach-cluster`:: Specifies to unsafely detach this node from its cluster so it can join a different cluster. +`override-version`:: Overwrites the version number stored in the data path so +that a node can start despite being incompatible with the on-disk data. + `--ordinal `:: If there is <> then this specifies which node to target. Defaults to `0`, meaning to use the first node in the data path. @@ -423,3 +451,32 @@ Do you want to proceed? Confirm [y/N] y Node was successfully detached from the cluster ---- + +[float] +==== Bypassing version checks + +Run the `elasticsearch-node override-version` command to overwrite the version +stored in the data path so that a node can start despite being incompatible +with the data stored in the data path: + +[source, txt] +---- +node$ ./bin/elasticsearch-node override-version + + WARNING: Elasticsearch MUST be stopped before running this tool. + +This data path was last written by Elasticsearch version [x.x.x] and may no +longer be compatible with Elasticsearch version [y.y.y]. 
This tool will bypass +this compatibility check, allowing a version [y.y.y] node to start on this data +path, but a version [y.y.y] node may not be able to read this data or may read +it incorrectly leading to data loss. + +You should not use this tool. Instead, continue to use a version [x.x.x] node +on this data path. If necessary, you can use reindex-from-remote to copy the +data from here into an older cluster. + +Do you want to proceed? + +Confirm [y/N] y +Successfully overwrote this node's metadata to bypass its version compatibility checks. +---- diff --git a/docs/reference/data-frames/apis/get-transform-stats.asciidoc b/docs/reference/data-frames/apis/get-transform-stats.asciidoc index 85e5001b13a9a..09c383f249488 100644 --- a/docs/reference/data-frames/apis/get-transform-stats.asciidoc +++ b/docs/reference/data-frames/apis/get-transform-stats.asciidoc @@ -65,27 +65,35 @@ The API returns the following results: { "id" : "ecommerce_transform", "state" : { + "task_state" : "started", "indexer_state" : "started", - "task_state": "started", - "current_position" : { - "customer_id" : "9" - }, - "generation" : 1 + "checkpoint" : 1, + "progress" : { + "total_docs" : 1220, + "docs_remaining" : 0, + "percent_complete" : 100.0 + } }, "stats" : { - "pages_processed" : 0, - "documents_processed" : 0, - "documents_indexed" : 0, - "trigger_count" : 0, - "index_time_in_ms" : 0, - "index_total" : 0, + "pages_processed" : 2, + "documents_processed" : 1220, + "documents_indexed" : 13, + "trigger_count" : 1, + "index_time_in_ms" : 19, + "index_total" : 1, "index_failures" : 0, - "search_time_in_ms" : 0, - "search_total" : 0, + "search_time_in_ms" : 52, + "search_total" : 2, "search_failures" : 0 + }, + "checkpointing" : { + "current" : { + "timestamp_millis" : 1557474786393 + }, + "operations_behind" : 0 } } ] } ---- -// TESTRESPONSE \ No newline at end of file +// TESTRESPONSE diff --git a/docs/reference/data-frames/apis/get-transform.asciidoc b/docs/reference/data-frames/apis/get-transform.asciidoc index 85e56aa21cdd1..e2b5c5eccb7da 100644 --- a/docs/reference/data-frames/apis/get-transform.asciidoc +++ b/docs/reference/data-frames/apis/get-transform.asciidoc @@ -75,10 +75,20 @@ The API returns the following results: "transforms" : [ { "id" : "ecommerce_transform", - "source" : "kibana_sample_data_ecommerce", - "dest" : "kibana_sample_data_ecommerce_transform", - "query" : { - "match_all" : { } + "source" : { + "index" : [ + "kibana_sample_data_ecommerce" + ], + "query" : { + "term" : { + "geoip.continent_name" : { + "value" : "Asia" + } + } + } + }, + "dest" : { + "index" : "kibana_sample_data_ecommerce_transform" }, "pivot" : { "group_by" : { @@ -95,7 +105,8 @@ The API returns the following results: } } } - } + }, + "description" : "Maximum priced ecommerce data by customer_id in Asia" } ] } diff --git a/docs/reference/data-frames/apis/put-transform.asciidoc b/docs/reference/data-frames/apis/put-transform.asciidoc index 222d93dfe4256..f452c38ab4c94 100644 --- a/docs/reference/data-frames/apis/put-transform.asciidoc +++ b/docs/reference/data-frames/apis/put-transform.asciidoc @@ -15,7 +15,13 @@ Instantiates a {dataframe-transform}. `PUT _data_frame/transforms/` -//===== Description +===== Description + +IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}. + Do not put a {dataframe-transform} directly into any + `.data-frame-internal*` indices using the Elasticsearch index API. 
+ If {es} {security-features} are enabled, do not give users any + privileges on `.data-frame-internal*` indices. ==== Path Parameters @@ -27,12 +33,12 @@ Instantiates a {dataframe-transform}. ==== Request Body -`source`:: (object) The source configuration, consisting of `index` and optionally +`source` (required):: (object) The source configuration, consisting of `index` and optionally a `query`. -`dest`:: (object) The destination configuration, consisting of `index`. +`dest` (required):: (object) The destination configuration, consisting of `index`. -`pivot`:: Defines the pivot function `group by` fields and the aggregation to +`pivot`:: (object) Defines the pivot function `group by` fields and the aggregation to reduce the data. `description`:: Optional free text description of the data frame transform diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index b085e081b4dd7..e96c262d67bb4 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -982,6 +982,10 @@ Reindex supports <> to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. +NOTE: Reindexing from remote clusters does not support +<> or +<>. + [float] [[docs-reindex-manual-slice]] ==== Manual slicing diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc index 705fb81b09c8c..6821e583a79dd 100644 --- a/docs/reference/index-modules/translog.asciidoc +++ b/docs/reference/index-modules/translog.asciidoc @@ -29,13 +29,11 @@ The data in the translog is only persisted to disk when the translog is ++fsync++ed and committed. In the event of hardware failure, any data written since the previous translog commit will be lost. -By default, Elasticsearch ++fsync++s and commits the translog every 5 seconds -if `index.translog.durability` is set to `async` or if set to `request` -(default) at the end of every <>, <>, -<>, or <> request. More precisely, if set -to `request`, Elasticsearch will only report success of an index, delete, +By default, `index.translog.durability` is set to `request` meaning that Elasticsearch will only report success of an index, delete, update, or bulk request to the client after the translog has been successfully -++fsync++ed and committed on the primary and on every allocated replica. +++fsync++ed and committed on the primary and on every allocated replica. If +`index.translog.durability` is set to `async` then Elasticsearch ++fsync++s +and commits the translog every `index.translog.sync_interval` (defaults to 5 seconds). The following <> per-index settings control the behaviour of the translog: diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 1f8abc5675db9..b1a92222bec59 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -563,7 +563,7 @@ template for all indexes that hold data that needs pre-index processing. [[conditionals-with-regex]] === Conditionals with the Regular Expressions The `if` conditional is implemented as a Painless script, which requires -{painless}//painless-examples.html#modules-scripting-painless-regex[explicit support for regular expressions]. +{painless}//painless-regexes.html[explicit support for regular expressions]. `script.painless.regex.enabled: true` must be set in `elasticsearch.yml` to use regular expressions in the `if` condition. 
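The ingest hunk above links to the regex documentation but does not show a conditional in action, so here is an illustrative sketch (not part of this change, with an invented pipeline name and `href`/`secure` fields) of the kind of `if` condition that requires `script.painless.regex.enabled: true` in `elasticsearch.yml`.

[source,js]
----
PUT _ingest/pipeline/tag_secure_links
{
  "processors": [
    {
      "set": {
        "if": "ctx.href != null && ctx.href =~ /^https:/",
        "field": "secure",
        "value": true
      }
    }
  ]
}
----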
diff --git a/docs/reference/migration/migrate_8_0/security.asciidoc b/docs/reference/migration/migrate_8_0/security.asciidoc index fcc0a5b22168a..a7cacef8ff017 100644 --- a/docs/reference/migration/migrate_8_0/security.asciidoc +++ b/docs/reference/migration/migrate_8_0/security.asciidoc @@ -25,3 +25,11 @@ The `xpack.security.authz.store.roles.index.cache.max_size` and been removed. These settings have been redundant and deprecated since the 5.2 release of {es}. +[float] +[[migrate-tool-removed]] +==== The `elasticsearch-migrate` tool has been removed + +The `elasticsearch-migrate` tool provided a way to convert file +realm users and roles into the native realm. It has been deprecated +since 7.2.0. Users and roles should now be created in the native +realm directly. diff --git a/docs/reference/modules/discovery/discovery-settings.asciidoc b/docs/reference/modules/discovery/discovery-settings.asciidoc index 541cb15bf1108..84472552cfced 100644 --- a/docs/reference/modules/discovery/discovery-settings.asciidoc +++ b/docs/reference/modules/discovery/discovery-settings.asciidoc @@ -31,7 +31,7 @@ Discovery and cluster formation are also affected by the following _expert-level_ settings, although it is not recommended to change any of these from their default values. -[WARNING] If you adjust these settings then your cluster may not form correctly +WARNING: If you adjust these settings then your cluster may not form correctly or may become unstable or intolerant of certain failures. `discovery.cluster_formation_warning_timeout`:: diff --git a/docs/reference/modules/indices/search-settings.asciidoc b/docs/reference/modules/indices/search-settings.asciidoc index ad75de1291cdc..30137fa382779 100644 --- a/docs/reference/modules/indices/search-settings.asciidoc +++ b/docs/reference/modules/indices/search-settings.asciidoc @@ -3,6 +3,7 @@ The following _expert_ setting can be set to manage global search limits. +[[indices-query-bool-max-clause-count]] `indices.query.bool.max_clause_count`:: Defaults to `1024`. diff --git a/docs/reference/modules/scripting/painless.asciidoc b/docs/reference/modules/scripting/painless.asciidoc index ac48aad73d28f..6dd9b50db51ed 100644 --- a/docs/reference/modules/scripting/painless.asciidoc +++ b/docs/reference/modules/scripting/painless.asciidoc @@ -1,7 +1,32 @@ [[modules-scripting-painless]] === Painless Scripting Language -include::../../../painless/painless-description.asciidoc[] +_Painless_ is a simple, secure scripting language designed specifically for use +with Elasticsearch. It is the default scripting language for Elasticsearch and +can safely be used for inline and stored scripts. To get started with +Painless, see the {painless}/painless-guide.html[Painless Guide]. For a +detailed description of the Painless syntax and language features, see the +{painless}/painless-lang-spec.html[Painless Language Specification]. -Ready to start scripting with Painless? See {painless}/painless-getting-started.html[Getting Started with Painless] in the guide to the +[[painless-features]] +You can use Painless anywhere scripts can be used in Elasticsearch. Painless +provides: + +* Fast performance: Painless scripts https://benchmarks.elastic.co/index.html#search_qps_scripts[ +run several times faster] than the alternatives. + +* Safety: Fine-grained whitelist with method call/field granularity. See the +{painless}/painless-api-reference.html[Painless API Reference] for a +complete list of available classes and methods. 
+ +* Optional typing: Variables and parameters can use explicit types or the +dynamic `def` type. + +* Syntax: Extends a subset of Java's syntax to provide additional scripting +language features. + +* Optimizations: Designed specifically for Elasticsearch scripting. + +Ready to start scripting with Painless? See the +{painless}/painless-guide.html[Painless Guide] for the {painless}/index.html[Painless Scripting Language]. \ No newline at end of file diff --git a/docs/reference/query-dsl/multi-term-rewrite.asciidoc b/docs/reference/query-dsl/multi-term-rewrite.asciidoc index 0d327a40fdea3..391b42ea00791 100644 --- a/docs/reference/query-dsl/multi-term-rewrite.asciidoc +++ b/docs/reference/query-dsl/multi-term-rewrite.asciidoc @@ -1,45 +1,109 @@ [[query-dsl-multi-term-rewrite]] -== Multi Term Query Rewrite - -Multi term queries, like -<> and -<> are called -multi term queries and end up going through a process of rewrite. This -also happens on the -<>. -All of those queries allow to control how they will get rewritten using -the `rewrite` parameter: - -* `constant_score` (default): A rewrite method that performs like -`constant_score_boolean` when there are few matching terms and otherwise -visits all matching terms in sequence and marks documents for that term. -Matching documents are assigned a constant score equal to the query's -boost. -* `scoring_boolean`: A rewrite method that first translates each term -into a should clause in a boolean query, and keeps the scores as -computed by the query. Note that typically such scores are meaningless -to the user, and require non-trivial CPU to compute, so it's almost -always better to use `constant_score`. This rewrite method will hit -too many clauses failure if it exceeds the boolean query limit (defaults -to `1024`). -* `constant_score_boolean`: Similar to `scoring_boolean` except scores -are not computed. Instead, each matching document receives a constant -score equal to the query's boost. This rewrite method will hit too many -clauses failure if it exceeds the boolean query limit (defaults to -`1024`). -* `top_terms_N`: A rewrite method that first translates each term into -should clause in boolean query, and keeps the scores as computed by the -query. This rewrite method only uses the top scoring terms so it will -not overflow boolean max clause count. The `N` controls the size of the -top scoring terms to use. -* `top_terms_boost_N`: A rewrite method that first translates each term -into should clause in boolean query, but the scores are only computed as -the boost. This rewrite method only uses the top scoring terms so it -will not overflow the boolean max clause count. The `N` controls the -size of the top scoring terms to use. -* `top_terms_blended_freqs_N`: A rewrite method that first translates each -term into should clause in boolean query, but all term queries compute scores -as if they had the same frequency. In practice the frequency which is used -is the maximum frequency of all matching terms. This rewrite method only uses -the top scoring terms so it will not overflow boolean max clause count. The -`N` controls the size of the top scoring terms to use. +== `rewrite` Parameter + +WARNING: This parameter is for expert users only. Changing the value of +this parameter can impact search performance and relevance. + +{es} uses https://lucene.apache.org/core/[Apache Lucene] internally to power +indexing and searching. 
In their original form, Lucene cannot execute the +following queries: + +* <> +* <> +* <> +* <> +* <> + +To execute them, Lucene changes these queries to a simpler form, such as a +<> or a +https://en.wikipedia.org/wiki/Bit_array[bit set]. + +The `rewrite` parameter determines: + +* How Lucene calculates the relevance scores for each matching document +* Whether Lucene changes the original query to a `bool` +query or bit set +* If changed to a `bool` query, which `term` query clauses are included + +[float] +[[rewrite-param-valid-values]] +=== Valid values + +`constant_score` (Default):: +Uses the `constant_score_boolean` method for fewer matching terms. Otherwise, +this method finds all matching terms in sequence and returns matching documents +using a bit set. + +`constant_score_boolean`:: +Assigns each document a relevance score equal to the `boost` +parameter. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +This method can cause the final `bool` query to exceed the clause limit in the +<> +setting. If the query exceeds this limit, {es} returns an error. + +`scoring_boolean`:: +Calculates a relevance score for each matching document. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +This method can cause the final `bool` query to exceed the clause limit in the +<> +setting. If the query exceeds this limit, {es} returns an error. + +`top_terms_blended_freqs_N`:: +Calculates a relevance score for each matching document as if all terms had the +same frequency. This frequency is the maximum frequency of all matching terms. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +The final `bool` query only includes `term` queries for the top `N` scoring +terms. ++ +You can use this method to avoid exceeding the clause limit in the +<> +setting. + +`top_terms_boost_N`:: +Assigns each matching document a relevance score equal to the `boost` parameter. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +The final `bool` query only includes `term` queries for the top `N` terms. ++ +You can use this method to avoid exceeding the clause limit in the +<> +setting. + +`top_terms_N`:: +Calculates a relevance score for each matching document. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +The final `bool` query +only includes `term` queries for the top `N` scoring terms. ++ +You can use this method to avoid exceeding the clause limit in the +<> +setting. + +[float] +[[rewrite-param-perf-considerations]] +=== Performance considerations for the `rewrite` parameter +For most uses, we recommend using the `constant_score`, +`constant_score_boolean`, or `top_terms_boost_N` rewrite methods. + +Other methods calculate relevance scores. These score calculations are often +expensive and do not improve query results. \ No newline at end of file diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index 42d4a7b1517e3..e8d97a31fa95f 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -78,10 +78,18 @@ to be the most efficient by using the internal mechanisms. 
[[vector-functions]] ===== Functions for vector fields + +experimental[] + These functions are used for for <> and <> fields. +NOTE: During vector functions' calculation, all matched documents are +linearly scanned. Thus, expect the query time to grow linearly +with the number of matched documents. For this reason, we recommend +limiting the number of matched documents with a `query` parameter. + For dense_vector fields, `cosineSimilarity` calculates the measure of cosine similarity between a given query vector and document vectors. diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index b25048fec90e6..0ec2e070b1c74 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -111,7 +111,7 @@ GET /my_index/_rank_eval ], "requests": [ { - "id": "amsterdam_query" + "id": "amsterdam_query", "ratings": [ ... ], "template_id": "match_one_field_query", <3> "params": { <4> diff --git a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc b/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc index 2bc2300174ecc..1d23430e37eec 100644 --- a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc +++ b/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc @@ -2,10 +2,8 @@ [[configuring-tls-docker]] === Encrypting communications in an {es} Docker Container -Starting with version 6.0.0, {stack} {security-features} -(Gold, Platinum or Enterprise subscriptions) -https://www.elastic.co/guide/en/elasticsearch/reference/6.0/breaking-6.0.0-xes.html[require SSL/TLS] -encryption for the transport networking layer. +Unless you are using a trial license, {stack} {security-features} require +SSL/TLS encryption for the transport networking layer. This section demonstrates an easy path to get started with SSL/TLS for both HTTPS and transport using the {es} Docker image. The example uses diff --git a/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc b/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc index 9d207f26a96b6..a24e272dd8937 100644 --- a/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc +++ b/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc @@ -7,8 +7,8 @@ your {es} cluster. Connections are secured using Transport Layer Security (TLS/SSL). WARNING: Clusters that do not have encryption enabled send all data in plain text -including passwords and will not be able to install a license that enables -{security-features}. +including passwords. If the {es} {security-features} are enabled, unless you +have a trial license, you must configure SSL/TLS for internode-communication. To enable encryption, you need to perform the following steps on each node in the cluster: diff --git a/docs/reference/security/securing-communications/setting-up-ssl.asciidoc b/docs/reference/security/securing-communications/setting-up-ssl.asciidoc index 90f9b040d9d54..68eda2cdc3e09 100644 --- a/docs/reference/security/securing-communications/setting-up-ssl.asciidoc +++ b/docs/reference/security/securing-communications/setting-up-ssl.asciidoc @@ -1,16 +1,15 @@ [[ssl-tls]] -=== Setting Up TLS on a cluster +=== Setting up TLS on a cluster -The {stack} {security-features} enables you to encrypt traffic to, from, and +The {stack} {security-features} enable you to encrypt traffic to, from, and within your {es} cluster.
Connections are secured using Transport Layer Security (TLS), which is commonly referred to as "SSL". WARNING: Clusters that do not have encryption enabled send all data in plain text -including passwords and will not be able to install a license that enables -{security-features}. +including passwords. If the {es} {security-features} are enabled, unless you have a trial license, you must configure SSL/TLS for internode-communication. The following steps describe how to enable encryption across the various -components of the Elastic Stack. You must perform each of the steps that are +components of the {stack}. You must perform each of the steps that are applicable to your cluster. . Generate a private key and X.509 certificate for each of your {es} nodes. See @@ -22,14 +21,14 @@ enable TLS on the HTTP layer. See {ref}/configuring-tls.html#tls-transport[Encrypting Communications Between Nodes in a Cluster] and {ref}/configuring-tls.html#tls-http[Encrypting HTTP Client Communications]. -. Configure {monitoring} to use encrypted connections. See <>. +. Configure the {monitor-features} to use encrypted connections. See <>. . Configure {kib} to encrypt communications between the browser and the {kib} server and to connect to {es} via HTTPS. See -{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}]. +{kibana-ref}/using-kibana-with-security.html[Configuring security in {kib}]. . Configure Logstash to use TLS encryption. See -{logstash-ref}/ls-security.html[Configuring Security in Logstash]. +{logstash-ref}/ls-security.html[Configuring security in {ls}]. . Configure Beats to use encrypted connections. See <>. diff --git a/docs/reference/setup/bootstrap-checks-xes.asciidoc b/docs/reference/setup/bootstrap-checks-xes.asciidoc index df020bbd96276..37c90e9f4d9a3 100644 --- a/docs/reference/setup/bootstrap-checks-xes.asciidoc +++ b/docs/reference/setup/bootstrap-checks-xes.asciidoc @@ -53,9 +53,8 @@ must also be valid. === SSL/TLS check //See TLSLicenseBootstrapCheck.java -In 6.0 and later releases, if you have a gold, platinum, or enterprise license -and {es} {security-features} are enabled, you must configure SSL/TLS for -internode-communication. +If you enable {es} {security-features}, unless you have a trial license, you +must configure SSL/TLS for internode-communication. NOTE: Single-node clusters that use a loopback interface do not have this requirement. For more information, see diff --git a/docs/reference/setup/important-settings/heap-size.asciidoc b/docs/reference/setup/important-settings/heap-size.asciidoc index 890a9786e09a5..37e417e086e9b 100644 --- a/docs/reference/setup/important-settings/heap-size.asciidoc +++ b/docs/reference/setup/important-settings/heap-size.asciidoc @@ -10,7 +10,7 @@ Elasticsearch will assign the entire heap specified in heap size) settings. You should set these two settings to be equal to each other. -The value for these setting depends on the amount of RAM available on your +The value for these settings depends on the amount of RAM available on your server: * Set `Xmx` and `Xms` to no more than 50% of your physical RAM. 
{es} requires diff --git a/docs/reference/sql/functions/geo.asciidoc b/docs/reference/sql/functions/geo.asciidoc new file mode 100644 index 0000000000000..112ddfffce6ed --- /dev/null +++ b/docs/reference/sql/functions/geo.asciidoc @@ -0,0 +1,194 @@ +[role="xpack"] +[testenv="basic"] +[[sql-functions-geo]] +=== Geo Functions + +beta[] + +The geo functions work with geometries stored in `geo_point` and `geo_shape` fields, or returned by other geo functions. + +==== Limitations + +Both <> and <> types are represented in SQL as geometry and can be used +interchangeably with the following exceptions: + +* `geo_shape` fields don't have doc values, therefore these fields cannot be used for filtering, grouping or sorting. + +* `geo_points` fields are indexed and have doc values by default, however only latitude and longitude are stored and + indexed with some loss of precision from the original values (4.190951585769653E-8 for the latitude and + 8.381903171539307E-8 for longitude). The altitude component is accepted but not stored in doc values nor indexed. + Therefore calling `ST_Z` function in the filtering, grouping or sorting will return `null`. + +==== Geometry Conversion + +[[sql-functions-geo-st-as-wkt]] +===== `ST_AsWKT` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_AsWKT(geometry<1>) +-------------------------------------------------- + +*Input*: + +<1> geometry + +*Output*: string + +.Description: + +Returns the WKT representation of the `geometry`. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[aswkt] +-------------------------------------------------- + + +[[sql-functions-geo-st-wkt-to-sql]] +===== `ST_WKTToSQL` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_WKTToSQL(string<1>) +-------------------------------------------------- + +*Input*: + +<1> string WKT representation of geometry + +*Output*: geometry + +.Description: + +Returns the geometry from WKT representation. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[aswkt] +-------------------------------------------------- + +==== Geometry Properties + +[[sql-functions-geo-st-geometrytype]] +===== `ST_GeometryType` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_GeometryType(geometry<1>) +-------------------------------------------------- + +*Input*: + +<1> geometry + +*Output*: string + +.Description: + +Returns the type of the `geometry` such as POINT, MULTIPOINT, LINESTRING, MULTILINESTRING, POLYGON, MULTIPOLYGON, GEOMETRYCOLLECTION, ENVELOPE or CIRCLE. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[geometrytype] +-------------------------------------------------- + +[[sql-functions-geo-st-x]] +===== `ST_X` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_X(geometry<1>) +-------------------------------------------------- + +*Input*: + +<1> geometry + +*Output*: double + +.Description: + +Returns the longitude of the first point in the geometry. 
+ +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[x] +-------------------------------------------------- + +[[sql-functions-geo-st-y]] +===== `ST_Y` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_Y(geometry<1>) +-------------------------------------------------- + +*Input*: + +<1> geometry + +*Output*: double + +.Description: + +Returns the the latitude of the first point in the geometry. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[y] +-------------------------------------------------- + +[[sql-functions-geo-st-z]] +===== `ST_Z` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_Z(geometry<1>) +-------------------------------------------------- + +*Input*: + +<1> geometry + +*Output*: double + +.Description: + +Returns the altitude of the first point in the geometry. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[z] +-------------------------------------------------- + +[[sql-functions-geo-st-distance]] +===== `ST_Distance` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_Distance(geometry<1>, geometry<2>) +-------------------------------------------------- + +*Input*: + +<1> source geometry +<2> target geometry + +*Output*: Double + +.Description: + +Returns the distance between geometries in meters. Both geometries have to be points. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[distance] +-------------------------------------------------- \ No newline at end of file diff --git a/docs/reference/sql/functions/index.asciidoc b/docs/reference/sql/functions/index.asciidoc index 382adeecea4ed..248c47452bab4 100644 --- a/docs/reference/sql/functions/index.asciidoc +++ b/docs/reference/sql/functions/index.asciidoc @@ -136,6 +136,14 @@ ** <> ** <> ** <> +* <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> * <> ** <> ** <> @@ -149,5 +157,6 @@ include::search.asciidoc[] include::math.asciidoc[] include::string.asciidoc[] include::type-conversion.asciidoc[] +include::geo.asciidoc[] include::conditional.asciidoc[] include::system.asciidoc[] diff --git a/docs/reference/sql/language/data-types.asciidoc b/docs/reference/sql/language/data-types.asciidoc index 8db4c88f3a11b..ad9b2a320c0c6 100644 --- a/docs/reference/sql/language/data-types.asciidoc +++ b/docs/reference/sql/language/data-types.asciidoc @@ -81,6 +81,8 @@ s|SQL precision | interval_hour_to_minute | 23 | interval_hour_to_second | 23 | interval_minute_to_second | 23 +| geo_point | 52 +| geo_shape | 2,147,483,647 |=== diff --git a/docs/reference/sql/limitations.asciidoc b/docs/reference/sql/limitations.asciidoc index b9c59e31b3d6f..c5b334480c993 100644 --- a/docs/reference/sql/limitations.asciidoc +++ b/docs/reference/sql/limitations.asciidoc @@ -150,3 +150,14 @@ SELECT count(*) FROM test GROUP BY MINUTE((CAST(date_created AS TIME)); ------------------------------------------------------------- SELECT HISTOGRAM(CAST(birth_date AS TIME), INTERVAL '10' MINUTES) as h, COUNT(*) FROM t GROUP BY h ------------------------------------------------------------- + +[float] +[[geo-sql-limitations]] +=== Geo-related functions + +Since `geo_shape` fields don't have 
doc values, these fields cannot be used for filtering, grouping or sorting. + +By default, `geo_point` fields are indexed and have doc values. However, only latitude and longitude are stored and +indexed with some loss of precision from the original values (4.190951585769653E-8 for the latitude and +8.381903171539307E-8 for longitude). The altitude component is accepted but not stored in doc values nor indexed. +Therefore, calling the `ST_Z` function when filtering, grouping or sorting will return `null`. diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc index 041b184570ab6..2cf1061e67ba7 100644 --- a/docs/reference/upgrade/rolling_upgrade.asciidoc +++ b/docs/reference/upgrade/rolling_upgrade.asciidoc @@ -168,20 +168,29 @@ include::open-ml.asciidoc[] During a rolling upgrade, the cluster continues to operate normally. However, any new functionality is disabled or operates in a backward compatible mode -until all nodes in the cluster are upgraded. New functionality -becomes operational once the upgrade is complete and all nodes are running the -new version. Once that has happened, there's no way to return to operating -in a backward compatible mode. Nodes running the previous major version will -not be allowed to join the fully-updated cluster. +until all nodes in the cluster are upgraded. New functionality becomes +operational once the upgrade is complete and all nodes are running the new +version. Once that has happened, there's no way to return to operating in a +backward compatible mode. Nodes running the previous major version will not be +allowed to join the fully-updated cluster. In the unlikely case of a network malfunction during the upgrade process that -isolates all remaining old nodes from the cluster, you must take the -old nodes offline and upgrade them to enable them to join the cluster. +isolates all remaining old nodes from the cluster, you must take the old nodes +offline and upgrade them to enable them to join the cluster. + +If you stop half or more of the master-eligible nodes all at once during the +upgrade then the cluster will become unavailable, meaning that the upgrade is +no longer a _rolling_ upgrade. If this happens, you should upgrade and restart +all of the stopped master-eligible nodes to allow the cluster to form again, as +if performing a <>. It may also +be necessary to upgrade all of the remaining old nodes before they can join the +cluster after it re-forms. Similarly, if you run a testing/development environment with only one master node, the master node should be upgraded last. Restarting a single master node forces the cluster to be reformed. The new cluster will initially only have the upgraded master node and will thus reject the older nodes when they re-join the -cluster. Nodes that have already been upgraded will successfully re-join the -upgraded master. +cluster. Nodes that have already been upgraded will successfully re-join the +upgraded master.
+ ==================================================== diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java index a880090a048ef..e9a1ccad3e950 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.ssl; +import org.elasticsearch.bootstrap.JavaVersion; + import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManagerFactory; import java.nio.file.Path; @@ -66,10 +68,7 @@ public abstract class SslConfigurationLoader { static final List DEFAULT_PROTOCOLS = List.of("TLSv1.3", "TLSv1.2", "TLSv1.1"); - /** - * This list has been created with ordering - */ - static final List DEFAULT_CIPHERS = List.of( + private static final List JDK11_CIPHERS = List.of( "TLS_AES_256_GCM_SHA384", "TLS_AES_128_GCM_SHA256", // TLSv1.3 cipher has PFS, AEAD, hardware support "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support @@ -80,6 +79,23 @@ public abstract class SslConfigurationLoader { "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", // AEAD, hardware support "TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA256", // hardware support "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA"); // hardware support + + private static final List JDK12_CIPHERS = List.of( + "TLS_AES_256_GCM_SHA384", "TLS_AES_128_GCM_SHA256", // TLSv1.3 cipher has PFS, AEAD, hardware support + "TLS_CHACHA20_POLY1305_SHA256", // TLSv1.3 cipher has PFS, AEAD + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", // PFS, AEAD + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", // PFS, hardware support + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", // PFS, hardware support + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", // PFS, hardware support + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", // PFS, hardware support + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", // AEAD, hardware support + "TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA256", // hardware support + "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA"); // hardware support + + static final List DEFAULT_CIPHERS = + JavaVersion.current().compareTo(JavaVersion.parse("12")) > -1 ? 
JDK12_CIPHERS : JDK11_CIPHERS; private static final char[] EMPTY_PASSWORD = new char[0]; private final String settingPrefix; diff --git a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java index 20a161b78fd5f..b8648efe49618 100644 --- a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java +++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.ssl; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -33,8 +34,10 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; public class SslConfigurationLoaderTests extends ESTestCase { @@ -217,4 +220,18 @@ public void testLoadKeysFromJKS() { assertThat(keyConfig.getDependentFiles(), containsInAnyOrder(getDataPath("/certs/cert-all/certs.jks"))); assertThat(keyConfig.createKeyManager(), notNullValue()); } + + public void testChaCha20InCiphersOnJdk12Plus() { + assumeTrue("Test is only valid on JDK 12+ JVM", JavaVersion.current().compareTo(JavaVersion.parse("12")) > -1); + assertThat(SslConfigurationLoader.DEFAULT_CIPHERS, hasItem("TLS_CHACHA20_POLY1305_SHA256")); + assertThat(SslConfigurationLoader.DEFAULT_CIPHERS, hasItem("TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256")); + assertThat(SslConfigurationLoader.DEFAULT_CIPHERS, hasItem("TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256")); + } + + public void testChaCha20NotInCiphersOnPreJdk12() { + assumeTrue("Test is only valid on pre JDK 12 JVM", JavaVersion.current().compareTo(JavaVersion.parse("12")) < 0); + assertThat(SslConfigurationLoader.DEFAULT_CIPHERS, not(hasItem("TLS_CHACHA20_POLY1305_SHA256"))); + assertThat(SslConfigurationLoader.DEFAULT_CIPHERS, not(hasItem("TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"))); + assertThat(SslConfigurationLoader.DEFAULT_CIPHERS, not(hasItem("TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"))); + } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index c883bb5893c9f..3021f5b31606e 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -293,17 +293,9 @@ protected void doWriteTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeVInt(documents.size()); - for (BytesReference document : documents) { - out.writeBytesReference(document); - } - } else { - if (documents.size() > 1) { - throw new IllegalArgumentException("Nodes prior to 6.1.0 cannot accept multiple documents"); - } - BytesReference doc = documents.isEmpty() ? 
null : documents.iterator().next(); - out.writeOptionalBytesReference(doc); + out.writeVInt(documents.size()); + for (BytesReference document : documents) { + out.writeBytesReference(document); } if (documents.isEmpty() == false) { out.writeEnum(documentXContentType); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index 9ad660b4e548c..d2038e2e2bfde 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -261,7 +261,7 @@ Query percolateQuery(String name, PercolateQuery.QueryStore queryStore, List createCandidateQuery(IndexReader indexReader, Versi } BooleanQuery.Builder candidateQuery = new BooleanQuery.Builder(); - if (canUseMinimumShouldMatchField && indexVersion.onOrAfter(Version.V_6_1_0)) { + if (canUseMinimumShouldMatchField) { LongValuesSource valuesSource = LongValuesSource.fromIntField(minimumShouldMatchField.name()); for (BytesRef extractedTerm : extractedTerms) { subQueries.add(new TermQuery(new Term(queryTermsField.name(), extractedTerm))); @@ -471,9 +471,7 @@ void processQuery(Query query, ParseContext context) { for (IndexableField field : fields) { context.doc().add(field); } - if (indexVersionCreated.onOrAfter(Version.V_6_1_0)) { - doc.add(new NumericDocValuesField(minimumShouldMatchFieldMapper.name(), result.minimumShouldMatch)); - } + doc.add(new NumericDocValuesField(minimumShouldMatchFieldMapper.name(), result.minimumShouldMatch)); } static Query parseQuery(QueryShardContext context, boolean mapUnmappedFieldsAsString, XContentParser parser) throws IOException { diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java index 5bdeef8a7b1b4..bcec2548de307 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java @@ -30,7 +30,6 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BitSetIterator; -import org.elasticsearch.Version; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.search.SearchHit; @@ -73,9 +72,8 @@ static void innerHitsExecute(Query mainQuery, for (PercolateQuery percolateQuery : percolateQueries) { String fieldName = singlePercolateQuery ? 
FIELD_NAME_PREFIX : FIELD_NAME_PREFIX + "_" + percolateQuery.getName(); IndexSearcher percolatorIndexSearcher = percolateQuery.getPercolatorIndexSearcher(); - Query nonNestedQuery = Queries.newNonNestedFilter(Version.CURRENT); - Weight weight = percolatorIndexSearcher.createWeight(percolatorIndexSearcher.rewrite(nonNestedQuery), - ScoreMode.COMPLETE_NO_SCORES, 1f); + Weight weight = percolatorIndexSearcher.createWeight(percolatorIndexSearcher.rewrite(Queries.newNonNestedFilter()), + ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer s = weight.scorer(percolatorIndexSearcher.getIndexReader().leaves().get(0)); int memoryIndexMaxDoc = percolatorIndexSearcher.getIndexReader().maxDoc(); BitSet rootDocs = BitSet.of(s.iterator(), memoryIndexMaxDoc); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 07f47df41e60d..b191dd948c574 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -97,6 +97,7 @@ import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.VersionUtils; import org.junit.After; import org.junit.Before; @@ -593,7 +594,7 @@ public void testRangeQueries() throws Exception { IndexSearcher shardSearcher = newSearcher(directoryReader); shardSearcher.setQueryCache(null); - Version v = Version.V_6_1_0; + Version v = VersionUtils.randomIndexCompatibleVersion(random()); MemoryIndex memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new IntPoint("int_field", 3)), new WhitespaceAnalyzer()); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); Query query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index f1747d1977561..f5d30a951e68c 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -399,26 +399,6 @@ public void testCreateCandidateQuery() throws Exception { assertThat(t.v1().clauses().get(2).getQuery().toString(), containsString(fieldName + ".extraction_result:failed")); } - public void testCreateCandidateQuery_oldIndex() throws Exception { - addQueryFieldMappings(); - - MemoryIndex memoryIndex = new MemoryIndex(false); - memoryIndex.addField("field1", "value1", new WhitespaceAnalyzer()); - IndexReader indexReader = memoryIndex.createSearcher().getIndexReader(); - - Tuple t = fieldType.createCandidateQuery(indexReader, Version.CURRENT); - assertTrue(t.v2()); - assertEquals(2, t.v1().clauses().size()); - assertThat(t.v1().clauses().get(0).getQuery(), instanceOf(CoveringQuery.class)); - assertThat(t.v1().clauses().get(1).getQuery(), instanceOf(TermQuery.class)); - - t = fieldType.createCandidateQuery(indexReader, Version.V_6_0_0); - assertTrue(t.v2()); - assertEquals(2, t.v1().clauses().size()); - assertThat(t.v1().clauses().get(0).getQuery(), instanceOf(TermInSetQuery.class)); - assertThat(t.v1().clauses().get(1).getQuery(), instanceOf(TermQuery.class)); - } - public void 
testExtractTermsAndRanges_numberFields() throws Exception { addQueryFieldMappings(); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index 544cfc6ef6193..c07467187f05f 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -333,12 +333,6 @@ public void testExtractQueryMetadata_booleanQueryWithMustNot() { assertThat(result.verified, is(false)); assertThat(result.minimumShouldMatch, equalTo(0)); assertTermsEqual(result.extractions); - - result = analyze(booleanQuery, Version.CURRENT); - assertThat(result.matchAllDocs, is(true)); - assertThat(result.verified, is(false)); - assertThat(result.minimumShouldMatch, equalTo(0)); - assertTermsEqual(result.extractions); } public void testExactMatch_booleanQuery() { diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 697125fbd537d..7eeadc7f6475b 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -45,8 +45,7 @@ public class AzureBlobStore implements BlobStore { private final String container; private final LocationMode locationMode; - public AzureBlobStore(RepositoryMetaData metadata, AzureStorageService service) - throws URISyntaxException, StorageException { + public AzureBlobStore(RepositoryMetaData metadata, AzureStorageService service) { this.container = Repository.CONTAINER_SETTING.get(metadata.settings()); this.clientName = Repository.CLIENT_NAME.get(metadata.settings()); this.service = service; @@ -69,10 +68,6 @@ public LocationMode getLocationMode() { return locationMode; } - public String getClientName() { - return clientName; - } - @Override public BlobContainer blobContainer(BlobPath path) { return new AzureBlobContainer(path, this); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 33ee9b64c2683..7c3520918fc58 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -112,20 +112,16 @@ public AzureRepository(RepositoryMetaData metadata, Environment environment, Nam } } - // only use for testing @Override protected BlobStore getBlobStore() { return super.getBlobStore(); } - /** - * {@inheritDoc} - */ @Override - protected AzureBlobStore createBlobStore() throws URISyntaxException, StorageException { + protected AzureBlobStore createBlobStore() { final AzureBlobStore blobStore = new AzureBlobStore(metadata, storageService); - logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", blobStore, chunkSize, isCompress(), basePath)); return blobStore; @@ -136,9 +132,6 @@ protected BlobPath basePath() { return basePath; } - /** - * {@inheritDoc} - */ @Override protected ByteSizeValue chunkSize() 
{ return chunkSize; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 0d18592b8a7bb..89a78fd8045ee 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -97,7 +97,7 @@ public Tuple> client(String clientNa } } - protected CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { + private static CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { final CloudBlobClient client = createClient(azureStorageSettings); // Set timeout option if the user sets cloud.azure.storage.timeout or // cloud.azure.storage.xxx.timeout (it's negative by default) @@ -115,12 +115,12 @@ protected CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) return client; } - protected CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { + private static CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { final String connectionString = azureStorageSettings.buildConnectionString(); return CloudStorageAccount.parse(connectionString).createCloudBlobClient(); } - protected OperationContext buildOperationContext(AzureStorageSettings azureStorageSettings) { + private static OperationContext buildOperationContext(AzureStorageSettings azureStorageSettings) { final OperationContext context = new OperationContext(); context.setProxy(azureStorageSettings.getProxy()); return context; @@ -146,24 +146,6 @@ public boolean doesContainerExist(String account, String container) throws URISy return SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, client.v2().get())); } - public void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException { - final Tuple> client = client(account); - // container name must be lower case. 
- logger.trace(() -> new ParameterizedMessage("delete files container [{}], path [{}]", container, path)); - SocketAccess.doPrivilegedVoidException(() -> { - // list the blobs using a flat blob listing mode - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true, EnumSet.noneOf(BlobListingDetails.class), null, - client.v2().get())) { - final String blobName = blobNameFromUri(blobItem.getUri()); - logger.trace(() -> new ParameterizedMessage("removing blob [{}] full URI was [{}]", blobName, blobItem.getUri())); - // don't call {@code #deleteBlob}, use the same client - final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blobName); - azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get()); - } - }); - } - /** * Extract the blob name from a URI like https://myservice.azure.net/container/path/to/myfile * It should remove the container part (first part of the path) and gives path/to/myfile diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index 3f7a5df8f14b2..e57d855cb0ee5 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -129,14 +129,6 @@ private AzureStorageSettings(String account, String key, String endpointSuffix, this.locationMode = LocationMode.PRIMARY_ONLY; } - public String getKey() { - return key; - } - - public String getAccount() { - return account; - } - public String getEndpointSuffix() { return endpointSuffix; } @@ -207,7 +199,7 @@ public static Map load(Settings settings) { // pkg private for tests /** Parse settings for a single client. 
*/ - static AzureStorageSettings getClientSettings(Settings settings, String clientName) { + private static AzureStorageSettings getClientSettings(Settings settings, String clientName) { try (SecureString account = getConfigValue(settings, clientName, ACCOUNT_SETTING); SecureString key = getConfigValue(settings, clientName, KEY_SETTING)) { return new AzureStorageSettings(account.toString(), key.toString(), @@ -226,7 +218,7 @@ private static T getConfigValue(Settings settings, String clientName, return concreteSetting.get(settings); } - public static T getValue(Settings settings, String groupName, Setting setting) { + private static T getValue(Settings settings, String groupName, Setting setting) { final Setting.AffixKey k = (Setting.AffixKey) setting.getRawKey(); final String fullKey = k.toConcreteKey(groupName).toString(); return setting.getConcreteSetting(fullKey).get(settings); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java index da8b85430067c..1400cc5b06627 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java @@ -48,7 +48,7 @@ public static T doPrivilegedIOException(PrivilegedExceptionAction operati } } - public static T doPrivilegedException(PrivilegedExceptionAction operation) throws StorageException, URISyntaxException { + public static T doPrivilegedException(PrivilegedExceptionAction operation) throws StorageException { SpecialPermission.check(); try { return AccessController.doPrivileged(operation); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java index a06dd7c3f28b1..13cc487a1c122 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java @@ -19,24 +19,17 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; -import java.io.IOException; -import java.net.URISyntaxException; public class AzureBlobStoreContainerTests extends ESBlobStoreContainerTestCase { @Override - protected BlobStore newBlobStore() throws IOException { - try { - RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); - AzureStorageServiceMock client = new AzureStorageServiceMock(); - return new AzureBlobStore(repositoryMetaData, client); - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } + protected BlobStore newBlobStore() { + RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); + AzureStorageServiceMock client = new AzureStorageServiceMock(); + return new AzureBlobStore(repositoryMetaData, client); } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java 
b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java index 9a0c9039d089c..67d30fda05b69 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java @@ -18,25 +18,17 @@ */ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreTestCase; -import java.io.IOException; -import java.net.URISyntaxException; - public class AzureBlobStoreTests extends ESBlobStoreTestCase { @Override - protected BlobStore newBlobStore() throws IOException { - try { - RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); - AzureStorageServiceMock client = new AzureStorageServiceMock(); - return new AzureBlobStore(repositoryMetaData, client); - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } + protected BlobStore newBlobStore() { + RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); + AzureStorageServiceMock client = new AzureStorageServiceMock(); + return new AzureBlobStore(repositoryMetaData, client); } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 5f3072e1ad9ed..17502a1d1f982 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -33,7 +33,6 @@ import java.io.IOException; import java.io.InputStream; import java.net.SocketPermission; -import java.net.URISyntaxException; import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; import java.security.AccessController; @@ -61,21 +60,13 @@ public boolean doesContainerExist(String account, String container) { return true; } - @Override - public void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException { - final Map blobs = listBlobsByPrefix(account, container, path, null); - for (String key : blobs.keySet()) { - deleteBlob(account, container, key); - } - } - @Override public boolean blobExists(String account, String container, String blob) { return blobs.containsKey(blob); } @Override - public void deleteBlob(String account, String container, String blob) throws URISyntaxException, StorageException { + public void deleteBlob(String account, String container, String blob) throws StorageException { if (blobs.remove(blob) == null) { throw new StorageException("BlobNotFound", "[" + blob + "] does not exist.", 404, null, null); } @@ -109,8 +100,7 @@ public Map listBlobsByPrefix(String account, String contai @Override public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize, - boolean failIfAlreadyExists) - throws URISyntaxException, StorageException, FileAlreadyExistsException { + boolean failIfAlreadyExists) throws StorageException, FileAlreadyExistsException { if (failIfAlreadyExists && 
blobs.containsKey(blobName)) { throw new FileAlreadyExistsException(blobName); } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java index e557b47fb8912..2eb3a288fbcc2 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java @@ -222,7 +222,6 @@ private static void verifyDefaultInstallation(Installation es, Distribution dist "elasticsearch-certgen", "elasticsearch-certutil", "elasticsearch-croneval", - "elasticsearch-migrate", "elasticsearch-saml-metadata", "elasticsearch-setup-passwords", "elasticsearch-sql-cli", diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java index 70ac89dc3b7f5..4d528b96c32e9 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java @@ -244,7 +244,6 @@ private static void verifyDefaultInstallation(Installation es) { "elasticsearch-certgen", "elasticsearch-certutil", "elasticsearch-croneval", - "elasticsearch-migrate", "elasticsearch-saml-metadata", "elasticsearch-setup-passwords", "elasticsearch-sql-cli", diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle index dcbf5253bb085..bcb55079b8269 100644 --- a/qa/wildfly/build.gradle +++ b/qa/wildfly/build.gradle @@ -70,7 +70,7 @@ dependencies { compile "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" compile "org.apache.logging.log4j:log4j-core:${versions.log4j}" - compile project(path: ':client:transport', configuration: 'runtime') + compile project(path: ':client:rest-high-level') wildfly "org.jboss:wildfly:${wildflyVersion}@zip" testCompile "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}" } @@ -93,8 +93,7 @@ task writeElasticsearchProperties { final File elasticsearchProperties = file("${wildflyInstall}/standalone/configuration/elasticsearch.properties") elasticsearchProperties.write( [ - "transport.uri=${-> integTest.getNodes().get(0).transportUri()}", - "cluster.name=${-> integTest.getNodes().get(0).clusterName}" + "http.uri=${-> integTest.getNodes().get(0).httpUri()}" ].join("\n")) } } @@ -167,7 +166,7 @@ task startWildfly { } } -task configureTransportClient(type: LoggedExec) { +task configureClient(type: LoggedExec) { dependsOn startWildfly // we skip these tests on Windows so we do not need to worry about compatibility here commandLine "${wildflyInstall}/bin/jboss-cli.sh", @@ -182,7 +181,7 @@ task stopWildfly(type: LoggedExec) { } if (!Os.isFamily(Os.FAMILY_WINDOWS)) { - integTestRunner.dependsOn(configureTransportClient) + integTestRunner.dependsOn(configureClient) final TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() { @Override void afterExecute(final Task task, final TaskState state) { diff --git a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientActivator.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientActivator.java similarity index 87% rename from qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientActivator.java rename to qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientActivator.java index 881b263f35b97..c860f9e5e1bf7 100644 
--- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientActivator.java +++ b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientActivator.java @@ -26,11 +26,11 @@ import java.util.Set; @ApplicationPath("/transport") -public class TransportClientActivator extends Application { +public class RestHighLevelClientActivator extends Application { @Override public Set> getClasses() { - return Collections.singleton(TransportClientEmployeeResource.class); + return Collections.singleton(RestHighLevelClientEmployeeResource.class); } } diff --git a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientEmployeeResource.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientEmployeeResource.java similarity index 84% rename from qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientEmployeeResource.java rename to qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientEmployeeResource.java index 4008bf8801a55..d99810a9638cc 100644 --- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientEmployeeResource.java +++ b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientEmployeeResource.java @@ -19,9 +19,12 @@ package org.elasticsearch.wildfly.transport; +import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.wildfly.model.Employee; @@ -33,7 +36,6 @@ import javax.ws.rs.Produces; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; - import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -44,17 +46,17 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @Path("/employees") -public class TransportClientEmployeeResource { +public class RestHighLevelClientEmployeeResource { @Inject - private TransportClient client; + private RestHighLevelClient client; @GET @Path("/{id}") @Produces(MediaType.APPLICATION_JSON) - public Response getEmployeeById(final @PathParam("id") Long id) { + public Response getEmployeeById(final @PathParam("id") Long id) throws IOException { Objects.requireNonNull(id); - final GetResponse response = client.prepareGet("megacorp", "employee", Long.toString(id)).get(); + final GetResponse response = client.get(new GetRequest("megacorp", Long.toString(id)), RequestOptions.DEFAULT); if (response.isExists()) { final Map source = response.getSource(); final Employee employee = new Employee(); @@ -94,7 +96,10 @@ public Response putEmployeeById(final @PathParam("id") Long id, final Employee e } } builder.endObject(); - final IndexResponse response = client.prepareIndex("megacorp", "employee", Long.toString(id)).setSource(builder).get(); + final IndexRequest request = new IndexRequest("megacorp"); + request.id(Long.toString(id)); + request.source(builder); + final IndexResponse response = client.index(request, RequestOptions.DEFAULT); if (response.status().getStatus() == 201) { return Response.created(new URI("/employees/" + id)).build(); } else { diff --git 
a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientProducer.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientProducer.java similarity index 58% rename from qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientProducer.java rename to qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientProducer.java index 7c234bce6cdb7..5d924192342ef 100644 --- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientProducer.java +++ b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientProducer.java @@ -19,46 +19,34 @@ package org.elasticsearch.wildfly.transport; -import org.elasticsearch.client.transport.TransportClient; +import org.apache.http.HttpHost; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.transport.client.PreBuiltTransportClient; import javax.enterprise.inject.Produces; - import java.io.IOException; import java.io.InputStream; -import java.net.InetAddress; import java.nio.file.Files; import java.nio.file.Path; -import java.util.Collections; import java.util.Properties; @SuppressWarnings("unused") -public final class TransportClientProducer { +public final class RestHighLevelClientProducer { @Produces - public TransportClient createTransportClient() throws IOException { + public RestHighLevelClient createRestHighLevelClient() throws IOException { final String elasticsearchProperties = System.getProperty("elasticsearch.properties"); final Properties properties = new Properties(); - final String transportUri; - final String clusterName; + final String httpUri; try (InputStream is = Files.newInputStream(getPath(elasticsearchProperties))) { properties.load(is); - transportUri = properties.getProperty("transport.uri"); - clusterName = properties.getProperty("cluster.name"); + httpUri = properties.getProperty("http.uri"); } - final int lastColon = transportUri.lastIndexOf(':'); - final String host = transportUri.substring(0, lastColon); - final int port = Integer.parseInt(transportUri.substring(lastColon + 1)); - final Settings settings = Settings.builder().put("cluster.name", clusterName).build(); - final TransportClient transportClient = new PreBuiltTransportClient(settings, Collections.emptyList()); - transportClient.addTransportAddress(new TransportAddress(InetAddress.getByName(host), port)); - return transportClient; + return new RestHighLevelClient(RestClient.builder(HttpHost.create(httpUri))); } @SuppressForbidden(reason = "get path not configured in environment") diff --git a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportJacksonJsonProvider.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java similarity index 92% rename from qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportJacksonJsonProvider.java rename to qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java index 07585780c0665..50568790ca064 100644 --- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportJacksonJsonProvider.java +++ b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java @@ 
-24,5 +24,5 @@ import javax.ws.rs.ext.Provider; @Provider -public class TransportJacksonJsonProvider extends ResteasyJackson2Provider { +public class RestHighLevelJacksonJsonProvider extends ResteasyJackson2Provider { } diff --git a/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java b/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java index 9aebffdc4ce3f..28e11f021a1c7 100644 --- a/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java +++ b/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java @@ -53,7 +53,7 @@ @TestRuleLimitSysouts.Limit(bytes = 14000) public class WildflyIT extends LuceneTestCase { - Logger logger = Logger.getLogger(WildflyIT.class); + private Logger logger = Logger.getLogger(WildflyIT.class); public void testTransportClient() throws URISyntaxException, IOException { try (CloseableHttpClient client = HttpClientBuilder.create().build()) { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json index f21d2606364d1..65fcf02807ba1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json @@ -33,10 +33,6 @@ "type" : "string", "description" : "Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" }, - "parent": { - "type" : "string", - "description" : "ID of the parent document" - }, "refresh": { "type" : "enum", "options": ["true", "false", "wait_for"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json index 792f9d89609bf..0152374028832 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json @@ -33,10 +33,6 @@ "type" : "string", "description" : "Sets the number of shard copies that must be active before proceeding with the delete operation. Defaults to 1, meaning the primary shard only. 
Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" }, - "parent": { - "type" : "string", - "description" : "ID of parent document" - }, "refresh": { "type" : "enum", "options": ["true", "false", "wait_for"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json index 3debd3edce585..2a451344521e4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json @@ -33,10 +33,6 @@ "type": "list", "description" : "A comma-separated list of stored fields to return in the response" }, - "parent": { - "type" : "string", - "description" : "The ID of the parent document" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: random)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json index 89f9c33e5fb44..30e56141ec001 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json @@ -30,10 +30,6 @@ } }, "params": { - "parent": { - "type" : "string", - "description" : "The ID of the parent document" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: random)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json index 12aa7a8dca942..203ef23c9cc10 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json @@ -55,10 +55,6 @@ "type" : "boolean", "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" }, - "parent": { - "type" : "string", - "description" : "The ID of the parent document" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: random)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json index 5b2203c94deb9..f4e0fdd5f90ef 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json @@ -33,10 +33,6 @@ "type": "list", "description" : "A comma-separated list of stored fields to return in the response" }, - "parent": { - "type" : "string", - "description" : "The ID of the parent document" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: random)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json index a26691edc41fc..d6f6964aa7c36 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json @@ -30,10 +30,6 @@ } }, "params": { - "parent": { - "type" : "string", - "description" : "The ID of the parent document" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: 
random)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index 2a2053d2250a0..438032980a3c5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -43,10 +43,6 @@ "default" : "index", "description" : "Explicit operation type" }, - "parent": { - "type" : "string", - "description" : "ID of the parent document" - }, "refresh": { "type" : "enum", "options": ["true", "false", "wait_for"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json b/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json index ac73f84e30d6d..aaff8e73259cf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json @@ -62,11 +62,6 @@ "description" : "Specific routing value. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".", "required" : false }, - "parent" : { - "type" : "string", - "description" : "Parent id of documents. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".", - "required" : false - }, "realtime": { "type": "boolean", "description": "Specifies if requests are real-time as opposed to near-real-time (default: true).", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json b/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json index 0570433507055..bbbdc7c87ad0b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json @@ -62,11 +62,6 @@ "description" : "Specific routing value.", "required" : false }, - "parent": { - "type" : "string", - "description" : "Parent id of documents.", - "required" : false - }, "realtime": { "type": "boolean", "description": "Specifies if request is real-time as opposed to near-real-time (default: true).", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index b85c70be57d9e..02435190674cf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -49,10 +49,6 @@ "type": "string", "description": "The script language (default: painless)" }, - "parent": { - "type": "string", - "description": "ID of the parent document. 
Is is only used for routing and when for the upsert request" - }, "refresh": { "type" : "enum", "options": ["true", "false", "wait_for"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml index a6b7cae104418..e0183f0c54f66 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml @@ -251,8 +251,20 @@ setup: --- "Bad params": + - skip: + version: " - 7.1.99" + reason: "empty bodies throws exception starting in 7.2" + - do: + catch: /\[filters\] cannot be empty/ + search: + rest_total_hits_as_int: true + body: + aggs: + the_filter: + filters: {} - do: + catch: /\[filters\] cannot be empty/ search: rest_total_hits_as_int: true body: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml index b15a48f52a43e..df6664141c3b3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml @@ -165,20 +165,6 @@ setup: docvalue_fields: [ "count" ] - match: { hits.hits.0.fields.count: [1] } ---- -"docvalue_fields with default format": - - skip: - features: warnings - - do: - warnings: - - "[use_field_mapping] is a special format that was only used to ease the transition to 7.x. It has become the default and shouldn't be set explicitly anymore." - search: - body: - docvalue_fields: - - field: "count" - format: "use_field_mapping" - - match: { hits.hits.0.fields.count: [1] } - --- "docvalue_fields with explicit format": diff --git a/server/licenses/joda-time-2.10.1.jar.sha1 b/server/licenses/joda-time-2.10.1.jar.sha1 deleted file mode 100644 index 75e809754ecee..0000000000000 --- a/server/licenses/joda-time-2.10.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ac3dbf89dbf2ee385185dd0cd3064fe789efee0 \ No newline at end of file diff --git a/server/licenses/joda-time-2.10.2.jar.sha1 b/server/licenses/joda-time-2.10.2.jar.sha1 new file mode 100644 index 0000000000000..9cbac57161c8e --- /dev/null +++ b/server/licenses/joda-time-2.10.2.jar.sha1 @@ -0,0 +1 @@ +a079fc39ccc3de02acdeb7117443e5d9bd431687 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 861228d221778..a2e53a1189f1b 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1006,7 +1006,7 @@ private enum ElasticsearchExceptionHandle { UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.UnknownNamedObjectException.class, org.elasticsearch.common.xcontent.UnknownNamedObjectException::new, 148, UNKNOWN_VERSION_ADDED), TOO_MANY_BUCKETS_EXCEPTION(MultiBucketConsumerService.TooManyBucketsException.class, - MultiBucketConsumerService.TooManyBucketsException::new, 149, Version.V_6_2_0), + MultiBucketConsumerService.TooManyBucketsException::new, 149, UNKNOWN_VERSION_ADDED), COORDINATION_STATE_REJECTED_EXCEPTION(org.elasticsearch.cluster.coordination.CoordinationStateRejectedException.class, org.elasticsearch.cluster.coordination.CoordinationStateRejectedException::new, 150, 
Version.V_7_0_0), SNAPSHOT_IN_PROGRESS_EXCEPTION(org.elasticsearch.snapshots.SnapshotInProgressException.class, @@ -1022,7 +1022,12 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.index.seqno.RetentionLeaseNotFoundException.class, org.elasticsearch.index.seqno.RetentionLeaseNotFoundException::new, 154, - Version.V_6_7_0); + Version.V_6_7_0), + SHARD_NOT_IN_PRIMARY_MODE_EXCEPTION( + org.elasticsearch.index.shard.ShardNotInPrimaryModeException.class, + org.elasticsearch.index.shard.ShardNotInPrimaryModeException::new, + 155, + Version.V_6_8_1); final Class exceptionClass; final CheckedFunction constructor; diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index e4269a375dd6c..48461ffe30d4b 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -175,12 +175,42 @@ public static T useOrSuppress(T first, T second) { return first; } + private static final List> CORRUPTION_EXCEPTIONS = + List.of(CorruptIndexException.class, IndexFormatTooOldException.class, IndexFormatTooNewException.class); + + /** + * Looks at the given Throwable and its cause(s), as well as any suppressed exceptions on the Throwable and on its causes, + * and returns the first corruption-indicating exception (as defined by {@link #CORRUPTION_EXCEPTIONS}) it finds. + * @param t Throwable + * @return Corruption indicating exception if one is found, otherwise {@code null} + */ public static IOException unwrapCorruption(Throwable t) { - return (IOException) unwrap(t, CorruptIndexException.class, - IndexFormatTooOldException.class, - IndexFormatTooNewException.class); + if (t != null) { + do { + for (Class clazz : CORRUPTION_EXCEPTIONS) { + if (clazz.isInstance(t)) { + return (IOException) t; + } + } + for (Throwable suppressed : t.getSuppressed()) { + IOException corruptionException = unwrapCorruption(suppressed); + if (corruptionException != null) { + return corruptionException; + } + } + } while ((t = t.getCause()) != null); + } + return null; } + /** + * Looks at the given Throwable and its cause(s) and returns the first Throwable that is of one of the given classes or {@code null} + * if no matching Throwable is found. Unlike {@link #unwrapCorruption} this method only checks the given Throwable and its causes + * but does not look at any suppressed exceptions. + * @param t Throwable + * @param clazzes Classes to look for + * @return Matching Throwable if one is found, otherwise {@code null} + */ public static Throwable unwrap(Throwable t, Class... 
clazzes) { if (t != null) { do { diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index c2d927f457bd1..90b7ae869e811 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -46,34 +46,8 @@ public class Version implements Comparable, ToXContentFragment { */ public static final int V_EMPTY_ID = 0; public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); - public static final int V_6_0_0_ID = 6000099; - public static final Version V_6_0_0 = - new Version(V_6_0_0_ID, org.apache.lucene.util.Version.LUCENE_7_0_1); - public static final int V_6_0_1_ID = 6000199; - public static final Version V_6_0_1 = - new Version(V_6_0_1_ID, org.apache.lucene.util.Version.LUCENE_7_0_1); - public static final int V_6_1_0_ID = 6010099; - public static final Version V_6_1_0 = new Version(V_6_1_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); - public static final int V_6_1_1_ID = 6010199; - public static final Version V_6_1_1 = new Version(V_6_1_1_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); - public static final int V_6_1_2_ID = 6010299; - public static final Version V_6_1_2 = new Version(V_6_1_2_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); - public static final int V_6_1_3_ID = 6010399; - public static final Version V_6_1_3 = new Version(V_6_1_3_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); - public static final int V_6_1_4_ID = 6010499; - public static final Version V_6_1_4 = new Version(V_6_1_4_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); // The below version is missing from the 7.3 JAR private static final org.apache.lucene.util.Version LUCENE_7_2_1 = org.apache.lucene.util.Version.fromBits(7, 2, 1); - public static final int V_6_2_0_ID = 6020099; - public static final Version V_6_2_0 = new Version(V_6_2_0_ID, LUCENE_7_2_1); - public static final int V_6_2_1_ID = 6020199; - public static final Version V_6_2_1 = new Version(V_6_2_1_ID, LUCENE_7_2_1); - public static final int V_6_2_2_ID = 6020299; - public static final Version V_6_2_2 = new Version(V_6_2_2_ID, LUCENE_7_2_1); - public static final int V_6_2_3_ID = 6020399; - public static final Version V_6_2_3 = new Version(V_6_2_3_ID, LUCENE_7_2_1); - public static final int V_6_2_4_ID = 6020499; - public static final Version V_6_2_4 = new Version(V_6_2_4_ID, LUCENE_7_2_1); public static final int V_6_3_0_ID = 6030099; public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); public static final int V_6_3_1_ID = 6030199; @@ -110,18 +84,18 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_7_1 = new Version(V_6_7_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_6_7_2_ID = 6070299; public static final Version V_6_7_2 = new Version(V_6_7_2_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); - public static final int V_6_7_3_ID = 6070399; - public static final Version V_6_7_3 = new Version(V_6_7_3_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_6_8_0_ID = 6080099; public static final Version V_6_8_0 = new Version(V_6_8_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); + public static final int V_6_8_1_ID = 6080199; + public static final Version V_6_8_1 = new Version(V_6_8_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_7_0_0_ID = 7000099; public static final Version V_7_0_0 = 
new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_0_1_ID = 7000199; public static final Version V_7_0_1 = new Version(V_7_0_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final int V_7_0_2_ID = 7000299; - public static final Version V_7_0_2 = new Version(V_7_0_2_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_0_ID = 7010099; public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final int V_7_1_1_ID = 7010199; + public static final Version V_7_1_1 = new Version(V_7_1_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_2_0_ID = 7020099; public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_8_0_0_ID = 8000099; @@ -144,18 +118,18 @@ public static Version fromId(int id) { return V_8_0_0; case V_7_2_0_ID: return V_7_2_0; + case V_7_1_1_ID: + return V_7_1_1; case V_7_1_0_ID: return V_7_1_0; - case V_7_0_2_ID: - return V_7_0_2; case V_7_0_1_ID: return V_7_0_1; case V_7_0_0_ID: return V_7_0_0; + case V_6_8_1_ID: + return V_6_8_1; case V_6_8_0_ID: return V_6_8_0; - case V_6_7_3_ID: - return V_6_7_3; case V_6_7_1_ID: return V_6_7_1; case V_6_7_2_ID: @@ -192,30 +166,6 @@ public static Version fromId(int id) { return V_6_3_1; case V_6_3_0_ID: return V_6_3_0; - case V_6_2_4_ID: - return V_6_2_4; - case V_6_2_3_ID: - return V_6_2_3; - case V_6_2_2_ID: - return V_6_2_2; - case V_6_2_1_ID: - return V_6_2_1; - case V_6_2_0_ID: - return V_6_2_0; - case V_6_1_4_ID: - return V_6_1_4; - case V_6_1_3_ID: - return V_6_1_3; - case V_6_1_2_ID: - return V_6_1_2; - case V_6_1_1_ID: - return V_6_1_1; - case V_6_1_0_ID: - return V_6_1_0; - case V_6_0_1_ID: - return V_6_0_1; - case V_6_0_0_ID: - return V_6_0_0; case V_EMPTY_ID: return V_EMPTY; default: diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index fe07a4efe930e..d1d72da544560 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -203,27 +203,15 @@ void getFinishedTaskFromIndex(Task thisTask, GetTaskRequest request, ActionListe request.getTaskId().toString()); get.setParentTask(clusterService.localNode().getId(), thisTask.getId()); - client.get(get, new ActionListener() { - @Override - public void onResponse(GetResponse getResponse) { - try { - onGetFinishedTaskFromIndex(getResponse, listener); - } catch (Exception e) { - listener.onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) { - // We haven't yet created the index for the task results so it can't be found. - listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e, - request.getTaskId())); - } else { - listener.onFailure(e); - } + client.get(get, ActionListener.wrap(r -> onGetFinishedTaskFromIndex(r, listener), e -> { + if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) { + // We haven't yet created the index for the task results so it can't be found. 
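// Illustrative aside, assumptions only (not the Elasticsearch source): the unwrap(...) call just
// above walks the cause chain looking for a matching class, while the reworked unwrapCorruption(...)
// in ExceptionsHelper additionally recurses into suppressed exceptions at every level, as its new
// javadoc describes. A minimal, self-contained sketch of both behaviours:
import java.io.IOException;

final class UnwrapSketch {

    /** Returns the first throwable in t's cause chain that is an instance of one of the given classes, else null. */
    static Throwable unwrap(Throwable t, Class<?>... clazzes) {
        while (t != null) {
            for (Class<?> clazz : clazzes) {
                if (clazz.isInstance(t)) {
                    return t;
                }
            }
            t = t.getCause();
        }
        return null;
    }

    /** Like unwrap, but also searches suppressed exceptions recursively; corruption classes are hypothetical here. */
    @SafeVarargs
    static IOException unwrapCorruption(Throwable t, Class<? extends IOException>... corruptionClasses) {
        while (t != null) {
            for (Class<? extends IOException> clazz : corruptionClasses) {
                if (clazz.isInstance(t)) {
                    return (IOException) t;
                }
            }
            for (Throwable suppressed : t.getSuppressed()) {
                IOException found = unwrapCorruption(suppressed, corruptionClasses);
                if (found != null) {
                    return found;
                }
            }
            t = t.getCause();
        }
        return null;
    }

    public static void main(String[] args) {
        Exception root = new Exception("wrapper");
        root.addSuppressed(new IOException("corrupt segment"));        // only reachable via getSuppressed()
        System.out.println(unwrap(root, IOException.class));           // null: cause chain only
        System.out.println(unwrapCorruption(root, IOException.class)); // finds the suppressed IOException
    }
}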
+ listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e, + request.getTaskId())); + } else { + listener.onFailure(e); } - }); + })); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 5dfc24d1e280e..c2f0d3dd0c074 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -119,23 +119,11 @@ protected void masterOperation(final SnapshotsStatusRequest request, TransportNodesSnapshotsStatus.Request nodesRequest = new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(new String[nodesIds.size()])) .snapshots(snapshots).timeout(request.masterNodeTimeout()); - transportNodesSnapshotsStatus.execute(nodesRequest, new ActionListener() { - @Override - public void onResponse(TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) { - try { - List currentSnapshots = - snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots())); - listener.onResponse(buildResponse(request, currentSnapshots, nodeSnapshotStatuses)); - } catch (Exception e) { - listener.onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + transportNodesSnapshotsStatus.execute(nodesRequest, + ActionListener.map( + listener, nodeSnapshotStatuses -> + buildResponse(request, snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots())), + nodeSnapshotStatuses))); } else { // We don't have any in-progress shards, just return current stats listener.onResponse(buildResponse(request, currentSnapshots, null)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java index f2d046f3321b2..b122350c3e61d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java @@ -184,26 +184,13 @@ protected ClusterBlockException checkRequestBlock(ClusterState state, UpgradeReq @Override protected void doExecute(Task task, UpgradeRequest request, final ActionListener listener) { - ActionListener settingsUpdateListener = new ActionListener() { - @Override - public void onResponse(UpgradeResponse upgradeResponse) { - try { - if (upgradeResponse.versions().isEmpty()) { - listener.onResponse(upgradeResponse); - } else { - updateSettings(upgradeResponse, listener); - } - } catch (Exception e) { - listener.onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); + super.doExecute(task, request, ActionListener.wrap(upgradeResponse -> { + if (upgradeResponse.versions().isEmpty()) { + listener.onResponse(upgradeResponse); + } else { + updateSettings(upgradeResponse, listener); } - }; - super.doExecute(task, request, settingsUpdateListener); + }, listener::onFailure)); } private void updateSettings(final UpgradeResponse upgradeResponse, final ActionListener listener) { diff --git 
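// Illustrative aside, assumptions only (not the Elasticsearch source): the ActionListener.wrap(...)
// and ActionListener.map(...) calls introduced in the hunks above build listeners from lambdas
// instead of anonymous classes. The assumed contract, sketched below, is that an exception thrown
// while handling a response is routed to the failure handler, which is what the deleted try/catch
// blocks around the onResponse bodies did by hand.
import java.util.function.Consumer;

interface ListenerSketch<R> {
    void onResponse(R response);
    void onFailure(Exception e);

    @FunctionalInterface
    interface CheckedConsumer<T> {
        void accept(T value) throws Exception;
    }

    static <R> ListenerSketch<R> wrap(CheckedConsumer<R> responseHandler, Consumer<Exception> failureHandler) {
        return new ListenerSketch<R>() {
            @Override
            public void onResponse(R response) {
                try {
                    responseHandler.accept(response);  // delegate the success path
                } catch (Exception e) {
                    onFailure(e);                      // a failing success handler is reported, not lost
                }
            }

            @Override
            public void onFailure(Exception e) {
                failureHandler.accept(e);
            }
        };
    }
}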
a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java index 2f5db520088e9..7890fb4e83fc1 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java @@ -59,27 +59,20 @@ public void execute(BulkRequest bulkRequest, long executionId) { semaphore.acquire(); toRelease = semaphore::release; CountDownLatch latch = new CountDownLatch(1); - retry.withBackoff(consumer, bulkRequest, new ActionListener() { + retry.withBackoff(consumer, bulkRequest, ActionListener.runAfter(new ActionListener() { @Override public void onResponse(BulkResponse response) { - try { - listener.afterBulk(executionId, bulkRequest, response); - } finally { - semaphore.release(); - latch.countDown(); - } + listener.afterBulk(executionId, bulkRequest, response); } @Override public void onFailure(Exception e) { - try { - listener.afterBulk(executionId, bulkRequest, e); - } finally { - semaphore.release(); - latch.countDown(); - } + listener.afterBulk(executionId, bulkRequest, e); } - }); + }, () -> { + semaphore.release(); + latch.countDown(); + })); bulkRequestSetupSuccessful = true; if (concurrentRequests == 0) { latch.await(); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index 97f13bf71d14c..be1528a354bc3 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -74,25 +73,13 @@ protected void masterOperation(PutPipelineRequest request, ClusterState state, A NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); nodesInfoRequest.clear(); nodesInfoRequest.ingest(true); - client.admin().cluster().nodesInfo(nodesInfoRequest, new ActionListener() { - @Override - public void onResponse(NodesInfoResponse nodeInfos) { - try { - Map ingestInfos = new HashMap<>(); - for (NodeInfo nodeInfo : nodeInfos.getNodes()) { - ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest()); - } - ingestService.putPipeline(ingestInfos, request, listener); - } catch (Exception e) { - onFailure(e); - } + client.admin().cluster().nodesInfo(nodesInfoRequest, ActionListener.wrap(nodeInfos -> { + Map ingestInfos = new HashMap<>(); + for (NodeInfo nodeInfo : nodeInfos.getNodes()) { + ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest()); } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + ingestService.putPipeline(ingestInfos, request, listener); + }, listener::onFailure)); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 18c2f529a711d..6b641906d2e32 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -286,12 +286,19 @@ boolean isFinalReduce() { * ensure that the same value, determined by the coordinating node, is used on all nodes involved in the execution of the search * request. When created through {@link #crossClusterSearch(SearchRequest, String[], String, long, boolean)}, this method returns * the provided current time, otherwise it will return {@link System#currentTimeMillis()}. - * */ long getOrCreateAbsoluteStartMillis() { return absoluteStartMillis == DEFAULT_ABSOLUTE_START_MILLIS ? System.currentTimeMillis() : absoluteStartMillis; } + /** + * Returns the provided absoluteStartMillis when created through {@link #crossClusterSearch} and + * -1 otherwise. + */ + long getAbsoluteStartMillis() { + return absoluteStartMillis; + } + /** * Sets the indices the search will be executed on. */ diff --git a/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java index dfcf6445abf7d..ad72ef10139ba 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.Settings; @@ -86,21 +87,16 @@ public ThreadedActionListener(Logger logger, ThreadPool threadPool, String execu @Override public void onResponse(final Response response) { - threadPool.executor(executor).execute(new AbstractRunnable() { + threadPool.executor(executor).execute(new ActionRunnable<>(listener) { @Override public boolean isForceExecution() { return forceExecution; } @Override - protected void doRun() throws Exception { + protected void doRun() { listener.onResponse(response); } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } }); } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 87c9e15324152..15daaf786b604 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -21,6 +21,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; @@ -36,7 +37,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; @@ -287,45 +287,25 @@ class ShardTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardRequest request, 
TransportChannel channel, Task task) throws Exception { - asyncShardOperation(request, task, new ActionListener() { - @Override - public void onResponse(ShardResponse response) { - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (Exception e1) { - logger.warn(() -> new ParameterizedMessage( - "Failed to send error response for action [{}] and request [{}]", actionName, request), e1); + asyncShardOperation(request, task, + ActionListener.wrap(channel::sendResponse, e -> { + try { + channel.sendResponse(e); + } catch (Exception e1) { + logger.warn(() -> new ParameterizedMessage( + "Failed to send error response for action [{}] and request [{}]", actionName, request), e1); + } } - } - }); + )); } } protected void asyncShardOperation(ShardRequest request, Task task, ActionListener listener) { - transportService.getThreadPool().executor(getExecutor(request)).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - + transportService.getThreadPool().executor(shardExecutor).execute(new ActionRunnable(listener) { @Override protected void doRun() throws Exception { listener.onResponse(shardOperation(request, task)); } }); } - - protected String getExecutor(ShardRequest request) { - return shardExecutor; - } - } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 6edaa95033997..d19009433deb5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -63,6 +63,7 @@ import org.elasticsearch.index.shard.ReplicationGroup; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeClosedException; @@ -307,10 +308,18 @@ protected void doRun() throws Exception { primaryRequest.getTargetAllocationID(), primaryRequest.getPrimaryTerm(), actualTerm); } - acquirePrimaryOperationPermit(indexShard, primaryRequest.getRequest(), ActionListener.wrap( - releasable -> runWithPrimaryShardReference(new PrimaryShardReference(indexShard, releasable)), - this::onFailure - )); + acquirePrimaryOperationPermit( + indexShard, + primaryRequest.getRequest(), + ActionListener.wrap( + releasable -> runWithPrimaryShardReference(new PrimaryShardReference(indexShard, releasable)), + e -> { + if (e instanceof ShardNotInPrimaryModeException) { + onFailure(new ReplicationOperation.RetryOnPrimaryException(shardId, "shard is not in primary mode", e)); + } else { + onFailure(e); + } + })); } void runWithPrimaryShardReference(final PrimaryShardReference primaryShardReference) { diff --git a/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java index c575c3b233872..d1d7b6ffac597 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java 
+++ b/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java @@ -254,27 +254,16 @@ private class ShardTransportHandler implements TransportRequestHandler @Override public void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception { - shardOperation(request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn("failed to send response for get", inner); + shardOperation(request, + ActionListener.wrap(channel::sendResponse, e -> { + try { + channel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn("failed to send response for get", inner); + } } - } - }); - + )); } } } diff --git a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index 3c2e7f9a49e0d..123ed11769aa7 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ChannelActionListener; @@ -40,7 +41,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.LoggerMessageFormat; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -107,12 +107,7 @@ protected void doExecute(Task task, Request request, ActionListener li protected abstract Response shardOperation(Request request, ShardId shardId) throws IOException; protected void asyncShardOperation(Request request, ShardId shardId, ActionListener listener) throws IOException { - threadPool.executor(getExecutor(request, shardId)).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - + threadPool.executor(getExecutor(request, shardId)).execute(new ActionRunnable<>(listener) { @Override protected void doRun() throws Exception { listener.onResponse(shardOperation(request, shardId)); diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index c2f9872ca5cee..8d80a15beb14b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -329,19 +329,8 @@ class NodeTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final NodeTaskRequest request, final TransportChannel channel, Task task) throws Exception { - 
nodeOperation(request, new ActionListener() { - @Override - public void onResponse( - TransportTasksAction.NodeTasksResponse response) { - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { + nodeOperation(request, ActionListener.wrap(channel::sendResponse, + e -> { try { channel.sendResponse(e); } catch (IOException e1) { @@ -349,11 +338,10 @@ public void onFailure(Exception e) { logger.warn("Failed to send failure", e1); } } - }); + )); } } - private class NodeTaskRequest extends TransportRequest { private TasksRequest tasksRequest; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java index a38a383b269d5..ac75c83c19a26 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java @@ -480,6 +480,7 @@ default void markLastAcceptedStateAsCommitted() { metaDataBuilder = MetaData.builder(lastAcceptedState.metaData()); } metaDataBuilder.clusterUUIDCommitted(true); + logger.info("cluster UUID set to [{}]", lastAcceptedState.metaData().clusterUUID()); } if (metaDataBuilder != null) { setLastAcceptedState(ClusterState.builder(lastAcceptedState).metaData(metaDataBuilder).build()); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 457cfcb15486e..6304588e3121a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -647,7 +647,11 @@ protected void doStart() { coordinationState.set(new CoordinationState(settings, getLocalNode(), persistedState)); peerFinder.setCurrentTerm(getCurrentTerm()); configuredHostsResolver.start(); - VotingConfiguration votingConfiguration = coordinationState.get().getLastAcceptedState().getLastCommittedConfiguration(); + final ClusterState lastAcceptedState = coordinationState.get().getLastAcceptedState(); + if (lastAcceptedState.metaData().clusterUUIDCommitted()) { + logger.info("cluster UUID [{}]", lastAcceptedState.metaData().clusterUUID()); + } + final VotingConfiguration votingConfiguration = lastAcceptedState.getLastCommittedConfiguration(); if (singleNodeDiscovery && votingConfiguration.isEmpty() == false && votingConfiguration.hasQuorum(Collections.singleton(getLocalNode().getId())) == false) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java index 3d59e2bceacdb..ec664c97067d1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java @@ -44,7 +44,7 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand { private static final Logger logger = LogManager.getLogger(ElasticsearchNodeCommand.class); protected final NamedXContentRegistry namedXContentRegistry; - static final String DELIMITER = "------------------------------------------------------------------------\n"; + protected static final String DELIMITER = "------------------------------------------------------------------------\n"; 
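// Illustrative aside, assumptions only (not the Elasticsearch source): several hunks above swap a
// hand-rolled AbstractRunnable, whose onFailure merely forwarded to the listener, for an
// ActionRunnable constructed with that listener. A minimal sketch of the pattern:

abstract class ListenerRunnableSketch<T> implements Runnable {

    /** Minimal stand-in for ActionListener, for illustration only. */
    interface Listener<R> {
        void onResponse(R response);
        void onFailure(Exception e);
    }

    protected final Listener<T> listener;

    protected ListenerRunnableSketch(Listener<T> listener) {
        this.listener = listener;
    }

    /** Subclasses compute the response (or throw); any thrown exception reaches the listener. */
    protected abstract void doRun() throws Exception;

    @Override
    public final void run() {
        try {
            doRun();
        } catch (Exception e) {
            listener.onFailure(e);
        }
    }
}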
static final String STOP_WARNING_MSG = DELIMITER + @@ -81,9 +81,8 @@ protected void processNodePathsWithLock(Terminal terminal, OptionSet options, En throw new ElasticsearchException(NO_NODE_FOLDER_FOUND_MSG); } processNodePaths(terminal, dataPaths, env); - } catch (LockObtainFailedException ex) { - throw new ElasticsearchException( - FAILED_TO_OBTAIN_NODE_LOCK_MSG + " [" + ex.getMessage() + "]"); + } catch (LockObtainFailedException e) { + throw new ElasticsearchException(FAILED_TO_OBTAIN_NODE_LOCK_MSG, e); } } @@ -177,6 +176,17 @@ protected void cleanUpOldMetaData(Terminal terminal, Path[] dataPaths, long newG MetaData.FORMAT.cleanupOldFiles(newGeneration, dataPaths); } + protected NodeEnvironment.NodePath[] toNodePaths(Path[] dataPaths) { + return Arrays.stream(dataPaths).map(ElasticsearchNodeCommand::createNodePath).toArray(NodeEnvironment.NodePath[]::new); + } + + private static NodeEnvironment.NodePath createNodePath(Path path) { + try { + return new NodeEnvironment.NodePath(path); + } catch (IOException e) { + throw new ElasticsearchException("Unable to investigate path [" + path + "]", e); + } + } //package-private for testing OptionParser getParser() { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java index d6bd22bcd76fd..ff054e71eee3a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java @@ -22,6 +22,7 @@ import org.elasticsearch.cli.MultiCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.env.NodeRepurposeCommand; +import org.elasticsearch.env.OverrideNodeVersionCommand; // NodeToolCli does not extend LoggingAwareCommand, because LoggingAwareCommand performs logging initialization // after LoggingAwareCommand instance is constructed. @@ -39,6 +40,7 @@ public NodeToolCli() { subcommands.put("repurpose", new NodeRepurposeCommand()); subcommands.put("unsafe-bootstrap", new UnsafeBootstrapMasterCommand()); subcommands.put("detach-cluster", new DetachClusterCommand()); + subcommands.put("override-version", new OverrideNodeVersionCommand()); } public static void main(String[] args) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/common/LegacyTimeBasedUUIDGenerator.java b/server/src/main/java/org/elasticsearch/common/LegacyTimeBasedUUIDGenerator.java deleted file mode 100644 index 74a08711042f7..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/LegacyTimeBasedUUIDGenerator.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common; - -import java.util.Base64; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * These are essentially flake ids, but we use 6 (not 8) bytes for timestamp, and use 3 (not 2) bytes for sequence number. - * For more information about flake ids, check out - * https://archive.fo/2015.07.08-082503/http://www.boundary.com/blog/2012/01/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang/ - */ - -class LegacyTimeBasedUUIDGenerator implements UUIDGenerator { - - // We only use bottom 3 bytes for the sequence number. Paranoia: init with random int so that if JVM/OS/machine goes down, clock slips - // backwards, and JVM comes back up, we are less likely to be on the same sequenceNumber at the same time: - private final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt()); - - // Used to ensure clock moves forward: - private long lastTimestamp; - - private static final byte[] SECURE_MUNGED_ADDRESS = MacAddressProvider.getSecureMungedAddress(); - - static { - assert SECURE_MUNGED_ADDRESS.length == 6; - } - - /** Puts the lower numberOfLongBytes from l into the array, starting index pos. */ - private static void putLong(byte[] array, long l, int pos, int numberOfLongBytes) { - for (int i=0; i>> (i*8)); - } - } - - @Override - public String getBase64UUID() { - final int sequenceId = sequenceNumber.incrementAndGet() & 0xffffff; - long timestamp = System.currentTimeMillis(); - - synchronized (this) { - // Don't let timestamp go backwards, at least "on our watch" (while this JVM is running). We are still vulnerable if we are - // shut down, clock goes backwards, and we restart... for this we randomize the sequenceNumber on init to decrease chance of - // collision: - timestamp = Math.max(lastTimestamp, timestamp); - - if (sequenceId == 0) { - // Always force the clock to increment whenever sequence number is 0, in case we have a long time-slip backwards: - timestamp++; - } - - lastTimestamp = timestamp; - } - - final byte[] uuidBytes = new byte[15]; - - // Only use lower 6 bytes of the timestamp (this will suffice beyond the year 10000): - putLong(uuidBytes, timestamp, 0, 6); - - // MAC address adds 6 bytes: - System.arraycopy(SECURE_MUNGED_ADDRESS, 0, uuidBytes, 6, SECURE_MUNGED_ADDRESS.length); - - // Sequence number adds 3 bytes: - putLong(uuidBytes, sequenceId, 12, 3); - - assert 9 + SECURE_MUNGED_ADDRESS.length == uuidBytes.length; - - return Base64.getUrlEncoder().withoutPadding().encodeToString(uuidBytes); - } -} diff --git a/server/src/main/java/org/elasticsearch/common/UUIDs.java b/server/src/main/java/org/elasticsearch/common/UUIDs.java index a6a314c2cccb0..46643a79da2e2 100644 --- a/server/src/main/java/org/elasticsearch/common/UUIDs.java +++ b/server/src/main/java/org/elasticsearch/common/UUIDs.java @@ -26,7 +26,6 @@ public class UUIDs { private static final RandomBasedUUIDGenerator RANDOM_UUID_GENERATOR = new RandomBasedUUIDGenerator(); - private static final UUIDGenerator LEGACY_TIME_UUID_GENERATOR = new LegacyTimeBasedUUIDGenerator(); private static final UUIDGenerator TIME_UUID_GENERATOR = new TimeBasedUUIDGenerator(); /** Generates a time-based UUID (similar to Flake IDs), which is preferred when generating an ID to be indexed into a Lucene index as @@ -35,11 +34,6 @@ public static String base64UUID() { return TIME_UUID_GENERATOR.getBase64UUID(); } - /** Legacy implementation of {@link #base64UUID()}, for pre 6.0 indices. 
*/ - public static String legacyBase64UUID() { - return LEGACY_TIME_UUID_GENERATOR.getBase64UUID(); - } - /** Returns a Base64 encoded version of a Version 4.0 compatible UUID as defined here: http://www.ietf.org/rfc/rfc4122.txt, using the * provided {@code Random} instance */ public static String randomBase64UUID(Random random) { diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java index 21d1bd9f25564..9299edc459cb7 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java @@ -20,12 +20,18 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.builders.ShapeBuilder; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.BaseGeoShapeFieldMapper; import java.io.IOException; +import java.io.InputStream; /** * first point of entry for a shape parser @@ -67,4 +73,20 @@ static ShapeBuilder parse(XContentParser parser, BaseGeoShapeFieldMapper shapeMa static ShapeBuilder parse(XContentParser parser) throws IOException { return parse(parser, null); } + + static ShapeBuilder parse(Object value) throws IOException { + XContentBuilder content = JsonXContent.contentBuilder(); + content.startObject(); + content.field("value", value); + content.endObject(); + + try (InputStream stream = BytesReference.bytes(content).streamInput(); + XContentParser parser = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + parser.nextToken(); // start object + parser.nextToken(); // field name + parser.nextToken(); // field value + return parse(parser); + } + } } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index 56d1b5cedc33c..96a0cafc35b11 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -31,7 +31,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; @@ -72,17 +71,9 @@ public static Query newNestedFilter() { /** * Creates a new non-nested docs query - * @param indexVersionCreated the index version created since newer indices can identify a parent field more efficiently */ - public static Query newNonNestedFilter(Version indexVersionCreated) { - if (indexVersionCreated.onOrAfter(Version.V_6_1_0)) { - return new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME); - } else { - return new BooleanQuery.Builder() - .add(new MatchAllDocsQuery(), Occur.FILTER) - .add(newNestedFilter(), Occur.MUST_NOT) - .build(); - } + public static Query newNonNestedFilter() { + 
return new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME); } public static BooleanQuery filtered(@Nullable Query query, @Nullable Query filter) { diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 514cfd3ce4ca8..1e5079124c345 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -466,6 +466,11 @@ public final String getRaw(final Settings settings) { * @return the raw string representation of the setting value */ String innerGetRaw(final Settings settings) { + SecureSettings secureSettings = settings.getSecureSettings(); + if (secureSettings != null && secureSettings.getSettingNames().contains(getKey())) { + throw new IllegalArgumentException("Setting [" + getKey() + "] is a non-secure setting" + + " and must be stored inside elasticsearch.yml, but was found inside the Elasticsearch keystore"); + } return settings.get(getKey(), defaultValue.apply(settings)); } diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index fc2f76d3436c0..4cfd22ecb1a65 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -31,6 +31,7 @@ import org.apache.lucene.store.NativeFSLockFactory; import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.CheckedFunction; @@ -248,7 +249,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce sharedDataPath = null; locks = null; nodeLockId = -1; - nodeMetaData = new NodeMetaData(generateNodeId(settings)); + nodeMetaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT); return; } boolean success = false; @@ -393,7 +394,6 @@ private void maybeLogHeapDetails() { logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops); } - /** * scans the node paths and loads existing metaData file. 
If not found a new meta data will be generated * and persisted into the nodePaths @@ -403,10 +403,15 @@ private static NodeMetaData loadOrCreateNodeMetaData(Settings settings, Logger l final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new); NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, paths); if (metaData == null) { - metaData = new NodeMetaData(generateNodeId(settings)); + metaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT); + } else { + metaData = metaData.upgradeToCurrentVersion(); } + // we write again to make sure all paths have the latest state file + assert metaData.nodeVersion().equals(Version.CURRENT) : metaData.nodeVersion() + " != " + Version.CURRENT; NodeMetaData.FORMAT.writeAndCleanup(metaData, paths); + return metaData; } diff --git a/server/src/main/java/org/elasticsearch/env/NodeMetaData.java b/server/src/main/java/org/elasticsearch/env/NodeMetaData.java index dbea3164c8a44..f9deba8f6c382 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeMetaData.java +++ b/server/src/main/java/org/elasticsearch/env/NodeMetaData.java @@ -19,6 +19,7 @@ package org.elasticsearch.env; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -31,66 +32,104 @@ import java.util.Objects; /** - * Metadata associated with this node. Currently only contains the unique uuid describing this node. + * Metadata associated with this node: its persistent node ID and its version. * The metadata is persisted in the data folder of this node and is reused across restarts. */ public final class NodeMetaData { private static final String NODE_ID_KEY = "node_id"; + private static final String NODE_VERSION_KEY = "node_version"; private final String nodeId; - public NodeMetaData(final String nodeId) { + private final Version nodeVersion; + + public NodeMetaData(final String nodeId, final Version nodeVersion) { this.nodeId = Objects.requireNonNull(nodeId); + this.nodeVersion = Objects.requireNonNull(nodeVersion); } @Override public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; NodeMetaData that = (NodeMetaData) o; - - return Objects.equals(this.nodeId, that.nodeId); + return nodeId.equals(that.nodeId) && + nodeVersion.equals(that.nodeVersion); } @Override public int hashCode() { - return this.nodeId.hashCode(); + return Objects.hash(nodeId, nodeVersion); } @Override public String toString() { - return "node_id [" + nodeId + "]"; + return "NodeMetaData{" + + "nodeId='" + nodeId + '\'' + + ", nodeVersion=" + nodeVersion + + '}'; } private static ObjectParser PARSER = new ObjectParser<>("node_meta_data", Builder::new); static { PARSER.declareString(Builder::setNodeId, new ParseField(NODE_ID_KEY)); + PARSER.declareInt(Builder::setNodeVersionId, new ParseField(NODE_VERSION_KEY)); } public String nodeId() { return nodeId; } + public Version nodeVersion() { + return nodeVersion; + } + + public NodeMetaData upgradeToCurrentVersion() { + if (nodeVersion.equals(Version.V_EMPTY)) { + assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards"; + return new NodeMetaData(nodeId, Version.CURRENT); + } + + if 
(nodeVersion.before(Version.CURRENT.minimumIndexCompatibilityVersion())) { + throw new IllegalStateException( + "cannot upgrade a node from version [" + nodeVersion + "] directly to version [" + Version.CURRENT + "]"); + } + + if (nodeVersion.after(Version.CURRENT)) { + throw new IllegalStateException( + "cannot downgrade a node from version [" + nodeVersion + "] to version [" + Version.CURRENT + "]"); + } + + return nodeVersion.equals(Version.CURRENT) ? this : new NodeMetaData(nodeId, Version.CURRENT); + } + private static class Builder { String nodeId; + Version nodeVersion; public void setNodeId(String nodeId) { this.nodeId = nodeId; } + public void setNodeVersionId(int nodeVersionId) { + this.nodeVersion = Version.fromId(nodeVersionId); + } + public NodeMetaData build() { - return new NodeMetaData(nodeId); + final Version nodeVersion; + if (this.nodeVersion == null) { + assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards"; + nodeVersion = Version.V_EMPTY; + } else { + nodeVersion = this.nodeVersion; + } + + return new NodeMetaData(nodeId, nodeVersion); } } - public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat("node-") { @Override @@ -103,10 +142,11 @@ protected XContentBuilder newXContentBuilder(XContentType type, OutputStream str @Override public void toXContent(XContentBuilder builder, NodeMetaData nodeMetaData) throws IOException { builder.field(NODE_ID_KEY, nodeMetaData.nodeId); + builder.field(NODE_VERSION_KEY, nodeMetaData.nodeVersion.id); } @Override - public NodeMetaData fromXContent(XContentParser parser) throws IOException { + public NodeMetaData fromXContent(XContentParser parser) { return PARSER.apply(parser, null).build(); } }; diff --git a/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java index 7331d8528fc64..20b5552dfa8f8 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java +++ b/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java @@ -172,10 +172,6 @@ private String toIndexName(NodeEnvironment.NodePath[] nodePaths, String uuid) { } } - private NodeEnvironment.NodePath[] toNodePaths(Path[] dataPaths) { - return Arrays.stream(dataPaths).map(NodeRepurposeCommand::createNodePath).toArray(NodeEnvironment.NodePath[]::new); - } - private Set indexUUIDsFor(Set indexPaths) { return indexPaths.stream().map(Path::getFileName).map(Path::toString).collect(Collectors.toSet()); } @@ -221,19 +217,11 @@ private void removePath(Path path) { @SafeVarargs @SuppressWarnings("varargs") - private final Set uniqueParentPaths(Collection... paths) { + private Set uniqueParentPaths(Collection... paths) { // equals on Path is good enough here due to the way these are collected. 
return Arrays.stream(paths).flatMap(Collection::stream).map(Path::getParent).collect(Collectors.toSet()); } - private static NodeEnvironment.NodePath createNodePath(Path path) { - try { - return new NodeEnvironment.NodePath(path); - } catch (IOException e) { - throw new ElasticsearchException("Unable to investigate path: " + path + ": " + e.getMessage()); - } - } - //package-private for testing OptionParser getParser() { return parser; diff --git a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java new file mode 100644 index 0000000000000..a46e185a25351 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.env; + +import joptsimple.OptionParser; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.coordination.ElasticsearchNodeCommand; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Arrays; + +public class OverrideNodeVersionCommand extends ElasticsearchNodeCommand { + private static final Logger logger = LogManager.getLogger(OverrideNodeVersionCommand.class); + + private static final String TOO_NEW_MESSAGE = + DELIMITER + + "\n" + + "This data path was last written by Elasticsearch version [V_NEW] and may no\n" + + "longer be compatible with Elasticsearch version [V_CUR]. This tool will bypass\n" + + "this compatibility check, allowing a version [V_CUR] node to start on this data\n" + + "path, but a version [V_CUR] node may not be able to read this data or may read\n" + + "it incorrectly leading to data loss.\n" + + "\n" + + "You should not use this tool. Instead, continue to use a version [V_NEW] node\n" + + "on this data path. If necessary, you can use reindex-from-remote to copy the\n" + + "data from here into an older cluster.\n" + + "\n" + + "Do you want to proceed?\n"; + + private static final String TOO_OLD_MESSAGE = + DELIMITER + + "\n" + + "This data path was last written by Elasticsearch version [V_OLD] which may be\n" + + "too old to be readable by Elasticsearch version [V_CUR]. This tool will bypass\n" + + "this compatibility check, allowing a version [V_CUR] node to start on this data\n" + + "path, but this version [V_CUR] node may not be able to read this data or may\n" + + "read it incorrectly leading to data loss.\n" + + "\n" + + "You should not use this tool. 
Instead, upgrade this data path from [V_OLD] to\n" + + "[V_CUR] using one or more intermediate versions of Elasticsearch.\n" + + "\n" + + "Do you want to proceed?\n"; + + static final String NO_METADATA_MESSAGE = "no node metadata found, so there is no version to override"; + static final String SUCCESS_MESSAGE = "Successfully overwrote this node's metadata to bypass its version compatibility checks."; + + public OverrideNodeVersionCommand() { + super("Overwrite the version stored in this node's data path with [" + Version.CURRENT + + "] to bypass the version compatibility checks"); + } + + @Override + protected void processNodePaths(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { + final Path[] nodePaths = Arrays.stream(toNodePaths(dataPaths)).map(p -> p.path).toArray(Path[]::new); + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, nodePaths); + if (nodeMetaData == null) { + throw new ElasticsearchException(NO_METADATA_MESSAGE); + } + + try { + nodeMetaData.upgradeToCurrentVersion(); + throw new ElasticsearchException("found [" + nodeMetaData + "] which is compatible with current version [" + Version.CURRENT + + "], so there is no need to override the version checks"); + } catch (IllegalStateException e) { + // ok, means the version change is not supported + } + + confirm(terminal, (nodeMetaData.nodeVersion().before(Version.CURRENT) ? TOO_OLD_MESSAGE : TOO_NEW_MESSAGE) + .replace("V_OLD", nodeMetaData.nodeVersion().toString()) + .replace("V_NEW", nodeMetaData.nodeVersion().toString()) + .replace("V_CUR", Version.CURRENT.toString())); + + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeMetaData.nodeId(), Version.CURRENT), nodePaths); + + terminal.println(SUCCESS_MESSAGE); + } + + //package-private for testing + OptionParser getParser() { + return parser; + } +} diff --git a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java index 7e4172961ea1e..d8b96550ad01a 100644 --- a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java @@ -93,7 +93,7 @@ public abstract AllocateUnassignedDecision makeAllocationDecision(ShardRouting u * Builds decisions for all nodes in the cluster, so that the explain API can provide information on * allocation decisions for each node, while still waiting to allocate the shard (e.g. due to fetching shard data). 
*/ - protected List buildDecisionsForAllNodes(ShardRouting shard, RoutingAllocation allocation) { + protected static List buildDecisionsForAllNodes(ShardRouting shard, RoutingAllocation allocation) { List results = new ArrayList<>(); for (RoutingNode node : allocation.routingNodes()) { Decision decision = allocation.deciders().canAllocate(shard, node, allocation); diff --git a/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java index 9a1c79b476e1b..fefd807d8d8a5 100644 --- a/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java +++ b/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java @@ -22,6 +22,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexGraveyard; @@ -162,14 +163,14 @@ private void allocateDanglingIndices() { } try { allocateDangledIndices.allocateDangled(Collections.unmodifiableCollection(new ArrayList<>(danglingIndices.values())), - new LocalAllocateDangledIndices.Listener() { + new ActionListener<>() { @Override public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) { logger.trace("allocated dangled"); } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { logger.info("failed to send allocated dangled", e); } } diff --git a/server/src/main/java/org/elasticsearch/gateway/Gateway.java b/server/src/main/java/org/elasticsearch/gateway/Gateway.java index dea544d40d55b..c59d52c60be7a 100644 --- a/server/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/server/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -28,9 +28,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; -import org.elasticsearch.indices.IndicesService; import java.util.Arrays; import java.util.function.Function; @@ -43,12 +41,7 @@ public class Gateway { private final TransportNodesListGatewayMetaState listGatewayMetaState; - private final IndicesService indicesService; - - public Gateway(final Settings settings, final ClusterService clusterService, - final TransportNodesListGatewayMetaState listGatewayMetaState, - final IndicesService indicesService) { - this.indicesService = indicesService; + public Gateway(final ClusterService clusterService, final TransportNodesListGatewayMetaState listGatewayMetaState) { this.clusterService = clusterService; this.listGatewayMetaState = listGatewayMetaState; } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayException.java b/server/src/main/java/org/elasticsearch/gateway/GatewayException.java index 32050f1c10e7d..380610a593675 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayException.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayException.java @@ -26,14 +26,6 @@ public class GatewayException extends ElasticsearchException { - public GatewayException(String msg) { - super(msg); - } - - public GatewayException(String msg, Throwable cause) { - super(msg, cause); - } - public GatewayException(StreamInput in) throws 
IOException { super(in); } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 30361fa70ee6b..91bcb68370ea1 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -44,9 +44,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.MetaDataUpgrader; import org.elasticsearch.transport.TransportService; @@ -76,11 +74,9 @@ public class GatewayMetaState implements ClusterStateApplier, CoordinationState.PersistedState { protected static final Logger logger = LogManager.getLogger(GatewayMetaState.class); - private final NodeEnvironment nodeEnv; private final MetaStateService metaStateService; private final Settings settings; private final ClusterService clusterService; - private final IndicesService indicesService; private final TransportService transportService; //there is a single thread executing updateClusterState calls, hence no volatile modifier @@ -88,16 +84,13 @@ public class GatewayMetaState implements ClusterStateApplier, CoordinationState. protected ClusterState previousClusterState; protected boolean incrementalWrite; - public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService, + public GatewayMetaState(Settings settings, MetaStateService metaStateService, MetaDataIndexUpgradeService metaDataIndexUpgradeService, MetaDataUpgrader metaDataUpgrader, - TransportService transportService, ClusterService clusterService, - IndicesService indicesService) throws IOException { + TransportService transportService, ClusterService clusterService) throws IOException { this.settings = settings; - this.nodeEnv = nodeEnv; this.metaStateService = metaStateService; this.transportService = transportService; this.clusterService = clusterService; - this.indicesService = indicesService; upgradeMetaData(metaDataIndexUpgradeService, metaDataUpgrader); initializeClusterState(ClusterName.CLUSTER_NAME_SETTING.get(settings)); @@ -184,7 +177,7 @@ protected void upgradeMetaData(MetaDataIndexUpgradeService metaDataIndexUpgradeS } } - protected boolean isMasterOrDataNode() { + private boolean isMasterOrDataNode() { return DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings); } @@ -312,13 +305,12 @@ long writeIndex(String reason, IndexMetaData metaData) throws WriteStateExceptio } } - long writeManifestAndCleanup(String reason, Manifest manifest) throws WriteStateException { + void writeManifestAndCleanup(String reason, Manifest manifest) throws WriteStateException { assert finished == false : FINISHED_MSG; try { - long generation = metaStateService.writeManifestAndCleanup(reason, manifest); + metaStateService.writeManifestAndCleanup(reason, manifest); commitCleanupActions.forEach(Runnable::run); finished = true; - return generation; } catch (WriteStateException e) { // if Manifest write results in dirty WriteStateException it's not safe to remove // new metadata files, because if Manifest was actually written to disk and its deletion @@ -346,7 +338,7 @@ void rollback() { * * @throws WriteStateException if exception occurs. See also {@link WriteStateException#isDirty()}. 
*/ - protected void updateClusterState(ClusterState newState, ClusterState previousState) + private void updateClusterState(ClusterState newState, ClusterState previousState) throws WriteStateException { MetaData newMetaData = newState.metaData(); @@ -406,7 +398,7 @@ public static Set getRelevantIndices(ClusterState state, ClusterState pre } private static boolean isDataOnlyNode(ClusterState state) { - return ((state.nodes().getLocalNode().isMasterNode() == false) && state.nodes().getLocalNode().isDataNode()); + return state.nodes().getLocalNode().isMasterNode() == false && state.nodes().getLocalNode().isDataNode(); } /** @@ -535,8 +527,7 @@ private static Set getRelevantIndicesOnDataOnlyNode(ClusterState state, C } private static Set getRelevantIndicesForMasterEligibleNode(ClusterState state) { - Set relevantIndices; - relevantIndices = new HashSet<>(); + Set relevantIndices = new HashSet<>(); // we have to iterate over the metadata to make sure we also capture closed indices for (IndexMetaData indexMetaData : state.metaData()) { relevantIndices.add(indexMetaData.getIndex()); diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index 3cc8ec167552c..b7b7d0759980e 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -40,7 +40,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; @@ -93,7 +92,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste public GatewayService(final Settings settings, final AllocationService allocationService, final ClusterService clusterService, final ThreadPool threadPool, final TransportNodesListGatewayMetaState listGatewayMetaState, - final IndicesService indicesService, final Discovery discovery) { + final Discovery discovery) { this.allocationService = allocationService; this.clusterService = clusterService; this.threadPool = threadPool; @@ -122,7 +121,7 @@ public GatewayService(final Settings settings, final AllocationService allocatio recoveryRunnable = () -> clusterService.submitStateUpdateTask("local-gateway-elected-state", new RecoverStateUpdateTask()); } else { - final Gateway gateway = new Gateway(settings, clusterService, listGatewayMetaState, indicesService); + final Gateway gateway = new Gateway(clusterService, listGatewayMetaState); recoveryRunnable = () -> gateway.performStateRecovery(new GatewayRecoveryListener()); } diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index a5f4f77da438b..48117eed2d56d 100644 --- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -23,6 +23,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -76,7 +77,7 @@ public 
LocalAllocateDangledIndices(TransportService transportService, ClusterSer new AllocateDangledRequestHandler()); } - public void allocateDangled(Collection indices, final Listener listener) { + public void allocateDangled(Collection indices, ActionListener listener) { ClusterState clusterState = clusterService.state(); DiscoveryNode masterNode = clusterState.nodes().getMasterNode(); if (masterNode == null) { @@ -88,9 +89,7 @@ public void allocateDangled(Collection indices, final Listener li transportService.sendRequest(masterNode, ACTION_NAME, request, new TransportResponseHandler() { @Override public AllocateDangledResponse read(StreamInput in) throws IOException { - final AllocateDangledResponse response = new AllocateDangledResponse(); - response.readFrom(in); - return response; + return new AllocateDangledResponse(in); } @Override @@ -110,12 +109,6 @@ public String executor() { }); } - public interface Listener { - void onResponse(AllocateDangledResponse response); - - void onFailure(Throwable e); - } - class AllocateDangledRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final AllocateDangledRequest request, final TransportChannel channel, Task task) throws Exception { @@ -203,7 +196,7 @@ public void onFailure(String source, Exception e) { @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { try { - channel.sendResponse(new AllocateDangledResponse(true)); + channel.sendResponse(new AllocateDangledResponse()); } catch (IOException e) { logger.warn("failed send response for allocating dangled", e); } @@ -248,29 +241,21 @@ public void writeTo(StreamOutput out) throws IOException { public static class AllocateDangledResponse extends TransportResponse { - private boolean ack; - - AllocateDangledResponse() { - } - - AllocateDangledResponse(boolean ack) { - this.ack = ack; - } - - public boolean ack() { - return ack; + private AllocateDangledResponse(StreamInput in) throws IOException { + if (in.getVersion().before(Version.V_8_0_0)) { + in.readBoolean(); + } } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - ack = in.readBoolean(); + private AllocateDangledResponse() { } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeBoolean(ack); + if (out.getVersion().before(Version.V_8_0_0)) { + out.writeBoolean(true); + } } } } diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index 3f28fead29439..d5dbfe828665f 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -382,7 +382,7 @@ private List findStateFilesByGeneration(final long generation, Path... loc return files; } - private String getStateFileName(long generation) { + public String getStateFileName(long generation) { return prefix + generation + STATE_FILE_EXTENSION; } @@ -466,7 +466,7 @@ public static void deleteMetaState(Path... 
dataLocations) throws IOException { IOUtils.rm(stateDirectories); } - String getPrefix() { + public String getPrefix() { return prefix; } } diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index d67cdccb9a09b..3bd8ba11a57ec 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -199,12 +199,11 @@ MetaData loadGlobalState() throws IOException { * * @throws WriteStateException if exception when writing state occurs. See also {@link WriteStateException#isDirty()} */ - public long writeManifestAndCleanup(String reason, Manifest manifest) throws WriteStateException { + public void writeManifestAndCleanup(String reason, Manifest manifest) throws WriteStateException { logger.trace("[_meta] writing state, reason [{}]", reason); try { long generation = MANIFEST_FORMAT.writeAndCleanup(manifest, nodeEnv.nodeDataPaths()); logger.trace("[_meta] state written (generation: {})", generation); - return generation; } catch (WriteStateException ex) { throw new WriteStateException(ex.isDirty(), "[_meta]: failed to write meta state", ex); } diff --git a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index 79030336acc02..d2e82d092e603 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -297,10 +297,10 @@ protected static NodeShardsResult buildNodeShardsResult(ShardRouting shard, bool /** * Split the list of node shard states into groups yes/no/throttle based on allocation deciders */ - private NodesToAllocate buildNodesToAllocate(RoutingAllocation allocation, - List nodeShardStates, - ShardRouting shardRouting, - boolean forceAllocate) { + private static NodesToAllocate buildNodesToAllocate(RoutingAllocation allocation, + List nodeShardStates, + ShardRouting shardRouting, + boolean forceAllocate) { List yesNodeShards = new ArrayList<>(); List throttledNodeShards = new ArrayList<>(); List noNodeShards = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/gateway/PriorityComparator.java b/server/src/main/java/org/elasticsearch/gateway/PriorityComparator.java index 1d24baf561ab3..60bdc2434e972 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PriorityComparator.java +++ b/server/src/main/java/org/elasticsearch/gateway/PriorityComparator.java @@ -56,11 +56,11 @@ public final int compare(ShardRouting o1, ShardRouting o2) { return cmp; } - private int priority(Settings settings) { + private static int priority(Settings settings) { return IndexMetaData.INDEX_PRIORITY_SETTING.get(settings); } - private long timeCreated(Settings settings) { + private static long timeCreated(Settings settings) { return settings.getAsLong(IndexMetaData.SETTING_CREATION_DATE, -1L); } diff --git a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index 10bd6115b4c74..ce3cde3e6db71 100644 --- a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -243,8 +243,8 @@ public AllocateUnassignedDecision makeAllocationDecision(final ShardRouting unas * YES or THROTTLE). 
If in explain mode, also returns the node-level explanations as the second element * in the returned tuple. */ - private Tuple> canBeAllocatedToAtLeastOneNode(ShardRouting shard, - RoutingAllocation allocation) { + private static Tuple> canBeAllocatedToAtLeastOneNode(ShardRouting shard, + RoutingAllocation allocation) { Decision madeDecision = Decision.NO; final boolean explain = allocation.debugDecision(); Map nodeDecisions = explain ? new HashMap<>() : null; @@ -260,7 +260,7 @@ private Tuple> canBeAllocatedToAtLea if (explain) { madeDecision = decision; } else { - return Tuple.tuple(decision, nodeDecisions); + return Tuple.tuple(decision, null); } } else if (madeDecision.type() == Decision.Type.NO && decision.type() == Decision.Type.THROTTLE) { madeDecision = decision; @@ -276,8 +276,8 @@ private Tuple> canBeAllocatedToAtLea * Takes the store info for nodes that have a shard store and adds them to the node decisions, * leaving the node explanations untouched for those nodes that do not have any store information. */ - private List augmentExplanationsWithStoreInfo(Map nodeDecisions, - Map withShardStores) { + private static List augmentExplanationsWithStoreInfo(Map nodeDecisions, + Map withShardStores) { if (nodeDecisions == null || withShardStores == null) { return null; } @@ -295,8 +295,8 @@ private List augmentExplanationsWithStoreInfo(Map data) { + private static TransportNodesListShardStoreMetaData.StoreFilesMetaData findStore(ShardRouting shard, RoutingAllocation allocation, + AsyncShardFetch.FetchResult data) { assert shard.currentNodeId() != null; DiscoveryNode primaryNode = allocation.nodes().get(shard.currentNodeId()); if (primaryNode == null) { diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index 477961c8a6d0c..ab0fad88ecdfa 100644 --- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -94,23 +94,10 @@ public Request() { public Request(String... 
nodesIds) { super(nodesIds); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - } } public static class NodesGatewayMetaState extends BaseNodesResponse { - NodesGatewayMetaState() { - } - public NodesGatewayMetaState(ClusterName clusterName, List nodes, List failures) { super(clusterName, nodes, failures); } @@ -135,15 +122,6 @@ public NodeRequest() { super(nodeId); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - } } public static class NodeGatewayMetaState extends BaseNodeResponse { diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index c9e7100ebd66e..1893be3acd518 100644 --- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -50,6 +50,7 @@ import java.io.IOException; import java.util.List; +import java.util.Objects; /** * This transport action is used to fetch the shard version from each node during primary allocation in {@link GatewayAllocator}. @@ -309,14 +310,8 @@ public boolean equals(Object o) { NodeGatewayStartedShards that = (NodeGatewayStartedShards) o; - if (primary != that.primary) { - return false; - } - if (allocationId != null ? !allocationId.equals(that.allocationId) : that.allocationId != null) { - return false; - } - return storeException != null ? storeException.equals(that.storeException) : that.storeException == null; - + return primary == that.primary && Objects.equals(allocationId, that.allocationId) + && Objects.equals(storeException, that.storeException); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 091de68c514d0..b1812c40e03eb 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -251,7 +251,7 @@ public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, fin } if (hasNested) { - warmUp.add(Queries.newNonNestedFilter(indexSettings.getIndexVersionCreated())); + warmUp.add(Queries.newNonNestedFilter()); } final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size() * warmUp.size()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index 32c44fd5f55a0..12e53a5f9d4b9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -24,7 +24,6 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; -import org.elasticsearch.Version; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; @@ -202,10 +201,7 @@ public void preParse(ParseContext context) { } @Override - 
public void postParse(ParseContext context) throws IOException { - if (context.indexSettings().getIndexVersionCreated().before(Version.V_6_1_0)) { - super.parse(context); - } + public void postParse(ParseContext context) { } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index d98630e5f765e..5de5394a94abe 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -136,10 +136,7 @@ public Supplier queryShardContextSupplier() { protected Function similarityLookupService() { return similarityLookupService; } public ParserContext createMultiFieldContext(ParserContext in) { - return new MultiFieldParserContext(in) { - @Override - public boolean isWithinMultiField() { return true; } - }; + return new MultiFieldParserContext(in); } static class MultiFieldParserContext extends ParserContext { @@ -147,6 +144,9 @@ static class MultiFieldParserContext extends ParserContext { super(in.type(), in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.queryShardContextSupplier()); } + + @Override + public boolean isWithinMultiField() { return true; } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java index 8c032402b5090..7eb8d180547c1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java @@ -28,7 +28,6 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -255,15 +254,9 @@ public void postParse(ParseContext context) throws IOException { // we share the parent docs fields to ensure good compression SequenceIDFields seqID = context.seqID(); assert seqID != null; - final Version versionCreated = context.mapperService().getIndexSettings().getIndexVersionCreated(); - final boolean includePrimaryTerm = versionCreated.before(Version.V_6_1_0); for (Document doc : context.nonRootDocuments()) { doc.add(seqID.seqNo); doc.add(seqID.seqNoDocValue); - if (includePrimaryTerm) { - // primary terms are used to distinguish between parent and nested docs since 6.1.0 - doc.add(seqID.primaryTerm); - } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java index c4d9ef966ca3d..cb17f182ef77a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java @@ -140,7 +140,7 @@ public Query termsQuery(List values, QueryShardContext context) { .anyMatch(indexType::equals)) { if (context.getMapperService().hasNested()) { // type filters are expected not to match nested docs - return Queries.newNonNestedFilter(context.indexVersionCreated()); + return Queries.newNonNestedFilter(); } else { return new MatchAllDocsQuery(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java index 77d7be62fc1b9..9848a23cac11b 
100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java @@ -19,8 +19,10 @@ package org.elasticsearch.index.mapper; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.index.IndexOptions; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.analysis.AnalysisMode; @@ -37,6 +39,7 @@ import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue; public class TypeParsers { + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(TypeParsers.class)); public static final String DOC_VALUES = "doc_values"; public static final String INDEX_OPTIONS_DOCS = "docs"; @@ -214,11 +217,18 @@ public static void parseField(FieldMapper.Builder builder, String name, Map multiFieldsPropNodes; + parserContext = parserContext.createMultiFieldContext(parserContext); + final Map multiFieldsPropNodes; if (propNode instanceof List && ((List) propNode).isEmpty()) { multiFieldsPropNodes = Collections.emptyMap(); } else if (propNode instanceof Map) { diff --git a/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java index 7a2373e5ad8b5..c5894a3e1c018 100644 --- a/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java @@ -19,14 +19,11 @@ package org.elasticsearch.index.query; -import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -148,10 +145,6 @@ public static Query newFilter(QueryShardContext context, String fieldPattern) { fields = context.simpleMatchToIndexNames(fieldPattern); } - if (context.indexVersionCreated().before(Version.V_6_1_0)) { - return newLegacyExistsQuery(context, fields); - } - if (fields.size() == 1) { String field = fields.iterator().next(); return newFieldExistsQuery(context, field); @@ -164,28 +157,6 @@ public static Query newFilter(QueryShardContext context, String fieldPattern) { return new ConstantScoreQuery(boolFilterBuilder.build()); } - private static Query newLegacyExistsQuery(QueryShardContext context, Collection fields) { - // We create TermsQuery directly here rather than using FieldNamesFieldType.termsQuery() - // so we don't end up with deprecation warnings - if (fields.size() == 1) { - Query filter = newLegacyExistsQuery(context, fields.iterator().next()); - return new ConstantScoreQuery(filter); - } - - BooleanQuery.Builder boolFilterBuilder = new BooleanQuery.Builder(); - for (String field : fields) { - Query filter = newLegacyExistsQuery(context, field); - boolFilterBuilder.add(filter, BooleanClause.Occur.SHOULD); - } - return new ConstantScoreQuery(boolFilterBuilder.build()); - } - - private static Query newLegacyExistsQuery(QueryShardContext context, String 
field) { - MappedFieldType fieldType = context.fieldMapper(field); - String fieldName = fieldType != null ? fieldType.name() : field; - return new TermQuery(new Term(FieldNamesFieldMapper.NAME, fieldName)); - } - private static Query newFieldExistsQuery(QueryShardContext context, String field) { MappedFieldType fieldType = context.getMapperService().fullName(field); if (fieldType == null) { diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index ee8062308ac11..fecf5c8407e98 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -281,7 +281,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException { Query innerQuery; ObjectMapper objectMapper = context.nestedScope().getObjectMapper(); if (objectMapper == null) { - parentFilter = context.bitsetFilter(Queries.newNonNestedFilter(context.indexVersionCreated())); + parentFilter = context.bitsetFilter(Queries.newNonNestedFilter()); } else { parentFilter = context.bitsetFilter(objectMapper.nestedTypeFilter()); } @@ -388,7 +388,7 @@ public TopDocsAndMaxScore[] topDocs(SearchHit[] hits) throws IOException { SearchHit hit = hits[i]; Query rawParentFilter; if (parentObjectMapper == null) { - rawParentFilter = Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated()); + rawParentFilter = Queries.newNonNestedFilter(); } else { rawParentFilter = parentObjectMapper.nestedTypeFilter(); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 437e7934088e7..892056674019f 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.ObjectLongHashMap; import com.carrotsearch.hppc.ObjectLongMap; - import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -181,6 +180,18 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L */ private RetentionLeases retentionLeases = RetentionLeases.EMPTY; + /** + * The primary term of the most-recently persisted retention leases. This is used to check if we need to persist the current retention + * leases. + */ + private long persistedRetentionLeasesPrimaryTerm; + + /** + * The version of the most-recently persisted retention leases. This is used to check if we need to persist the current retention + * leases. + */ + private long persistedRetentionLeasesVersion; + /** * Get all retention leases tracked on this shard. * @@ -343,7 +354,8 @@ public RetentionLeases loadRetentionLeases(final Path path) throws IOException { private final Object retentionLeasePersistenceLock = new Object(); /** - * Persists the current retention leases to their dedicated state file. + * Persists the current retention leases to their dedicated state file. If this version of the retention leases are already persisted + * then persistence is skipped. 
* * @param path the path to the directory containing the state file * @throws WriteStateException if an exception occurs writing the state file @@ -352,10 +364,16 @@ public void persistRetentionLeases(final Path path) throws WriteStateException { synchronized (retentionLeasePersistenceLock) { final RetentionLeases currentRetentionLeases; synchronized (this) { + if (retentionLeases.supersedes(persistedRetentionLeasesPrimaryTerm, persistedRetentionLeasesVersion) == false) { + logger.trace("skipping persisting retention leases [{}], already persisted", retentionLeases); + return; + } currentRetentionLeases = retentionLeases; } logger.trace("persisting retention leases [{}]", currentRetentionLeases); RetentionLeases.FORMAT.writeAndCleanup(currentRetentionLeases, path); + persistedRetentionLeasesPrimaryTerm = currentRetentionLeases.primaryTerm(); + persistedRetentionLeasesVersion = currentRetentionLeases.version(); } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java index c69a4c6fab042..74c98bf3dca19 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java @@ -28,8 +28,6 @@ import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -45,7 +43,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.Collections; import java.util.Objects; import java.util.function.Supplier; @@ -88,14 +85,10 @@ abstract static class TransportRetentionLeaseAction> extend @Override protected ShardsIterator shards(final ClusterState state, final InternalRequest request) { - final IndexShardRoutingTable shardRoutingTable = state + return state .routingTable() - .shardRoutingTable(request.concreteIndex(), request.request().getShardId().id()); - if (shardRoutingTable.primaryShard().active()) { - return shardRoutingTable.primaryShardIt(); - } else { - return new PlainShardIterator(request.request().getShardId(), Collections.emptyList()); - } + .shardRoutingTable(request.concreteIndex(), request.request().getShardId().id()) + .primaryShardIt(); } @Override @@ -174,6 +167,7 @@ void doRetentionLeaseAction(final IndexShard indexShard, final AddRequest reques protected Writeable.Reader getResponseReader() { return Response::new; } + } @Override @@ -400,9 +394,10 @@ public static class Response extends ActionResponse { public Response() { } - Response(StreamInput in) throws IOException { + Response(final StreamInput in) throws IOException { super(in); } + } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java index 7c3b9e3c7b9c9..81fd7e2fce047 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java @@ -70,13 +70,27 @@ public long version() { /** * Checks if this retention leases collection supersedes the 
specified retention leases collection. A retention leases collection - * supersedes another retention leases collection if its primary term is higher, or if for equal primary terms its version is higher + * supersedes another retention leases collection if its primary term is higher, or if for equal primary terms its version is higher. * * @param that the retention leases collection to test against * @return true if this retention leases collection supersedes the specified retention lease collection, otherwise false */ - public boolean supersedes(final RetentionLeases that) { - return primaryTerm > that.primaryTerm || primaryTerm == that.primaryTerm && version > that.version; + boolean supersedes(final RetentionLeases that) { + return supersedes(that.primaryTerm, that.version); + } + + /** + * Checks if this retention leases collection would supersede a retention leases collection with the specified primary term and version. + * A retention leases collection supersedes another retention leases collection if its primary term is higher, or if for equal primary + * terms its version is higher. + * + * @param primaryTerm the primary term + * @param version the version + * @return true if this retention leases collection would supersede a retention lease collection with the specified primary term and + * version + */ + boolean supersedes(final long primaryTerm, final long version) { + return this.primaryTerm > primaryTerm || this.primaryTerm == primaryTerm && this.version > version; + } private final Map leases; @@ -203,7 +217,7 @@ public static RetentionLeases fromXContent(final XContentParser parser) { return PARSER.apply(parser, null); } - static final MetaDataStateFormat FORMAT = new MetaDataStateFormat("retention-leases-") { + static final MetaDataStateFormat FORMAT = new MetaDataStateFormat<>("retention-leases-") { @Override public void toXContent(final XContentBuilder builder, final RetentionLeases retentionLeases) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 11e4fb81d9fbe..da5ee8f8363ff 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.shard; import com.carrotsearch.hppc.ObjectLongMap; - import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CheckIndex; @@ -2496,7 +2495,7 @@ public void acquirePrimaryOperationPermit(ActionListener onPermitAcq verifyNotClosed(); assert shardRouting.primary() : "acquirePrimaryOperationPermit should only be called on primary shard: " + shardRouting; - indexShardOperationPermits.acquire(onPermitAcquired, executorOnDelay, false, debugInfo); + indexShardOperationPermits.acquire(wrapPrimaryOperationPermitListener(onPermitAcquired), executorOnDelay, false, debugInfo); } /** @@ -2507,7 +2506,27 @@ public void acquireAllPrimaryOperationsPermits(final ActionListener verifyNotClosed(); assert shardRouting.primary() : "acquireAllPrimaryOperationsPermits should only be called on primary shard: " + shardRouting; - asyncBlockOperations(onPermitAcquired, timeout.duration(), timeout.timeUnit()); + asyncBlockOperations(wrapPrimaryOperationPermitListener(onPermitAcquired), timeout.duration(), timeout.timeUnit()); + } + + /** + * Wraps the action to run on a primary after acquiring permit. 
This wrapping is used to check if the shard is in primary mode before + * executing the action. + * + * @param listener the listener to wrap + * @return the wrapped listener + */ + private ActionListener wrapPrimaryOperationPermitListener(final ActionListener listener) { + return ActionListener.delegateFailure( + listener, + (l, r) -> { + if (replicationTracker.isPrimaryMode()) { + l.onResponse(r); + } else { + r.close(); + l.onFailure(new ShardNotInPrimaryModeException(shardId, state)); + } + }); } private void asyncBlockOperations(ActionListener onPermitAcquired, long timeout, TimeUnit timeUnit) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LegacyTypeFieldMapperTests.java b/server/src/main/java/org/elasticsearch/index/shard/ShardNotInPrimaryModeException.java similarity index 52% rename from server/src/test/java/org/elasticsearch/index/mapper/LegacyTypeFieldMapperTests.java rename to server/src/main/java/org/elasticsearch/index/shard/ShardNotInPrimaryModeException.java index 9566e1afa6df0..8bc23dcdd00f7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/LegacyTypeFieldMapperTests.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardNotInPrimaryModeException.java @@ -17,25 +17,20 @@ * under the License. */ -package org.elasticsearch.index.mapper; +package org.elasticsearch.index.shard; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.common.io.stream.StreamInput; -public class LegacyTypeFieldMapperTests extends ESSingleNodeTestCase { +import java.io.IOException; - @Override - protected boolean forbidPrivateIndexSettings() { - return false; +public class ShardNotInPrimaryModeException extends IllegalIndexShardStateException { + + public ShardNotInPrimaryModeException(final ShardId shardId, final IndexShardState currentState) { + super(shardId, currentState, "shard is not in primary mode"); } - public void testDocValuesMultipleTypes() throws Exception { - TypeFieldMapperTests.testDocValues(index -> { - final Settings settings = Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_0_0).build(); - return this.createIndex(index, settings); - }); + public ShardNotInPrimaryModeException(final StreamInput in) throws IOException { + super(in); } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java index dae910e5fe3c4..4082293d9f2eb 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java @@ -41,7 +41,6 @@ import org.apache.lucene.util.BitSetIterator; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.common.lucene.search.Queries; @@ -67,7 +66,7 @@ final class ShardSplittingQuery extends Query { ShardSplittingQuery(IndexMetaData indexMetaData, int shardId, boolean hasNested) { this.indexMetaData = indexMetaData; this.shardId = shardId; - this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer(indexMetaData.getCreationVersion()) : null; + this.nestedParentBitSetProducer = hasNested ? 
newParentDocBitSetProducer() : null; } @Override public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) { @@ -339,9 +338,9 @@ public float matchCost() { * than once. There is no point in using BitsetFilterCache#BitSetProducerWarmer since we use this only as a delete by query which is * executed on a recovery-private index writer. There is no point in caching it and it won't have a cache hit either. */ - private static BitSetProducer newParentDocBitSetProducer(Version indexVersionCreated) { + private static BitSetProducer newParentDocBitSetProducer() { return context -> { - Query query = Queries.newNonNestedFilter(indexVersionCreated); + Query query = Queries.newNonNestedFilter(); final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); final IndexSearcher searcher = new IndexSearcher(topLevelContext); searcher.setQueryCache(null); diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 65d2f8d7812f8..5f1f7d23a8c6a 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -1550,23 +1550,8 @@ public void trimUnsafeCommits(final long lastSyncedGlobalCheckpoint, final long final IndexCommit lastIndexCommitCommit = existingCommits.get(existingCommits.size() - 1); final String translogUUID = lastIndexCommitCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY); final IndexCommit startingIndexCommit; - // We may not have a safe commit if an index was create before v6.2; and if there is a snapshotted commit whose translog - // are not retained but max_seqno is at most the global checkpoint, we may mistakenly select it as a starting commit. - // To avoid this issue, we only select index commits whose translog are fully retained. - if (indexVersionCreated.before(org.elasticsearch.Version.V_6_2_0)) { - final List recoverableCommits = new ArrayList<>(); - for (IndexCommit commit : existingCommits) { - if (minRetainedTranslogGen <= Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))) { - recoverableCommits.add(commit); - } - } - assert recoverableCommits.isEmpty() == false : "No commit point with translog found; " + - "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]"; - startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint); - } else { - // TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint. - startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); - } + // TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint. 
+ startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); if (translogUUID.equals(startingIndexCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY)) == false) { throw new IllegalStateException("starting commit translog uuid [" diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 921a8f9cc7c47..6291531b7f907 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -594,10 +594,6 @@ static final class PreSyncedFlushResponse extends TransportResponse { this.existingSyncId = existingSyncId; } - boolean includeNumDocs(Version version) { - return version.onOrAfter(Version.V_6_2_2); - } - boolean includeExistingSyncId(Version version) { return version.onOrAfter(Version.V_6_3_0); } @@ -606,11 +602,7 @@ boolean includeExistingSyncId(Version version) { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); commitId = new Engine.CommitId(in); - if (includeNumDocs(in.getVersion())) { - numDocs = in.readInt(); - } else { - numDocs = UNKNOWN_NUM_DOCS; - } + numDocs = in.readInt(); if (includeExistingSyncId(in.getVersion())) { existingSyncId = in.readOptionalString(); } @@ -620,9 +612,7 @@ public void readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); commitId.writeTo(out); - if (includeNumDocs(out.getVersion())) { - out.writeInt(numDocs); - } + out.writeInt(numDocs); if (includeExistingSyncId(out.getVersion())) { out.writeOptionalString(existingSyncId); } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 4b77c06447836..782101763b7a4 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -40,6 +40,7 @@ import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.InternalClusterInfoService; @@ -271,8 +272,9 @@ protected Node( nodeEnvironment = new NodeEnvironment(tmpSettings, environment); resourcesToClose.add(nodeEnvironment); - logger.info("node name [{}], node ID [{}]", - NODE_NAME_SETTING.get(tmpSettings), nodeEnvironment.nodeId()); + logger.info("node name [{}], node ID [{}], cluster name [{}]", + NODE_NAME_SETTING.get(tmpSettings), nodeEnvironment.nodeId(), + ClusterName.CLUSTER_NAME_SETTING.get(tmpSettings).value()); final JvmInfo jvmInfo = JvmInfo.jvmInfo(); logger.info( @@ -470,8 +472,8 @@ protected Node( ).collect(Collectors.toSet()); final TransportService transportService = newTransportService(settings, transport, threadPool, networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings(), taskHeaders); - final GatewayMetaState gatewayMetaState = new GatewayMetaState(settings, nodeEnvironment, metaStateService, - metaDataIndexUpgradeService, metaDataUpgrader, transportService, clusterService, indicesService); + final GatewayMetaState gatewayMetaState = new GatewayMetaState(settings, metaStateService, + metaDataIndexUpgradeService, metaDataUpgrader, 
transportService, clusterService); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); final SearchTransportService searchTransportService = new SearchTransportService(transportService, SearchExecutionStatsCollector.makeWrapper(responseCollectorService)); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java deleted file mode 100644 index 044caee41c55d..0000000000000 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.repositories.blobstore; - -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.CheckedFunction; -import org.elasticsearch.common.blobstore.BlobContainer; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.snapshots.SnapshotInfo; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; - -/** - * Base class that handles serialization of various data structures during snapshot/restore operations. 
- */ -public abstract class BlobStoreFormat { - - protected final String blobNameFormat; - - protected final CheckedFunction reader; - - protected final NamedXContentRegistry namedXContentRegistry; - - // Serialization parameters to specify correct context for metadata serialization - protected static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; - - static { - Map snapshotOnlyParams = new HashMap<>(); - // when metadata is serialized certain elements of the metadata shouldn't be included into snapshot - // exclusion of these elements is done by setting MetaData.CONTEXT_MODE_PARAM to MetaData.CONTEXT_MODE_SNAPSHOT - snapshotOnlyParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_SNAPSHOT); - // serialize SnapshotInfo using the SNAPSHOT mode - snapshotOnlyParams.put(SnapshotInfo.CONTEXT_MODE_PARAM, SnapshotInfo.CONTEXT_MODE_SNAPSHOT); - SNAPSHOT_ONLY_FORMAT_PARAMS = new ToXContent.MapParams(snapshotOnlyParams); - } - - /** - * @param blobNameFormat format of the blobname in {@link String#format(Locale, String, Object...)} format - * @param reader the prototype object that can deserialize objects with type T - */ - protected BlobStoreFormat(String blobNameFormat, CheckedFunction reader, - NamedXContentRegistry namedXContentRegistry) { - this.reader = reader; - this.blobNameFormat = blobNameFormat; - this.namedXContentRegistry = namedXContentRegistry; - } - - /** - * Reads and parses the blob with given blob name. - * - * @param blobContainer blob container - * @param blobName blob name - * @return parsed blob object - */ - public abstract T readBlob(BlobContainer blobContainer, String blobName) throws IOException; - - /** - * Reads and parses the blob with given name, applying name translation using the {link #blobName} method - * - * @param blobContainer blob container - * @param name name to be translated into - * @return parsed blob object - */ - public T read(BlobContainer blobContainer, String name) throws IOException { - String blobName = blobName(name); - return readBlob(blobContainer, blobName); - } - - /** - * Deletes obj in the blob container - */ - public void delete(BlobContainer blobContainer, String name) throws IOException { - blobContainer.deleteBlob(blobName(name)); - } - - public String blobName(String name) { - return String.format(Locale.ROOT, blobNameFormat, name); - } - - protected T read(BytesReference bytes) throws IOException { - try (XContentParser parser = XContentHelper - .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytes)) { - return reader.apply(parser); - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index d4d009b8ad63e..d216fe3234e83 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.OutputStreamIndexOutput; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.blobstore.BlobContainer; @@ -33,24 +34,43 @@ import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.lucene.store.ByteArrayIndexInput; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.Streams; import org.elasticsearch.gateway.CorruptStateException; +import org.elasticsearch.snapshots.SnapshotInfo; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; /** * Snapshot metadata file format used in v2.0 and above */ -public class ChecksumBlobStoreFormat extends BlobStoreFormat { +public final class ChecksumBlobStoreFormat { + + // Serialization parameters to specify correct context for metadata serialization + private static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; + + static { + Map snapshotOnlyParams = new HashMap<>(); + // when metadata is serialized certain elements of the metadata shouldn't be included into snapshot + // exclusion of these elements is done by setting MetaData.CONTEXT_MODE_PARAM to MetaData.CONTEXT_MODE_SNAPSHOT + snapshotOnlyParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_SNAPSHOT); + // serialize SnapshotInfo using the SNAPSHOT mode + snapshotOnlyParams.put(SnapshotInfo.CONTEXT_MODE_PARAM, SnapshotInfo.CONTEXT_MODE_SNAPSHOT); + SNAPSHOT_ONLY_FORMAT_PARAMS = new ToXContent.MapParams(snapshotOnlyParams); + } private static final XContentType DEFAULT_X_CONTENT_TYPE = XContentType.SMILE; @@ -59,12 +79,18 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm private static final int BUFFER_SIZE = 4096; - protected final XContentType xContentType; + private final XContentType xContentType; - protected final boolean compress; + private final boolean compress; private final String codec; + private final String blobNameFormat; + + private final CheckedFunction reader; + + private final NamedXContentRegistry namedXContentRegistry; + /** * @param codec codec name * @param blobNameFormat format of the blobname in {@link String#format} format @@ -74,7 +100,9 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm */ public ChecksumBlobStoreFormat(String codec, String blobNameFormat, CheckedFunction reader, NamedXContentRegistry namedXContentRegistry, boolean compress, XContentType xContentType) { - super(blobNameFormat, reader, namedXContentRegistry); + this.reader = reader; + this.blobNameFormat = blobNameFormat; + this.namedXContentRegistry = namedXContentRegistry; this.xContentType = xContentType; this.compress = compress; this.codec = codec; @@ -91,6 +119,29 @@ public ChecksumBlobStoreFormat(String codec, String blobNameFormat, CheckedFunct this(codec, blobNameFormat, reader, namedXContentRegistry, compress, DEFAULT_X_CONTENT_TYPE); } + /** + * Reads and parses the blob with given name, applying name translation using the {@link #blobName} method + * + * @param blobContainer blob container + * @param name name to be translated into + * @return parsed blob object + */ + public T read(BlobContainer blobContainer, String name) throws IOException { 
String blobName = blobName(name); + return readBlob(blobContainer, blobName); + } + + /** + * Deletes obj in the blob container + */ + public void delete(BlobContainer blobContainer, String name) throws IOException { + blobContainer.deleteBlob(blobName(name)); + } + + public String blobName(String name) { + return String.format(Locale.ROOT, blobNameFormat, name); + } + /** * Reads blob with specified name without resolving the blobName using using {@link #blobName} method. * @@ -108,8 +159,10 @@ public T readBlob(BlobContainer blobContainer, String blobName) throws IOExcepti CodecUtil.checkHeader(indexInput, codec, VERSION, VERSION); long filePointer = indexInput.getFilePointer(); long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer; - BytesReference bytesReference = new BytesArray(bytes, (int) filePointer, (int) contentSize); - return read(bytesReference); + try (XContentParser parser = XContentHelper.createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, + new BytesArray(bytes, (int) filePointer, (int) contentSize))) { + return reader.apply(parser); + } } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { // we trick this into a dedicated exception with the original stacktrace throw new CorruptStateException(ex); @@ -156,7 +209,17 @@ public void write(T obj, BlobContainer blobContainer, String name) throws IOExce } private void writeTo(final T obj, final String blobName, final CheckedConsumer consumer) throws IOException { - final BytesReference bytes = write(obj); + final BytesReference bytes; + try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { + if (compress) { + try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(bytesStreamOutput)) { + write(obj, compressedStreamOutput); + } + } else { + write(obj, bytesStreamOutput); + } + bytes = bytesStreamOutput.bytes(); + } try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { final String resourceDesc = "ChecksumBlobStoreFormat.writeBlob(blob=\"" + blobName + "\")"; try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, blobName, outputStream, BUFFER_SIZE)) { @@ -176,20 +239,7 @@ public void close() { } } - protected BytesReference write(T obj) throws IOException { - try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { - if (compress) { - try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(bytesStreamOutput)) { - write(obj, compressedStreamOutput); - } - } else { - write(obj, bytesStreamOutput); - } - return bytesStreamOutput.bytes(); - } - } - - protected void write(T obj, StreamOutput streamOutput) throws IOException { + private void write(T obj, StreamOutput streamOutput) throws IOException { try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType, streamOutput)) { builder.startObject(); obj.toXContent(builder, SNAPSHOT_ONLY_FORMAT_PARAMS); diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index d9a7fdb831efd..f0eaa0d51dadf 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -278,7 +278,7 @@ public Query buildFilteredQuery(Query query) { && typeFilter == null // when a _type filter is set, it will automatically exclude nested docs && new 
NestedHelper(mapperService()).mightMatchNestedDocs(query) && (aliasFilter == null || new NestedHelper(mapperService()).mightMatchNestedDocs(aliasFilter))) { - filters.add(Queries.newNonNestedFilter(mapperService().getIndexSettings().getIndexVersionCreated())); + filters.add(Queries.newNonNestedFilter()); } if (aliasFilter != null) { diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 8cf3138212f7f..b703493b4d505 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; @@ -39,7 +40,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import org.elasticsearch.core.internal.io.IOUtils; @@ -302,21 +302,7 @@ protected void doClose() { } public void executeDfsPhase(ShardSearchRequest request, SearchTask task, ActionListener listener) { - rewriteShardRequest(request, new ActionListener() { - @Override - public void onResponse(ShardSearchRequest request) { - try { - listener.onResponse(executeDfsPhase(request, task)); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + rewriteShardRequest(request, ActionListener.map(listener, r -> executeDfsPhase(r, task))); } private DfsSearchResult executeDfsPhase(ShardSearchRequest request, SearchTask task) throws IOException { @@ -351,30 +337,11 @@ private void loadOrExecuteQueryPhase(final ShardSearchRequest request, final Sea } public void executeQueryPhase(ShardSearchRequest request, SearchTask task, ActionListener listener) { - rewriteShardRequest(request, new ActionListener() { - @Override - public void onResponse(ShardSearchRequest request) { - try { - listener.onResponse(executeQueryPhase(request, task)); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + rewriteShardRequest(request, ActionListener.map(listener, r -> executeQueryPhase(r, task))); } private void runAsync(long id, Supplier executable, ActionListener listener) { - getExecutor(id).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - + getExecutor(id).execute(new ActionRunnable(listener) { @Override protected void doRun() { listener.onResponse(executable.get()); @@ -1058,12 +1025,7 @@ private void rewriteShardRequest(ShardSearchRequest request, ActionListener actionListener = ActionListener.wrap(r -> // now we need to check if there is a pending refresh and register shard.awaitShardSearchActive(b -> - executor.execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - + executor.execute(new ActionRunnable(listener) { @Override protected void doRun() { 
listener.onResponse(request); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java index 810126e851251..54dfc301b2dbc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java @@ -40,6 +40,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -47,7 +48,7 @@ import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; public class FiltersAggregationBuilder extends AbstractAggregationBuilder - implements MultiBucketAggregationBuilder { + implements MultiBucketAggregationBuilder { public static final String NAME = "filters"; private static final ParseField FILTERS_FIELD = new ParseField("filters"); @@ -74,7 +75,7 @@ private FiltersAggregationBuilder(String name, List filters, boolea this.filters = new ArrayList<>(filters); if (keyed) { // internally we want to have a fixed order of filters, regardless of the order of the filters in the request - Collections.sort(this.filters, (KeyedFilter kf1, KeyedFilter kf2) -> kf1.key().compareTo(kf2.key())); + this.filters.sort(Comparator.comparing(KeyedFilter::key)); this.keyed = true; } else { this.keyed = false; @@ -220,9 +221,9 @@ protected AggregationBuilder doRewrite(QueryRewriteContext queryShardContext) th @Override protected AggregatorFactory doBuild(SearchContext context, AggregatorFactory parent, Builder subFactoriesBuilder) - throws IOException { + throws IOException { return new FiltersAggregatorFactory(name, filters, keyed, otherBucket, otherBucketKey, context, parent, - subFactoriesBuilder, metaData); + subFactoriesBuilder, metaData); } @Override @@ -248,15 +249,15 @@ protected XContentBuilder internalXContent(XContentBuilder builder, Params param } public static FiltersAggregationBuilder parse(String aggregationName, XContentParser parser) - throws IOException { + throws IOException { - List keyedFilters = null; - List nonKeyedFilters = null; + List filters = new ArrayList<>(); - XContentParser.Token token = null; + XContentParser.Token token; String currentFieldName = null; String otherBucketKey = null; Boolean otherBucket = null; + boolean keyed = false; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -265,61 +266,61 @@ public static FiltersAggregationBuilder parse(String aggregationName, XContentPa otherBucket = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.VALUE_STRING) { if (OTHER_BUCKET_KEY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { otherBucketKey = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); } } else if 
(token == XContentParser.Token.START_OBJECT) { if (FILTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - keyedFilters = new ArrayList<>(); String key = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { key = parser.currentName(); } else { QueryBuilder filter = parseInnerQueryBuilder(parser); - keyedFilters.add(new FiltersAggregator.KeyedFilter(key, filter)); + filters.add(new FiltersAggregator.KeyedFilter(key, filter)); } } + keyed = true; } else { throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { if (FILTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - nonKeyedFilters = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + List builders = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { QueryBuilder filter = parseInnerQueryBuilder(parser); - nonKeyedFilters.add(filter); + builders.add(filter); + } + for (int i = 0; i < builders.size(); i++) { + filters.add(new KeyedFilter(String.valueOf(i), builders.get(i))); } } else { throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); } } else { throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); } } + if (filters.isEmpty()) { + throw new IllegalArgumentException("[" + FILTERS_FIELD + "] cannot be empty."); + } + + FiltersAggregationBuilder factory = new FiltersAggregationBuilder(aggregationName, filters, keyed); + if (otherBucket == null && otherBucketKey != null) { // automatically enable the other bucket if a key is set, as per the doc otherBucket = true; } - - FiltersAggregationBuilder factory; - if (keyedFilters != null) { - factory = new FiltersAggregationBuilder(aggregationName, - keyedFilters.toArray(new FiltersAggregator.KeyedFilter[keyedFilters.size()])); - } else { - factory = new FiltersAggregationBuilder(aggregationName, - nonKeyedFilters.toArray(new QueryBuilder[nonKeyedFilters.size()])); - } if (otherBucket != null) { factory.otherBucket(otherBucket); } @@ -338,9 +339,9 @@ protected int doHashCode() { protected boolean doEquals(Object obj) { FiltersAggregationBuilder other = (FiltersAggregationBuilder) obj; return Objects.equals(filters, other.filters) - && Objects.equals(keyed, other.keyed) - && Objects.equals(otherBucket, other.otherBucket) - && Objects.equals(otherBucketKey, other.otherBucketKey); + && Objects.equals(keyed, other.keyed) + && Objects.equals(otherBucket, other.otherBucket) + && Objects.equals(otherBucketKey, other.otherBucketKey); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java index 3e3c07bc42398..68e46b37bb064 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java +++ 
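/*
 * Illustrative sketch, not part of this diff: after the FiltersAggregationBuilder.parse()
 * refactor above, both request shapes end up in a single keyed-filter list - the keyed object
 * form keeps its JSON keys, anonymous filters from the array form get positional keys
 * ("0", "1", ...), and an empty filter set is rejected. The stand-in types below are
 * hypothetical; only the normalization idea is taken from the hunk:
 */
import java.util.ArrayList;
import java.util.List;

final class FiltersNormalizationSketch {
    /** Hypothetical stand-in for FiltersAggregator.KeyedFilter (a key plus its filter). */
    static final class KeyedFilter {
        final String key;
        final Object filter;
        KeyedFilter(String key, Object filter) {
            this.key = key;
            this.filter = filter;
        }
    }

    static List<KeyedFilter> fromAnonymous(List<?> anonymousFilters) {
        if (anonymousFilters.isEmpty()) {
            throw new IllegalArgumentException("[filters] cannot be empty.");
        }
        List<KeyedFilter> keyed = new ArrayList<>();
        for (int i = 0; i < anonymousFilters.size(); i++) {
            // anonymous filters get positional keys, matching the parse() hunk above
            keyed.add(new KeyedFilter(String.valueOf(i), anonymousFilters.get(i)));
        }
        return keyed;
    }
}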
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -66,7 +66,7 @@ public class NestedAggregator extends BucketsAggregator implements SingleBucketA super(name, factories, context, parentAggregator, pipelineAggregators, metaData); Query parentFilter = parentObjectMapper != null ? parentObjectMapper.nestedTypeFilter() - : Queries.newNonNestedFilter(context.mapperService().getIndexSettings().getIndexVersionCreated()); + : Queries.newNonNestedFilter(); this.parentFilter = context.bitsetFilterCache().getBitSetProducer(parentFilter); this.childFilter = childObjectMapper.nestedTypeFilter(); this.collectsFromSingleBucket = collectsFromSingleBucket; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java index 415ae39c71e00..2f29f8f2cdcfc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java @@ -54,7 +54,7 @@ public ReverseNestedAggregator(String name, AggregatorFactories factories, Objec throws IOException { super(name, factories, context, parent, pipelineAggregators, metaData); if (objectMapper == null) { - parentFilter = Queries.newNonNestedFilter(context.mapperService().getIndexSettings().getIndexVersionCreated()); + parentFilter = Queries.newNonNestedFilter(); } else { parentFilter = objectMapper.nestedTypeFilter(); } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index ab2f864bfce35..c23be0f4cb994 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -190,7 +190,7 @@ public void execute(SearchContext context) { private int findRootDocumentIfNested(SearchContext context, LeafReaderContext subReaderContext, int subDocId) throws IOException { if (context.mapperService().hasNested()) { BitSet bits = context.bitsetFilterCache() - .getBitSetProducer(Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated())) + .getBitSetProducer(Queries.newNonNestedFilter()) .getBitSet(subReaderContext); if (!bits.get(subDocId)) { return bits.nextSetBit(subDocId); @@ -363,7 +363,7 @@ private SearchHit.NestedIdentity getInternalNestedIdentity(SearchContext context } parentFilter = nestedParentObjectMapper.nestedTypeFilter(); } else { - parentFilter = Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated()); + parentFilter = Queries.newNonNestedFilter(); } Query childFilter = nestedObjectMapper.nestedTypeFilter(); diff --git a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java index 40e10eb589006..08f042aa69650 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java @@ -106,10 +106,6 @@ public SliceBuilder(String field, int id, int max) { public SliceBuilder(StreamInput in) throws IOException { String field = in.readString(); - if ("_uid".equals(field) && in.getVersion().before(Version.V_6_3_0)) { - // This is safe because _id and _uid are handled the same way in #toFilter - field = 
IdFieldMapper.NAME; - } this.field = field; this.id = in.readVInt(); this.max = in.readVInt(); @@ -117,11 +113,7 @@ public SliceBuilder(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (IdFieldMapper.NAME.equals(field) && out.getVersion().before(Version.V_6_3_0)) { - out.writeString("_uid"); - } else { - out.writeString(field); - } + out.writeString(field); out.writeVInt(id); out.writeVInt(max); } diff --git a/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java index b4358abee0728..4d793de18443d 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java @@ -185,7 +185,7 @@ protected static Nested resolveNested(QueryShardContext context, NestedSortBuild final ObjectMapper objectMapper = context.nestedScope().getObjectMapper(); final Query parentQuery; if (objectMapper == null) { - parentQuery = Queries.newNonNestedFilter(context.indexVersionCreated()); + parentQuery = Queries.newNonNestedFilter(); } else { parentQuery = objectMapper.nestedTypeFilter(); } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java b/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java index fc7ebe4b9644e..9e49d06f2b0b6 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java @@ -84,10 +84,7 @@ void registerNodeConnection(List nodeChannels, ConnectionProfile con for (TcpChannel channel : nodeChannels) { scheduledPing.addChannel(channel); - - channel.addCloseListener(ActionListener.wrap(() -> { - scheduledPing.removeChannel(channel); - })); + channel.addCloseListener(ActionListener.wrap(() -> scheduledPing.removeChannel(channel))); } } diff --git a/server/src/test/java/org/elasticsearch/BuildTests.java b/server/src/test/java/org/elasticsearch/BuildTests.java index e0d8140c708d6..59e289b9e98ef 100644 --- a/server/src/test/java/org/elasticsearch/BuildTests.java +++ b/server/src/test/java/org/elasticsearch/BuildTests.java @@ -199,29 +199,24 @@ public void testSerializationBWC() throws IOException { randomAlphaOfLength(6), randomAlphaOfLength(6), randomBoolean(), randomAlphaOfLength(6))); final List versions = Version.getDeclaredVersions(Version.class); - final Version pre63Version = randomFrom(versions.stream().filter(v -> v.before(Version.V_6_3_0)).collect(Collectors.toList())); final Version post63Pre67Version = randomFrom(versions.stream() .filter(v -> v.onOrAfter(Version.V_6_3_0) && v.before(Version.V_6_7_0)).collect(Collectors.toList())); final Version post67Pre70Version = randomFrom(versions.stream() .filter(v -> v.onOrAfter(Version.V_6_7_0) && v.before(Version.V_7_0_0)).collect(Collectors.toList())); final Version post70Version = randomFrom(versions.stream().filter(v -> v.onOrAfter(Version.V_7_0_0)).collect(Collectors.toList())); - final WriteableBuild pre63 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, pre63Version); final WriteableBuild post63pre67 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post63Pre67Version); final WriteableBuild post67pre70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post67Pre70Version); final WriteableBuild post70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post70Version); - 
assertThat(pre63.build.flavor(), equalTo(Build.Flavor.OSS)); assertThat(post63pre67.build.flavor(), equalTo(dockerBuild.build.flavor())); assertThat(post67pre70.build.flavor(), equalTo(dockerBuild.build.flavor())); assertThat(post70.build.flavor(), equalTo(dockerBuild.build.flavor())); - assertThat(pre63.build.type(), equalTo(Build.Type.UNKNOWN)); assertThat(post63pre67.build.type(), equalTo(Build.Type.TAR)); assertThat(post67pre70.build.type(), equalTo(dockerBuild.build.type())); assertThat(post70.build.type(), equalTo(dockerBuild.build.type())); - assertThat(pre63.build.getQualifiedVersion(), equalTo(pre63Version.toString())); assertThat(post63pre67.build.getQualifiedVersion(), equalTo(post63Pre67Version.toString())); assertThat(post67pre70.build.getQualifiedVersion(), equalTo(post67Pre70Version.toString())); assertThat(post70.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 1fac56886de45..5b33068013965 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -66,6 +66,7 @@ import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.IndexTemplateMissingException; import org.elasticsearch.indices.InvalidIndexTemplateException; import org.elasticsearch.indices.recovery.RecoverFilesRecoveryException; @@ -365,7 +366,7 @@ public void testCircuitBreakingException() throws IOException { } public void testTooManyBucketsException() throws IOException { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_2_0, Version.CURRENT); + Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); MultiBucketConsumerService.TooManyBucketsException ex = serialize(new MultiBucketConsumerService.TooManyBucketsException("Too many buckets", 100), version); assertEquals("Too many buckets", ex.getMessage()); @@ -816,6 +817,7 @@ public void testIds() { ids.put(152, NoSuchRemoteClusterException.class); ids.put(153, RetentionLeaseAlreadyExistsException.class); ids.put(154, RetentionLeaseNotFoundException.class); + ids.put(155, ShardNotInPrimaryModeException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java index 1d2a4ca6d5f75..2de2f259e6ff1 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java @@ -20,6 +20,7 @@ package org.elasticsearch; import org.apache.commons.codec.DecoderException; +import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.search.ShardSearchFailure; @@ -183,4 +184,31 @@ public void testGroupByNullIndex() { ShardOperationFailedException[] groupBy = ExceptionsHelper.groupBy(failures); assertThat(groupBy.length, equalTo(2)); } + + public void testUnwrapCorruption() { + final Throwable corruptIndexException = new 
CorruptIndexException("corrupt", "resource"); + assertThat(ExceptionsHelper.unwrapCorruption(corruptIndexException), equalTo(corruptIndexException)); + + final Throwable corruptionAsCause = new RuntimeException(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionAsCause), equalTo(corruptIndexException)); + + final Throwable corruptionSuppressed = new RuntimeException(); + corruptionSuppressed.addSuppressed(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionSuppressed), equalTo(corruptIndexException)); + + final Throwable corruptionSuppressedOnCause = new RuntimeException(new RuntimeException()); + corruptionSuppressedOnCause.getCause().addSuppressed(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionSuppressedOnCause), equalTo(corruptIndexException)); + + final Throwable corruptionCauseOnSuppressed = new RuntimeException(); + corruptionCauseOnSuppressed.addSuppressed(new RuntimeException(corruptIndexException)); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionCauseOnSuppressed), equalTo(corruptIndexException)); + + assertThat(ExceptionsHelper.unwrapCorruption(new RuntimeException()), nullValue()); + assertThat(ExceptionsHelper.unwrapCorruption(new RuntimeException(new RuntimeException())), nullValue()); + + final Throwable withSuppressedException = new RuntimeException(); + withSuppressedException.addSuppressed(new RuntimeException()); + assertThat(ExceptionsHelper.unwrapCorruption(withSuppressedException), nullValue()); + } } diff --git a/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java b/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java index ad2447cb7b3d0..e0ef29bf7f49e 100644 --- a/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java +++ b/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java @@ -65,19 +65,17 @@ public void testSimulatedSearchRejectionLoad() throws Throwable { client().prepareSearch("test") .setSearchType(SearchType.QUERY_THEN_FETCH) .setQuery(QueryBuilders.matchQuery("field", "1")) - .execute(new ActionListener() { + .execute(new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(SearchResponse searchResponse) { responses.add(searchResponse); - latch.countDown(); } @Override public void onFailure(Exception e) { responses.add(e); - latch.countDown(); } - }); + }, latch)); } latch.await(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index e9b940df3847d..b883d593352c2 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -470,17 +470,7 @@ public void testCancellingTasksThatDontSupportCancellation() throws Exception { connectNodes(testNodes); CountDownLatch checkLatch = new CountDownLatch(1); CountDownLatch responseLatch = new CountDownLatch(1); - Task task = startBlockingTestNodesAction(checkLatch, new ActionListener() { - @Override - public void onResponse(NodesResponse nodeResponses) { - responseLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - responseLatch.countDown(); - } - }); + Task task = startBlockingTestNodesAction(checkLatch, ActionListener.wrap(responseLatch::countDown)); String actionName = 
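/*
 * Illustrative sketch, inferred only from the testUnwrapCorruption assertions above (not the
 * actual ExceptionsHelper implementation): the test expects unwrapCorruption to search the whole
 * throwable graph - causes and suppressed exceptions, recursively - for a corruption exception
 * and to return null when none is present. A plain-Java approximation of that traversal:
 */
import org.apache.lucene.index.CorruptIndexException;

final class UnwrapCorruptionSketch {
    static Throwable unwrapCorruption(Throwable t) {
        if (t == null) {
            return null;
        }
        if (t instanceof CorruptIndexException) {
            return t;
        }
        // look through suppressed exceptions (and anything they wrap) first
        for (Throwable suppressed : t.getSuppressed()) {
            Throwable found = unwrapCorruption(suppressed);
            if (found != null) {
                return found;
            }
        }
        // then walk down the cause chain
        return unwrapCorruption(t.getCause());
    }
}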
"internal:testAction"; // only pick the main action // Try to cancel main task using action name diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java index e8bd14b640dfa..485b2dc3fb4d1 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java @@ -19,16 +19,13 @@ package org.elasticsearch.action.admin.cluster.settings; -import org.elasticsearch.Version; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractStreamableXContentTestCase; -import org.elasticsearch.test.VersionUtils; -import java.io.IOException; import java.util.List; import java.util.Set; import java.util.function.Predicate; @@ -100,8 +97,4 @@ protected ClusterUpdateSettingsResponse createBlankInstance() { return new ClusterUpdateSettingsResponse(); } - public void testOldSerialisation() throws IOException { - ClusterUpdateSettingsResponse original = createTestInstance(); - assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0)); - } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java index cca95e09151ef..f86beff7738e3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.equalTo; @@ -48,7 +47,7 @@ public void testBwcSerialization() throws Exception { { final CloseIndexResponse response = randomResponse(); try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_2_0))); + out.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_1_0)); response.writeTo(out); final AcknowledgedResponse deserializedResponse = new AcknowledgedResponse(); @@ -65,7 +64,7 @@ public void testBwcSerialization() throws Exception { final CloseIndexResponse deserializedResponse = new CloseIndexResponse(); try (StreamInput in = out.bytes().streamInput()) { - in.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_2_0))); + in.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_1_0)); deserializedResponse.readFrom(in); } assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged())); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java 
b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index feeb9646e40bf..b14bdd0ed9883 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -65,7 +65,6 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.Arrays; -import java.util.List; import java.util.Map; import java.util.stream.IntStream; @@ -165,11 +164,8 @@ public void testCreateShrinkIndexToN() { } public void testShrinkIndexPrimaryTerm() throws Exception { - final List factors = Arrays.asList(2, 3, 5, 7); - final List numberOfShardsFactors = randomSubsetOf(scaledRandomIntBetween(1, factors.size() - 1), factors); - final int numberOfShards = numberOfShardsFactors.stream().reduce(1, (x, y) -> x * y); - final int numberOfTargetShards = randomSubsetOf(randomInt(numberOfShardsFactors.size() - 1), numberOfShardsFactors) - .stream().reduce(1, (x, y) -> x * y); + int numberOfShards = randomIntBetween(2, 20); + int numberOfTargetShards = randomValueOtherThanMany(n -> numberOfShards % n != 0, () -> randomIntBetween(1, numberOfShards - 1)); internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", numberOfShards)).get(); @@ -218,7 +214,7 @@ public void testShrinkIndexPrimaryTerm() throws Exception { final Settings.Builder prepareShrinkSettings = Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true); client().admin().indices().prepareUpdateSettings("source").setSettings(prepareShrinkSettings).get(); - ensureGreen(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards final IndexMetaData indexMetaData = indexMetaData(client(), "source"); final long beforeShrinkPrimaryTerm = IntStream.range(0, numberOfShards).mapToLong(indexMetaData::primaryTerm).max().getAsLong(); @@ -228,7 +224,7 @@ public void testShrinkIndexPrimaryTerm() throws Exception { Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", numberOfTargetShards).build(); assertAcked(client().admin().indices().prepareResizeIndex("source", "target").setSettings(shrinkSettings).get()); - ensureGreen(); + ensureGreen(TimeValue.timeValueSeconds(120)); final IndexMetaData afterShrinkIndexMetaData = indexMetaData(client(), "target"); for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java index 0cc3f455e83df..a38de844626dc 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java @@ -20,14 +20,11 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractStreamableXContentTestCase; -import org.elasticsearch.test.VersionUtils; -import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -131,9 +128,4 @@ protected RolloverResponse 
mutateInstance(RolloverResponse response) { throw new UnsupportedOperationException(); } } - - public void testOldSerialisation() throws IOException { - RolloverResponse original = createTestInstance(); - assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0)); - } } diff --git a/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java b/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java index 1dff130fb98a6..2ee9cb9e1397e 100644 --- a/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java @@ -41,7 +41,7 @@ protected MainResponse createTestInstance() { ClusterName clusterName = new ClusterName(randomAlphaOfLength(10)); String nodeName = randomAlphaOfLength(10); final String date = new Date(randomNonNegativeLong()).toString(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_1, Version.CURRENT); + Version version = VersionUtils.randomIndexCompatibleVersion(random()); Build build = new Build( Build.Flavor.UNKNOWN, Build.Type.UNKNOWN, randomAlphaOfLength(8), date, randomBoolean(), version.toString() diff --git a/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java index 55c39f735ce31..bcb4a1200b7e8 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -42,32 +43,24 @@ public class ClearScrollControllerTests extends ESTestCase { - public void testClearAll() throws IOException, InterruptedException { + public void testClearAll() throws InterruptedException { DiscoveryNode node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNode node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNode node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); CountDownLatch latch = new CountDownLatch(1); - ActionListener listener = new ActionListener() { + ActionListener listener = new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(ClearScrollResponse clearScrollResponse) { - try { - assertEquals(3, clearScrollResponse.getNumFreed()); - assertTrue(clearScrollResponse.isSucceeded()); - } finally { - latch.countDown(); - } + assertEquals(3, clearScrollResponse.getNumFreed()); + assertTrue(clearScrollResponse.isSucceeded()); } @Override public void onFailure(Exception e) { - try { - throw new AssertionError(e); - } finally { - latch.countDown(); - } + throw new AssertionError(e); } - }; + }, latch); List nodesInvoked = new CopyOnWriteArrayList<>(); SearchTransportService searchTransportService = new SearchTransportService(null, null) { @Override @@ -112,27 +105,18 @@ public void testClearScrollIds() throws IOException, InterruptedException { String scrollId = 
TransportSearchHelper.buildScrollId(array); DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); CountDownLatch latch = new CountDownLatch(1); - ActionListener listener = new ActionListener() { + ActionListener listener = new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(ClearScrollResponse clearScrollResponse) { - try { - assertEquals(numFreed.get(), clearScrollResponse.getNumFreed()); - assertTrue(clearScrollResponse.isSucceeded()); - } finally { - latch.countDown(); - } - + assertEquals(numFreed.get(), clearScrollResponse.getNumFreed()); + assertTrue(clearScrollResponse.isSucceeded()); } @Override public void onFailure(Exception e) { - try { - throw new AssertionError(e); - } finally { - latch.countDown(); - } + throw new AssertionError(e); } - }; + }, latch); List nodesInvoked = new CopyOnWriteArrayList<>(); SearchTransportService searchTransportService = new SearchTransportService(null, null) { @@ -185,32 +169,22 @@ public void testClearScrollIdsWithFailure() throws IOException, InterruptedExcep DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); CountDownLatch latch = new CountDownLatch(1); - ActionListener listener = new ActionListener() { + ActionListener listener = new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(ClearScrollResponse clearScrollResponse) { - try { - assertEquals(numFreed.get(), clearScrollResponse.getNumFreed()); - if (numFailures.get() > 0) { - assertFalse(clearScrollResponse.isSucceeded()); - } else { - assertTrue(clearScrollResponse.isSucceeded()); - } - - } finally { - latch.countDown(); + assertEquals(numFreed.get(), clearScrollResponse.getNumFreed()); + if (numFailures.get() > 0) { + assertFalse(clearScrollResponse.isSucceeded()); + } else { + assertTrue(clearScrollResponse.isSucceeded()); } - } @Override public void onFailure(Exception e) { - try { - throw new AssertionError(e); - } finally { - latch.countDown(); - } + throw new AssertionError(e); } - }; + }, latch); List nodesInvoked = new CopyOnWriteArrayList<>(); SearchTransportService searchTransportService = new SearchTransportService(null, null) { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index a0c4626e9543f..8f1d89a37daaa 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -80,7 +80,7 @@ public void testRandomVersionSerialization() throws IOException { SearchRequest deserializedRequest = copyWriteable(searchRequest, namedWriteableRegistry, SearchRequest::new, version); assertEquals(searchRequest.isCcsMinimizeRoundtrips(), deserializedRequest.isCcsMinimizeRoundtrips()); assertEquals(searchRequest.getLocalClusterAlias(), deserializedRequest.getLocalClusterAlias()); - assertEquals(searchRequest.getOrCreateAbsoluteStartMillis(), deserializedRequest.getOrCreateAbsoluteStartMillis()); + assertEquals(searchRequest.getAbsoluteStartMillis(), deserializedRequest.getAbsoluteStartMillis()); assertEquals(searchRequest.isFinalReduce(), deserializedRequest.isFinalReduce()); } diff --git a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java index f222bcc015c62..96d057f50c4f7 100644 --- 
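/*
 * Illustrative sketch, not part of this diff: the test changes above swap hand-rolled
 * "count the latch down in both callbacks" listeners for LatchedActionListener, which delegates
 * and then counts down in a finally block so the latch is released on success and on failure
 * alike. A minimal stand-in:
 */
import java.util.concurrent.CountDownLatch;

import org.elasticsearch.action.ActionListener;

final class LatchedListenerSketch {
    static <T> ActionListener<T> latched(ActionListener<T> delegate, CountDownLatch latch) {
        return new ActionListener<T>() {
            @Override
            public void onResponse(T value) {
                try {
                    delegate.onResponse(value);
                } finally {
                    latch.countDown();
                }
            }

            @Override
            public void onFailure(Exception e) {
                try {
                    delegate.onFailure(e);
                } finally {
                    latch.countDown();
                }
            }
        };
    }
}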
a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.Node; import org.elasticsearch.tasks.Task; @@ -65,7 +66,7 @@ public void shutdown() throws Exception { terminate(threadPool); } - public void testActionFiltersRequest() throws ExecutionException, InterruptedException { + public void testActionFiltersRequest() throws InterruptedException { int numFilters = randomInt(10); Set orders = new HashSet<>(numFilters); while (orders.size() < numFilters) { @@ -139,7 +140,7 @@ protected void doExecute(Task task, TestRequest request, ActionListener failures = new CopyOnWriteArrayList<>(); - transportAction.execute(new TestRequest(), new ActionListener() { + transportAction.execute(new TestRequest(), new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(TestResponse testResponse) { responses.incrementAndGet(); - latch.countDown(); } @Override public void onFailure(Exception e) { failures.add(e); - latch.countDown(); } - }); + }, latch)); if (!latch.await(10, TimeUnit.SECONDS)) { fail("timeout waiting for the filter to notify the listener as many times as expected"); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 12cc9097b652c..4459aa5556988 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -64,9 +64,11 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; +import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ReplicationGroup; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -390,6 +392,43 @@ public void testNotStartedPrimary() { assertIndexShardCounter(0); } + public void testShardNotInPrimaryMode() { + final String index = "test"; + final ShardId shardId = new ShardId(index, "_na_", 0); + final ClusterState state = state(index, true, ShardRoutingState.RELOCATING); + setState(clusterService, state); + final ReplicationTask task = maybeTask(); + final Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + final AtomicBoolean executed = new AtomicBoolean(); + + final ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard(); + final long primaryTerm = state.metaData().index(index).primaryTerm(shardId.id()); + final TransportReplicationAction.ConcreteShardRequest primaryRequest + = new TransportReplicationAction.ConcreteShardRequest<>(request, 
primaryShard.allocationId().getId(), primaryTerm); + + isPrimaryMode.set(false); + + new TestAction(Settings.EMPTY, "internal:test-action", transportService, clusterService, shardStateAction, threadPool) { + @Override + protected void shardOperationOnPrimary(Request shardRequest, IndexShard primary, + ActionListener> listener) { + assertPhase(task, "primary"); + assertFalse(executed.getAndSet(true)); + super.shardOperationOnPrimary(shardRequest, primary, listener); + } + }.new AsyncPrimaryAction(primaryRequest, listener, task).run(); + + assertFalse(executed.get()); + assertIndexShardCounter(0); // no permit should be held + + final ExecutionException e = expectThrows(ExecutionException.class, listener::get); + assertThat(e.getCause(), instanceOf(ReplicationOperation.RetryOnPrimaryException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); + assertThat(e.getCause().getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause().getCause(), hasToString(containsString("shard is not in primary mode"))); + } + /** * When relocating a primary shard, there is a cluster state update at the end of relocation where the active primary is switched from * the relocation source to the relocation target. If relocation source receives and processes this cluster state @@ -1126,6 +1165,8 @@ private void assertIndexShardCounter(int expected) { private final AtomicBoolean isRelocated = new AtomicBoolean(false); + private final AtomicBoolean isPrimaryMode = new AtomicBoolean(true); + /** * Sometimes build a ReplicationTask for tracking the phase of the * TransportReplicationAction. Since TransportReplicationAction has to work @@ -1271,10 +1312,16 @@ private IndexService mockIndexService(final IndexMetaData indexMetaData, Cluster private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService) { final IndexShard indexShard = mock(IndexShard.class); when(indexShard.shardId()).thenReturn(shardId); + when(indexShard.state()).thenReturn(IndexShardState.STARTED); doAnswer(invocation -> { ActionListener callback = (ActionListener) invocation.getArguments()[0]; - count.incrementAndGet(); - callback.onResponse(count::decrementAndGet); + if (isPrimaryMode.get()) { + count.incrementAndGet(); + callback.onResponse(count::decrementAndGet); + + } else { + callback.onFailure(new ShardNotInPrimaryModeException(shardId, IndexShardState.STARTED)); + } return null; }).when(indexShard).acquirePrimaryOperationPermit(any(ActionListener.class), anyString(), anyObject()); doAnswer(invocation -> { diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 1a7e5a73e7523..57b30d3484bc9 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -369,17 +369,7 @@ public void testConcurrentWriteReplicaResultCompletion() throws InterruptedExcep CountDownLatch completionLatch = new CountDownLatch(1); threadPool.generic().execute(() -> { waitForBarrier.run(); - replicaResult.respond(new ActionListener() { - @Override - public void onResponse(TransportResponse.Empty empty) { - completionLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - completionLatch.countDown(); - } - }); + 
replicaResult.respond(ActionListener.wrap(completionLatch::countDown)); }); if (randomBoolean()) { threadPool.generic().execute(() -> { diff --git a/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java b/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java index 4bd6c15853aa0..51a34d94b3a05 100644 --- a/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java @@ -32,9 +32,7 @@ import java.util.List; import static java.util.EnumSet.copyOf; -import static org.elasticsearch.test.VersionUtils.getPreviousVersion; import static org.elasticsearch.test.VersionUtils.randomVersion; -import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.CoreMatchers.endsWith; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.not; @@ -62,45 +60,6 @@ public void testSerialization() throws Exception { } } - public void testBwcSerialization() throws Exception { - for (int runs = 0; runs < randomIntBetween(5, 20); runs++) { - // Generate a random cluster block in version < 7.0.0 - final Version version = randomVersionBetween(random(), Version.V_6_0_0, getPreviousVersion(Version.V_6_7_0)); - final ClusterBlock expected = randomClusterBlock(version); - assertNull(expected.uuid()); - - // Serialize to node in current version - final BytesStreamOutput out = new BytesStreamOutput(); - expected.writeTo(out); - - // Deserialize and check the cluster block - final ClusterBlock actual = new ClusterBlock(out.bytes().streamInput()); - assertClusterBlockEquals(expected, actual); - } - - for (int runs = 0; runs < randomIntBetween(5, 20); runs++) { - // Generate a random cluster block in current version - final ClusterBlock expected = randomClusterBlock(Version.CURRENT); - - // Serialize to node in version < 7.0.0 - final BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(randomVersionBetween(random(), Version.V_6_0_0, getPreviousVersion(Version.V_6_7_0))); - expected.writeTo(out); - - // Deserialize and check the cluster block - final StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); - final ClusterBlock actual = new ClusterBlock(in); - - assertThat(actual.id(), equalTo(expected.id())); - assertThat(actual.status(), equalTo(expected.status())); - assertThat(actual.description(), equalTo(expected.description())); - assertThat(actual.retryable(), equalTo(expected.retryable())); - assertThat(actual.disableStatePersistence(), equalTo(expected.disableStatePersistence())); - assertArrayEquals(actual.levels().toArray(), expected.levels().toArray()); - } - } - public void testToStringDanglingComma() { final ClusterBlock clusterBlock = randomClusterBlock(); assertThat(clusterBlock.toString(), not(endsWith(","))); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java index 35fa5786bbda3..e20559ca00561 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.coordination; import org.elasticsearch.Version; -import org.elasticsearch.cluster.coordination.JoinTaskExecutor; import org.elasticsearch.cluster.metadata.IndexMetaData; import 
org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -30,7 +29,6 @@ import org.elasticsearch.test.VersionUtils; import static org.elasticsearch.test.VersionUtils.getPreviousVersion; -import static org.elasticsearch.test.VersionUtils.incompatibleFutureVersion; import static org.elasticsearch.test.VersionUtils.maxCompatibleVersion; import static org.elasticsearch.test.VersionUtils.randomCompatibleVersion; import static org.elasticsearch.test.VersionUtils.randomVersion; @@ -89,21 +87,9 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { }); } - if (minNodeVersion.before(Version.V_6_0_0)) { - Version tooHigh = incompatibleFutureVersion(minNodeVersion); - expectThrows(IllegalStateException.class, () -> { - if (randomBoolean()) { - JoinTaskExecutor.ensureNodesCompatibility(tooHigh, nodes); - } else { - JoinTaskExecutor.ensureNodesCompatibility(tooHigh, minNodeVersion, maxNodeVersion); - } - }); - } + Version oldMajor = Version.V_6_4_0.minimumCompatibilityVersion(); + expectThrows(IllegalStateException.class, () -> JoinTaskExecutor.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); - if (minNodeVersion.onOrAfter(Version.V_7_0_0)) { - Version oldMajor = Version.V_6_4_0.minimumCompatibilityVersion(); - expectThrows(IllegalStateException.class, () -> JoinTaskExecutor.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); - } final Version minGoodVersion = maxNodeVersion.major == minNodeVersion.major ? // we have to stick with the same major diff --git a/server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java b/server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java index a1236fd53df92..bb759848dc4b7 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java @@ -20,33 +20,21 @@ package org.elasticsearch.common.lucene.search; import org.apache.lucene.index.Term; -import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; -import org.elasticsearch.Version; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; public class QueriesTests extends ESTestCase { public void testNonNestedQuery() { - for (Version version : VersionUtils.allVersions()) { - // This is a custom query that extends AutomatonQuery and want to make sure the equals method works - assertEquals(Queries.newNonNestedFilter(version), Queries.newNonNestedFilter(version)); - assertEquals(Queries.newNonNestedFilter(version).hashCode(), Queries.newNonNestedFilter(version).hashCode()); - if (version.onOrAfter(Version.V_6_1_0)) { - assertEquals(Queries.newNonNestedFilter(version), new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME)); - } else { - assertEquals(Queries.newNonNestedFilter(version), new BooleanQuery.Builder() - .add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER) - .add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT) - .build()); - } - } + // This is a custom query that extends AutomatonQuery and want to make sure the equals method works + assertEquals(Queries.newNonNestedFilter(), Queries.newNonNestedFilter()); + 
assertEquals(Queries.newNonNestedFilter().hashCode(), Queries.newNonNestedFilter().hashCode()); + assertEquals(Queries.newNonNestedFilter(), new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME)); } public void testIsNegativeQuery() { diff --git a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java index b18daf07bf361..94945dc92c952 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java @@ -37,7 +37,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import static org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.loadDocIdAndVersion; @@ -189,23 +188,16 @@ public void testCacheFilterReader() throws Exception { } public void testLuceneVersionOnUnknownVersions() { - List allVersions = VersionUtils.allVersions(); - - // should have the same Lucene version as the latest 6.x version - Version version = Version.fromString("6.88.50"); - assertEquals(allVersions.get(Collections.binarySearch(allVersions, Version.V_7_0_0) - 1).luceneVersion, - version.luceneVersion); - // between two known versions, should use the lucene version of the previous version - version = Version.fromString("6.2.50"); - assertEquals(VersionUtils.getPreviousVersion(Version.V_6_2_4).luceneVersion, version.luceneVersion); + Version version = VersionUtils.getPreviousVersion(Version.CURRENT); + assertEquals(Version.fromId(version.id + 100).luceneVersion, version.luceneVersion); // too old version, major should be the oldest supported lucene version minus 1 version = Version.fromString("5.2.1"); - assertEquals(Version.V_6_0_0.luceneVersion.major - 1, version.luceneVersion.major); + assertEquals(VersionUtils.getFirstVersion().luceneVersion.major - 1, version.luceneVersion.major); // future version, should be the same version as today - version = Version.fromString("8.77.1"); + version = Version.fromId(Version.CURRENT.id + 100); assertEquals(Version.CURRENT.luceneVersion, version.luceneVersion); } } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 220392a952c29..b2f73db90f722 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -964,4 +964,13 @@ public void testAffixMapUpdateWithNullSettingValue() { assertEquals("", value); } + public void testNonSecureSettingInKeystore() { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("foo", "bar"); + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + Setting setting = Setting.simpleString("foo", Property.NodeScope); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> setting.get(settings)); + assertThat(e.getMessage(), containsString("must be stored inside elasticsearch.yml")); + } + } diff --git a/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java b/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java index 2b125127f66d3..4ef095da049ec 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java @@ -45,7 +45,8 @@ public 
class DateUtilsTests extends ESTestCase { private static final Set IGNORE = new HashSet<>(Arrays.asList( - "Eire", "Europe/Dublin" // dublin timezone in joda does not account for DST + "Eire", "Europe/Dublin", // dublin timezone in joda does not account for DST + "Asia/Qostanay" // this has been added in joda 2.10.2 but is not part of the JDK 12.0.1 tzdata yet )); public void testTimezoneIds() { diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTests.java index 1a32064fe7daa..46021344fb73a 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTests.java @@ -34,12 +34,13 @@ public void testConcurrent() throws InterruptedException { final AtomicInteger count = new AtomicInteger(0); final CountDown countDown = new CountDown(scaledRandomIntBetween(10, 1000)); Thread[] threads = new Thread[between(3, 10)]; - final CountDownLatch latch = new CountDownLatch(1); + final CountDownLatch latch = new CountDownLatch(1 + threads.length); for (int i = 0; i < threads.length; i++) { threads[i] = new Thread() { @Override public void run() { + latch.countDown(); try { latch.await(); } catch (InterruptedException e) { diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java index e50e205ff1386..2160052619c11 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java @@ -45,8 +45,8 @@ public void testIfMapEmptyAfterLotsOfAcquireAndReleases() throws InterruptedExce for (int i = 0; i < names.length; i++) { names[i] = randomRealisticUnicodeOfLengthBetween(10, 20); } - CountDownLatch startLatch = new CountDownLatch(1); int numThreads = randomIntBetween(3, 10); + final CountDownLatch startLatch = new CountDownLatch(1 + numThreads); AcquireAndReleaseThread[] threads = new AcquireAndReleaseThread[numThreads]; for (int i = 0; i < numThreads; i++) { threads[i] = new AcquireAndReleaseThread(startLatch, connectionLock, names, counter, safeCounter); @@ -157,6 +157,7 @@ public AcquireAndReleaseThread(CountDownLatch startLatch, KeyedLock conn @Override public void run() { + startLatch.countDown(); try { startLatch.await(); } catch (InterruptedException e) { diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java index e833edc9d56b3..a41d37be2150a 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java @@ -45,9 +45,10 @@ public void testRunOnceConcurrently() throws InterruptedException { final RunOnce runOnce = new RunOnce(counter::incrementAndGet); final Thread[] threads = new Thread[between(3, 10)]; - final CountDownLatch latch = new CountDownLatch(1); + final CountDownLatch latch = new CountDownLatch(1 + threads.length); for (int i = 0; i < threads.length; i++) { threads[i] = new Thread(() -> { + latch.countDown(); try { latch.await(); } catch (InterruptedException e) { diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java 
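/*
 * Illustrative sketch, not part of this diff: the CountDownTests/KeyedLockTests/RunOnceTests
 * changes above all switch to a "1 + numberOfThreads" latch that doubles as a start barrier -
 * each worker counts down and then awaits, and the coordinating thread contributes the final
 * count - so no worker runs ahead before every thread has actually been started. Stand-alone
 * illustration of the pattern:
 */
import java.util.concurrent.CountDownLatch;

final class StartBarrierSketch {
    public static void main(String[] args) throws InterruptedException {
        int numberOfThreads = 4;
        CountDownLatch startLatch = new CountDownLatch(1 + numberOfThreads);
        Thread[] threads = new Thread[numberOfThreads];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(() -> {
                startLatch.countDown();      // signal "I am ready"
                try {
                    startLatch.await();      // ...and wait until everyone else is too
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
                // the contended work under test would run here
            });
            threads[i].start();
        }
        startLatch.countDown();              // the final count releases all workers at once
        for (Thread thread : threads) {
            thread.join();
        }
    }
}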
b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java index 36f75c79a1792..37e260a01d069 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -19,12 +19,18 @@ package org.elasticsearch.env; +import org.elasticsearch.Version; +import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; +import java.nio.file.Path; + +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.startsWith; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -86,4 +92,35 @@ public Settings onNodeStopped(String nodeName) { + Node.NODE_DATA_SETTING.getKey() + "=false, but has shard data")); } + + private IllegalStateException expectThrowsOnRestart(CheckedConsumer onNodeStopped) { + internalCluster().startNode(); + final Path[] dataPaths = internalCluster().getInstance(NodeEnvironment.class).nodeDataPaths(); + return expectThrows(IllegalStateException.class, + () -> internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + try { + onNodeStopped.accept(dataPaths); + } catch (Exception e) { + throw new AssertionError(e); + } + return Settings.EMPTY; + } + })); + } + + public void testFailsToStartIfDowngraded() { + final IllegalStateException illegalStateException = expectThrowsOnRestart(dataPaths -> + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(randomAlphaOfLength(10), NodeMetaDataTests.tooNewVersion()), dataPaths)); + assertThat(illegalStateException.getMessage(), + allOf(startsWith("cannot downgrade a node from version ["), endsWith("] to version [" + Version.CURRENT + "]"))); + } + + public void testFailsToStartIfUpgradedTooFar() { + final IllegalStateException illegalStateException = expectThrowsOnRestart(dataPaths -> + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(randomAlphaOfLength(10), NodeMetaDataTests.tooOldVersion()), dataPaths)); + assertThat(illegalStateException.getMessage(), + allOf(startsWith("cannot upgrade a node from version ["), endsWith("] directly to version [" + Version.CURRENT + "]"))); + } } diff --git a/server/src/test/java/org/elasticsearch/env/NodeMetaDataTests.java b/server/src/test/java/org/elasticsearch/env/NodeMetaDataTests.java new file mode 100644 index 0000000000000..59cf6247f9613 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/env/NodeMetaDataTests.java @@ -0,0 +1,118 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.env; + +import org.elasticsearch.Version; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.gateway.MetaDataStateFormat; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; + +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; + +public class NodeMetaDataTests extends ESTestCase { + private Version randomVersion() { + // VersionUtils.randomVersion() only returns known versions, which are necessarily no later than Version.CURRENT; however we want + // also to consider our behaviour with all versions, so occasionally pick up a truly random version. + return rarely() ? Version.fromId(randomInt()) : VersionUtils.randomVersion(random()); + } + + public void testEqualsHashcodeSerialization() { + final Path tempDir = createTempDir(); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(new NodeMetaData(randomAlphaOfLength(10), randomVersion()), + nodeMetaData -> { + final long generation = NodeMetaData.FORMAT.writeAndCleanup(nodeMetaData, tempDir); + final Tuple nodeMetaDataLongTuple + = NodeMetaData.FORMAT.loadLatestStateWithGeneration(logger, xContentRegistry(), tempDir); + assertThat(nodeMetaDataLongTuple.v2(), equalTo(generation)); + return nodeMetaDataLongTuple.v1(); + }, nodeMetaData -> { + if (randomBoolean()) { + return new NodeMetaData(randomAlphaOfLength(21 - nodeMetaData.nodeId().length()), nodeMetaData.nodeVersion()); + } else { + return new NodeMetaData(nodeMetaData.nodeId(), randomValueOtherThan(nodeMetaData.nodeVersion(), this::randomVersion)); + } + }); + } + + public void testReadsFormatWithoutVersion() throws IOException { + // the behaviour tested here is only appropriate if the current version is compatible with versions 7 and earlier + assertTrue(Version.CURRENT.minimumIndexCompatibilityVersion().onOrBefore(Version.V_7_0_0)); + // when the current version is incompatible with version 7, the behaviour should change to reject files like the given resource + // which do not have the version field + + final Path tempDir = createTempDir(); + final Path stateDir = Files.createDirectory(tempDir.resolve(MetaDataStateFormat.STATE_DIR_NAME)); + final InputStream resource = this.getClass().getResourceAsStream("testReadsFormatWithoutVersion.binary"); + assertThat(resource, notNullValue()); + Files.copy(resource, stateDir.resolve(NodeMetaData.FORMAT.getStateFileName(between(0, Integer.MAX_VALUE)))); + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), tempDir); + assertThat(nodeMetaData.nodeId(), equalTo("y6VUVMSaStO4Tz-B5BxcOw")); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.V_EMPTY)); + } + + public void testUpgradesLegitimateVersions() { + final String nodeId = randomAlphaOfLength(10); + final NodeMetaData nodeMetaData = new NodeMetaData(nodeId, + randomValueOtherThanMany(v -> v.after(Version.CURRENT) || v.before(Version.CURRENT.minimumIndexCompatibilityVersion()), + this::randomVersion)).upgradeToCurrentVersion(); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); + 
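The upgrade tests in this new file pin down two bounds on the on-disk node version: anything newer than the current version must fail with "cannot downgrade", anything older than the minimum index-compatible version must fail with "cannot upgrade ... directly", and a missing version is accepted and rewritten. A rough sketch of that check, using plain version ids as a stand-in for the real Version objects (an assumed shape, not the actual NodeMetaData implementation):

public class NodeVersionBoundsSketch {
    // Throws if on-disk node metadata cannot be upgraded to the current version.
    static void checkUpgradable(int onDiskVersion, int currentVersion, int minimumCompatibleVersion) {
        if (onDiskVersion > currentVersion) {
            throw new IllegalStateException(
                "cannot downgrade a node from version [" + onDiskVersion + "] to version [" + currentVersion + "]");
        }
        if (onDiskVersion != 0 && onDiskVersion < minimumCompatibleVersion) { // 0 stands in for V_EMPTY here
            throw new IllegalStateException(
                "cannot upgrade a node from version [" + onDiskVersion + "] directly to version [" + currentVersion + "]");
        }
        // otherwise the metadata is rewritten with the current version
    }

    public static void main(String[] args) {
        checkUpgradable(7_00_00_99, 8_00_00_99, 7_00_00_99); // within the compatible range: accepted
        checkUpgradable(0, 8_00_00_99, 7_00_00_99);          // missing version: accepted and upgraded
        try {
            checkUpgradable(9_00_00_99, 8_00_00_99, 7_00_00_99);
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage()); // "cannot downgrade a node from version ..."
        }
    }
}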
assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + } + + public void testUpgradesMissingVersion() { + final String nodeId = randomAlphaOfLength(10); + final NodeMetaData nodeMetaData = new NodeMetaData(nodeId, Version.V_EMPTY).upgradeToCurrentVersion(); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + } + + public void testDoesNotUpgradeFutureVersion() { + final IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> new NodeMetaData(randomAlphaOfLength(10), tooNewVersion()) + .upgradeToCurrentVersion()); + assertThat(illegalStateException.getMessage(), + allOf(startsWith("cannot downgrade a node from version ["), endsWith("] to version [" + Version.CURRENT + "]"))); + } + + public void testDoesNotUpgradeAncientVersion() { + final IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> new NodeMetaData(randomAlphaOfLength(10), tooOldVersion()).upgradeToCurrentVersion()); + assertThat(illegalStateException.getMessage(), + allOf(startsWith("cannot upgrade a node from version ["), endsWith("] directly to version [" + Version.CURRENT + "]"))); + } + + public static Version tooNewVersion() { + return Version.fromId(between(Version.CURRENT.id + 1, 99999999)); + } + + public static Version tooOldVersion() { + return Version.fromId(between(1, Version.CURRENT.minimumIndexCompatibilityVersion().id - 1)); + } +} diff --git a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java new file mode 100644 index 0000000000000..704617c7b5e95 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java @@ -0,0 +1,155 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.env; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.WriteStateException; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.Path; + +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class OverrideNodeVersionCommandTests extends ESTestCase { + + private Environment environment; + private Path[] nodePaths; + + @Before + public void createNodePaths() throws IOException { + final Settings settings = buildEnvSettings(Settings.EMPTY); + environment = TestEnvironment.newEnvironment(settings); + try (NodeEnvironment nodeEnvironment = new NodeEnvironment(settings, environment)) { + nodePaths = nodeEnvironment.nodeDataPaths(); + } + } + + public void testFailsOnEmptyPath() { + final Path emptyPath = createTempDir(); + final MockTerminal mockTerminal = new MockTerminal(); + final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, new Path[]{emptyPath}, environment)); + assertThat(elasticsearchException.getMessage(), equalTo(OverrideNodeVersionCommand.NO_METADATA_MESSAGE)); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + } + + public void testFailsIfUnnecessary() throws WriteStateException { + final Version nodeVersion = Version.fromId(between(Version.CURRENT.minimumIndexCompatibilityVersion().id, Version.CURRENT.id)); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(randomAlphaOfLength(10), nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment)); + assertThat(elasticsearchException.getMessage(), allOf( + containsString("compatible with current version"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + } + + public void testWarnsIfTooOld() throws Exception { + final String nodeId = randomAlphaOfLength(10); + final Version nodeVersion = NodeMetaDataTests.tooOldVersion(); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + mockTerminal.addTextInput("n\n"); + final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment)); + assertThat(elasticsearchException.getMessage(), equalTo("aborted by user")); + assertThat(mockTerminal.getOutput(), allOf( + containsString("too old"), + containsString("data loss"), + containsString("You should not use this tool"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + assertThat(nodeMetaData.nodeVersion(), 
equalTo(nodeVersion)); + } + + public void testWarnsIfTooNew() throws Exception { + final String nodeId = randomAlphaOfLength(10); + final Version nodeVersion = NodeMetaDataTests.tooNewVersion(); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + mockTerminal.addTextInput(randomFrom("yy", "Yy", "n", "yes", "true", "N", "no")); + final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment)); + assertThat(elasticsearchException.getMessage(), equalTo("aborted by user")); + assertThat(mockTerminal.getOutput(), allOf( + containsString("data loss"), + containsString("You should not use this tool"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + assertThat(nodeMetaData.nodeVersion(), equalTo(nodeVersion)); + } + + public void testOverwritesIfTooOld() throws Exception { + final String nodeId = randomAlphaOfLength(10); + final Version nodeVersion = NodeMetaDataTests.tooOldVersion(); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + mockTerminal.addTextInput(randomFrom("y", "Y")); + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment); + assertThat(mockTerminal.getOutput(), allOf( + containsString("too old"), + containsString("data loss"), + containsString("You should not use this tool"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()), + containsString(OverrideNodeVersionCommand.SUCCESS_MESSAGE))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); + } + + public void testOverwritesIfTooNew() throws Exception { + final String nodeId = randomAlphaOfLength(10); + final Version nodeVersion = NodeMetaDataTests.tooNewVersion(); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + mockTerminal.addTextInput(randomFrom("y", "Y")); + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment); + assertThat(mockTerminal.getOutput(), allOf( + containsString("data loss"), + containsString("You should not use this tool"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()), + containsString(OverrideNodeVersionCommand.SUCCESS_MESSAGE))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); + } +} diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java index 
d6fd80f3513c2..4049cec796102 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java @@ -32,8 +32,7 @@ private GatewayService createService(final Settings.Builder settings) { final ClusterService clusterService = new ClusterService(Settings.builder().put("cluster.name", "GatewayServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null); - return new GatewayService(settings.build(), - null, clusterService, null, null, null, null); + return new GatewayService(settings.build(), null, clusterService, null, null, null); } public void testDefaultRecoverAfterTime() { diff --git a/server/src/test/java/org/elasticsearch/gateway/MockGatewayMetaState.java b/server/src/test/java/org/elasticsearch/gateway/MockGatewayMetaState.java index 7541ca860def6..7749a0edc37b8 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MockGatewayMetaState.java +++ b/server/src/test/java/org/elasticsearch/gateway/MockGatewayMetaState.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.MetaDataUpgrader; import org.elasticsearch.transport.TransportService; @@ -46,10 +45,9 @@ public class MockGatewayMetaState extends GatewayMetaState { public MockGatewayMetaState(Settings settings, NodeEnvironment nodeEnvironment, NamedXContentRegistry xContentRegistry, DiscoveryNode localNode) throws IOException { - super(settings, nodeEnvironment, new MetaStateService(nodeEnvironment, xContentRegistry), + super(settings, new MetaStateService(nodeEnvironment, xContentRegistry), mock(MetaDataIndexUpgradeService.class), mock(MetaDataUpgrader.class), - mock(TransportService.class), mock(ClusterService.class), - mock(IndicesService.class)); + mock(TransportService.class), mock(ClusterService.class)); this.localNode = localNode; } diff --git a/server/src/test/java/org/elasticsearch/get/LegacyGetActionIT.java b/server/src/test/java/org/elasticsearch/get/LegacyGetActionIT.java deleted file mode 100644 index 4382f677ad63e..0000000000000 --- a/server/src/test/java/org/elasticsearch/get/LegacyGetActionIT.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.get; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESIntegTestCase; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.get.GetActionIT.indexOrAlias; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; - -public class LegacyGetActionIT extends ESIntegTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - public void testGetFieldsMetaDataWithRouting() throws Exception { - assertAcked(prepareCreate("test") - .addMapping("_doc", "field1", "type=keyword,store=true") - .addAlias(new Alias("alias")) - .setSettings( - Settings.builder() - .put("index.refresh_interval", -1) - .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_0_0))); // multi-types in 6.0.0 - - try (XContentBuilder source = jsonBuilder().startObject().field("field1", "value").endObject()) { - client() - .prepareIndex("test", "_doc", "1") - .setRouting("1") - .setSource(source) - .get(); - } - - { - final GetResponse getResponse = client() - .prepareGet(indexOrAlias(), "_doc", "1") - .setRouting("1") - .setStoredFields("field1") - .get(); - assertThat(getResponse.isExists(), equalTo(true)); - assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false)); - assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value")); - assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true)); - assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1")); - } - - flush(); - - { - final GetResponse getResponse = client() - .prepareGet(indexOrAlias(), "_doc", "1") - .setStoredFields("field1") - .setRouting("1") - .get(); - assertThat(getResponse.isExists(), equalTo(true)); - assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false)); - assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value")); - assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true)); - assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1")); - } - } - -} diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index cdc38cd3abd0d..3ca1bec5a4b57 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.Collection; @@ -61,21 +62,21 @@ public void testThatDefaultAndStandardAnalyzerAreTheSameInstance() { public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() { assertThat(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT), - is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_6_0_0))); + is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT.minimumIndexCompatibilityVersion()))); } public 
void testThatInstancesAreCachedAndReused() { assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT), PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT)); // same es version should be cached - assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_2_1), - PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_2_1)); - assertNotSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_0_0), - PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_0_1)); + Version v = VersionUtils.randomVersion(random()); + assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(v), PreBuiltAnalyzers.STANDARD.getAnalyzer(v)); + assertNotSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT), + PreBuiltAnalyzers.STANDARD.getAnalyzer(VersionUtils.randomPreviousCompatibleVersion(random(), Version.CURRENT))); // Same Lucene version should be cached: - assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_1), - PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_2)); + assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.fromString("5.0.0")), + PreBuiltAnalyzers.STOP.getAnalyzer(Version.fromString("5.0.1"))); } public void testThatAnalyzersAreUsedInMapping() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java index 34200b51cb317..1a6e6e2c90aed 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -28,9 +26,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.VersionUtils; - -import static org.hamcrest.CoreMatchers.containsString; public class AllFieldMapperTests extends ESSingleNodeTestCase { @@ -39,64 +34,6 @@ protected boolean forbidPrivateIndexSettings() { return false; } - public void testAllDisabled() throws Exception { - { - final Version version = VersionUtils.randomVersionBetween(random(), - Version.V_6_0_0, Version.V_7_0_0.minimumCompatibilityVersion()); - IndexService indexService = createIndex("test_6x", - Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, version) - .build() - ); - String mappingDisabled = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("_all") - .field("enabled", false) - .endObject().endObject() - ); - indexService.mapperService().merge("_doc", new CompressedXContent(mappingDisabled), MergeReason.MAPPING_UPDATE); - - String mappingEnabled = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("_all") - .field("enabled", true) - .endObject().endObject() - ); - MapperParsingException exc = expectThrows(MapperParsingException.class, - () -> indexService.mapperService().merge("_doc", new CompressedXContent(mappingEnabled), MergeReason.MAPPING_UPDATE)); - assertThat(exc.getMessage(), containsString("[_all] is disabled in this version.")); - } - { - IndexService indexService = createIndex("test"); - String mappingEnabled = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("_all") - 
.field("enabled", true) - .endObject().endObject() - ); - MapperParsingException exc = expectThrows(MapperParsingException.class, - () -> indexService.mapperService().merge("_doc", new CompressedXContent(mappingEnabled), MergeReason.MAPPING_UPDATE)); - assertThat(exc.getMessage(), containsString("unsupported parameters: [_all")); - - String mappingDisabled = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("_all") - .field("enabled", false) - .endObject().endObject() - ); - exc = expectThrows(MapperParsingException.class, - () -> indexService.mapperService().merge("_doc", new CompressedXContent(mappingDisabled), MergeReason.MAPPING_UPDATE)); - assertThat(exc.getMessage(), containsString("unsupported parameters: [_all")); - - String mappingAll = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("_all").endObject().endObject() - ); - exc = expectThrows(MapperParsingException.class, - () -> indexService.mapperService().merge("_doc", new CompressedXContent(mappingAll), MergeReason.MAPPING_UPDATE)); - assertThat(exc.getMessage(), containsString("unsupported parameters: [_all")); - - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().endObject()); - indexService.mapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); - assertEquals("{\"_doc\":{}}", indexService.mapperService().documentMapper("_doc").mapping().toString()); - } - } - public void testUpdateDefaultSearchAnalyzer() throws Exception { IndexService indexService = createIndex("test", Settings.builder() .put("index.analysis.analyzer.default_search.type", "custom") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java index d80776007aba8..e5d3040f7a3bc 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java @@ -169,6 +169,12 @@ public void testExternalValuesWithMultifield() throws Exception { assertThat(raw, notNullValue()); assertThat(raw.binaryValue(), is(new BytesRef("foo"))); + + assertWarnings("At least one multi-field, [field], was " + + "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + + "no longer be supported in 8.0. To resolve the issue, all instances of [fields] that occur within a [fields] block " + + "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + + "switching to [copy_to] if appropriate."); } public void testExternalValuesWithMultifieldTwoLevels() throws Exception { @@ -234,5 +240,11 @@ public void testExternalValuesWithMultifieldTwoLevels() throws Exception { assertThat(doc.rootDoc().getField("field.raw"), notNullValue()); assertThat(doc.rootDoc().getField("field.raw").stringValue(), is("foo")); + + assertWarnings("At least one multi-field, [field], was " + + "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + + "no longer be supported in 8.0. 
To resolve the issue, all instances of [fields] that occur within a [fields] block " + + "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + + "switching to [copy_to] if appropriate."); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index d3f41589fb1fd..edca517830833 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -19,10 +19,7 @@ package org.elasticsearch.index.mapper; -import java.util.HashSet; import org.apache.lucene.index.IndexableField; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; @@ -35,12 +32,12 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.io.UncheckedIOException; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -698,55 +695,4 @@ protected boolean forbidPrivateIndexSettings() { */ return false; } - - public void testReorderParentBWC() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") - .startObject("nested1").field("type", "nested").endObject() - .endObject().endObject().endObject()); - - Version bwcVersion = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0); - for (Version version : new Version[] {Version.V_6_5_0, bwcVersion}) { - DocumentMapper docMapper = createIndex("test-" + version, - Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), version).build()) - .mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - - assertThat(docMapper.hasNestedObjects(), equalTo(true)); - ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1"); - assertThat(nested1Mapper.nested().isNested(), equalTo(true)); - - ParsedDocument doc = docMapper.parse(new SourceToParse("test", "type", "1", - BytesReference.bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field", "value") - .startArray("nested1") - .startObject() - .field("field1", "1") - .field("field2", "2") - .endObject() - .startObject() - .field("field1", "3") - .field("field2", "4") - .endObject() - .endArray() - .endObject()), - XContentType.JSON)); - - assertThat(doc.docs().size(), equalTo(3)); - if (version.onOrAfter(Version.V_6_5_0)) { - assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString())); - assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("1")); - assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("2")); - assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("3")); - assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("4")); - assertThat(doc.docs().get(2).get("field"), equalTo("value")); - } else { - assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), 
equalTo(nested1Mapper.nestedTypePathAsString())); - assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("3")); - assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("4")); - assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("1")); - assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("2")); - assertThat(doc.docs().get(2).get("field"), equalTo("value")); - } - } - } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java index 4e6f504e99263..79a93c04faa1f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java @@ -67,7 +67,7 @@ public void testTermsQuery() throws Exception { Mockito.when(mapperService.hasNested()).thenReturn(true); query = ft.termQuery("my_type", context); - assertEquals(Queries.newNonNestedFilter(context.indexVersionCreated()), query); + assertEquals(Queries.newNonNestedFilter(), query); mapper = Mockito.mock(DocumentMapper.class); Mockito.when(mapper.type()).thenReturn("other_type"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java index bc59c59aa54ab..70f469b96370c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java @@ -24,7 +24,11 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; import org.elasticsearch.index.analysis.AnalysisMode; @@ -36,6 +40,7 @@ import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -157,6 +162,38 @@ public void testParseTextFieldCheckAnalyzerWithSearchAnalyzerAnalysisMode() { TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext); } + public void testMultiFieldWithinMultiField() throws IOException { + TextFieldMapper.Builder builder = new TextFieldMapper.Builder("textField"); + + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject() + .field("type", "keyword") + .startObject("fields") + .startObject("sub-field") + .field("type", "keyword") + .startObject("fields") + .startObject("sub-sub-field") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + + Map fieldNode = XContentHelper.convertToMap( + BytesReference.bytes(mapping), true, mapping.contentType()).v2(); + + Mapper.TypeParser typeParser = new KeywordFieldMapper.TypeParser(); + Mapper.TypeParser.ParserContext parserContext = new Mapper.TypeParser.ParserContext("type", + null, null, type -> typeParser, Version.CURRENT, null); + + TypeParsers.parseField(builder, "some-field", fieldNode, parserContext); + assertWarnings("At least one multi-field, [sub-field], was " + + 
"encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + + "no longer be supported in 8.0. To resolve the issue, all instances of [fields] that occur within a [fields] block " + + "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + + "switching to [copy_to] if appropriate."); + } + private Analyzer createAnalyzerWithMode(String name, AnalysisMode mode) { TokenFilterFactory tokenFilter = new AbstractTokenFilterFactory(indexSettings, name, Settings.EMPTY) { @Override diff --git a/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java index a5329856630d5..47bd8d8a34c14 100644 --- a/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.NormsFieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; -import org.elasticsearch.Version; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; @@ -65,26 +64,7 @@ protected void doAssertLuceneQuery(ExistsQueryBuilder queryBuilder, Query query, Collection fields = context.getQueryShardContext().simpleMatchToIndexNames(fieldPattern); Collection mappedFields = fields.stream().filter((field) -> context.getQueryShardContext().getObjectMapper(field) != null || context.getQueryShardContext().getMapperService().fullName(field) != null).collect(Collectors.toList()); - if (context.mapperService().getIndexSettings().getIndexVersionCreated().before(Version.V_6_1_0)) { - if (fields.size() == 1) { - assertThat(query, instanceOf(ConstantScoreQuery.class)); - ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) query; - String field = expectedFieldName(fields.iterator().next()); - assertThat(constantScoreQuery.getQuery(), instanceOf(TermQuery.class)); - TermQuery termQuery = (TermQuery) constantScoreQuery.getQuery(); - assertEquals(field, termQuery.getTerm().text()); - } else { - assertThat(query, instanceOf(ConstantScoreQuery.class)); - ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) query; - assertThat(constantScoreQuery.getQuery(), instanceOf(BooleanQuery.class)); - BooleanQuery booleanQuery = (BooleanQuery) constantScoreQuery.getQuery(); - assertThat(booleanQuery.clauses().size(), equalTo(mappedFields.size())); - for (int i = 0; i < mappedFields.size(); i++) { - BooleanClause booleanClause = booleanQuery.clauses().get(i); - assertThat(booleanClause.getOccur(), equalTo(BooleanClause.Occur.SHOULD)); - } - } - } else if (fields.size() == 1 && mappedFields.size() == 0) { + if (fields.size() == 1 && mappedFields.size() == 0) { assertThat(query, instanceOf(MatchNoDocsQuery.class)); MatchNoDocsQuery matchNoDocsQuery = (MatchNoDocsQuery) query; assertThat(matchNoDocsQuery.toString(null), diff --git a/server/src/test/java/org/elasticsearch/index/query/LegacyGeoShapeFieldQueryTests.java b/server/src/test/java/org/elasticsearch/index/query/LegacyGeoShapeFieldQueryTests.java deleted file mode 100644 index dc2a4a0e3fffe..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/query/LegacyGeoShapeFieldQueryTests.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.query; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.geo.ShapeRelation; -import org.elasticsearch.common.geo.SpatialStrategy; -import org.elasticsearch.common.geo.builders.ShapeBuilder; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.geo.RandomShapeGenerator; -import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; - -import java.io.IOException; - -public class LegacyGeoShapeFieldQueryTests extends GeoShapeQueryBuilderTests { - - @Override - protected String fieldName() { - return GEO_SHAPE_FIELD_NAME; - } - - @Override - protected Settings createTestIndexSettings() { - // force the legacy shape impl - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_5_0); - return Settings.builder() - .put(super.createTestIndexSettings()) - .put(IndexMetaData.SETTING_VERSION_CREATED, version) - .build(); - } - - @Override - protected GeoShapeQueryBuilder doCreateTestQueryBuilder(boolean indexedShape) { - ShapeType shapeType = ShapeType.randomType(random()); - ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null, shapeType); - GeoShapeQueryBuilder builder; - clearShapeFields(); - if (indexedShape == false) { - builder = new GeoShapeQueryBuilder(fieldName(), shape); - } else { - indexedShapeToReturn = shape; - indexedShapeId = randomAlphaOfLengthBetween(3, 20); - builder = new GeoShapeQueryBuilder(fieldName(), indexedShapeId); - if (randomBoolean()) { - indexedShapeIndex = randomAlphaOfLengthBetween(3, 20); - builder.indexedShapeIndex(indexedShapeIndex); - } - if (randomBoolean()) { - indexedShapePath = randomAlphaOfLengthBetween(3, 20); - builder.indexedShapePath(indexedShapePath); - } - if (randomBoolean()) { - indexedShapeRouting = randomAlphaOfLengthBetween(3, 20); - builder.indexedShapeRouting(indexedShapeRouting); - } - } - if (randomBoolean()) { - SpatialStrategy strategy = randomFrom(SpatialStrategy.values()); - // ShapeType.MULTILINESTRING + SpatialStrategy.TERM can lead to large queries and will slow down tests, so - // we try to avoid that combination - while (shapeType == ShapeType.MULTILINESTRING && strategy == SpatialStrategy.TERM) { - strategy = randomFrom(SpatialStrategy.values()); - } - builder.strategy(strategy); - if (strategy != SpatialStrategy.TERM) { - builder.relation(randomFrom(ShapeRelation.values())); - } - } - - if (randomBoolean()) { - builder.ignoreUnmapped(randomBoolean()); - } - return builder; - } - - public void testInvalidRelation() throws IOException { - ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null); - GeoShapeQueryBuilder builder = new 
GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape); - builder.strategy(SpatialStrategy.TERM); - expectThrows(IllegalArgumentException.class, () -> builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN))); - GeoShapeQueryBuilder builder2 = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape); - builder2.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN)); - expectThrows(IllegalArgumentException.class, () -> builder2.strategy(SpatialStrategy.TERM)); - GeoShapeQueryBuilder builder3 = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape); - builder3.strategy(SpatialStrategy.TERM); - expectThrows(IllegalArgumentException.class, () -> builder3.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN))); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 5ace39c0890df..001df6deb5647 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -50,7 +50,6 @@ import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; @@ -1033,8 +1032,7 @@ public void testExistsFieldQuery() throws Exception { QueryShardContext context = createShardContext(); QueryStringQueryBuilder queryBuilder = new QueryStringQueryBuilder(STRING_FIELD_NAME + ":*"); Query query = queryBuilder.toQuery(context); - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) - && (context.fieldMapper(STRING_FIELD_NAME).omitNorms() == false)) { + if (context.fieldMapper(STRING_FIELD_NAME).omitNorms() == false) { assertThat(query, equalTo(new ConstantScoreQuery(new NormsFieldExistsQuery(STRING_FIELD_NAME)))); } else { assertThat(query, equalTo(new ConstantScoreQuery(new TermQuery(new Term("_field_names", STRING_FIELD_NAME))))); @@ -1044,8 +1042,7 @@ public void testExistsFieldQuery() throws Exception { String value = (quoted ? "\"" : "") + STRING_FIELD_NAME + (quoted ? 
"\"" : ""); queryBuilder = new QueryStringQueryBuilder("_exists_:" + value); query = queryBuilder.toQuery(context); - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) - && (context.fieldMapper(STRING_FIELD_NAME).omitNorms() == false)) { + if (context.fieldMapper(STRING_FIELD_NAME).omitNorms() == false) { assertThat(query, equalTo(new ConstantScoreQuery(new NormsFieldExistsQuery(STRING_FIELD_NAME)))); } else { assertThat(query, equalTo(new ConstantScoreQuery(new TermQuery(new Term("_field_names", STRING_FIELD_NAME))))); diff --git a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index 30780d5c49018..d270a8c7113b5 100644 --- a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -32,7 +32,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.lucene.BytesRefs; @@ -139,11 +138,9 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, String expectedFieldName = expectedFieldName(queryBuilder.fieldName()); if (queryBuilder.from() == null && queryBuilder.to() == null) { final Query expectedQuery; - if (context.mapperService().getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) - && context.mapperService().fullName(queryBuilder.fieldName()).hasDocValues()) { + if (context.mapperService().fullName(queryBuilder.fieldName()).hasDocValues()) { expectedQuery = new ConstantScoreQuery(new DocValuesFieldExistsQuery(expectedFieldName)); - } else if (context.mapperService().getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) && - context.mapperService().fullName(queryBuilder.fieldName()).omitNorms() == false) { + } else if (context.mapperService().fullName(queryBuilder.fieldName()).omitNorms() == false) { expectedQuery = new ConstantScoreQuery(new NormsFieldExistsQuery(expectedFieldName)); } else { expectedQuery = new ConstantScoreQuery(new TermQuery(new Term(FieldNamesFieldMapper.NAME, expectedFieldName))); @@ -425,8 +422,7 @@ protected MappedFieldType.Relation getRelation(QueryRewriteContext queryRewriteC // Range query with open bounds rewrite to an exists query Query luceneQuery = rewrittenRange.toQuery(queryShardContext); final Query expectedQuery; - if (queryShardContext.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) - && queryShardContext.fieldMapper(query.fieldName()).hasDocValues()) { + if (queryShardContext.fieldMapper(query.fieldName()).hasDocValues()) { expectedQuery = new ConstantScoreQuery(new DocValuesFieldExistsQuery(query.fieldName())); } else { expectedQuery = new ConstantScoreQuery(new TermQuery(new Term(FieldNamesFieldMapper.NAME, query.fieldName()))); diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index 0dccf5937dca7..dd44a386329f9 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -44,7 +44,6 @@ import 
org.apache.lucene.search.join.ToParentBlockJoinQuery; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.TestUtil; -import org.elasticsearch.Version; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; @@ -799,7 +798,7 @@ private static TopFieldDocs search(QueryBuilder queryBuilder, FieldSortBuilder s IndexSearcher searcher) throws IOException { Query query = new BooleanQuery.Builder() .add(queryBuilder.toQuery(queryShardContext), Occur.MUST) - .add(Queries.newNonNestedFilter(Version.CURRENT), Occur.FILTER) + .add(Queries.newNonNestedFilter(), Occur.FILTER) .build(); Sort sort = new Sort(sortBuilder.build(queryShardContext).field); return searcher.search(query, 10, sort); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java index d1bd5712dbadc..2334cb4330887 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; @@ -489,6 +490,48 @@ public void testLoadAndPersistRetentionLeases() throws IOException { assertThat(replicationTracker.loadRetentionLeases(path), equalTo(replicationTracker.getRetentionLeases())); } + public void testUnnecessaryPersistenceOfRetentionLeases() throws IOException { + final AllocationId allocationId = AllocationId.newInitializing(); + long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + primaryTerm, + UNASSIGNED_SEQ_NO, + value -> {}, + () -> 0L, + (leases, listener) -> {}); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId)); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + final int length = randomIntBetween(0, 8); + for (int i = 0; i < length; i++) { + if (rarely() && primaryTerm < Long.MAX_VALUE) { + primaryTerm = randomLongBetween(primaryTerm + 1, Long.MAX_VALUE); + replicationTracker.setOperationPrimaryTerm(primaryTerm); + } + final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + replicationTracker.addRetentionLease( + Integer.toString(i), retainingSequenceNumber, "test-" + i, ActionListener.wrap(() -> {})); + } + + final Path path = createTempDir(); + replicationTracker.persistRetentionLeases(path); + + final Tuple retentionLeasesWithGeneration = + RetentionLeases.FORMAT.loadLatestStateWithGeneration(logger, NamedXContentRegistry.EMPTY, path); + + replicationTracker.persistRetentionLeases(path); + final Tuple retentionLeasesWithGenerationAfterUnnecessaryPersistence = + RetentionLeases.FORMAT.loadLatestStateWithGeneration(logger, NamedXContentRegistry.EMPTY, 
path); + + assertThat(retentionLeasesWithGenerationAfterUnnecessaryPersistence.v1(), equalTo(retentionLeasesWithGeneration.v1())); + assertThat(retentionLeasesWithGenerationAfterUnnecessaryPersistence.v2(), equalTo(retentionLeasesWithGeneration.v2())); + } + /** * Test that we correctly synchronize writing the retention lease state file in {@link ReplicationTracker#persistRetentionLeases(Path)}. * This test can fail without the synchronization block in that method. diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index 92d31e305adc7..cb40a0726d42f 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -356,7 +356,7 @@ public void testRetentionLeasesBackgroundSyncWithSoftDeletesDisabled() throws Ex assertFalse("retention leases background sync must be a noop if soft deletes is disabled", backgroundSyncRequestSent.get()); } - @TestLogging(value = "org.elasticsearch.indices.recovery:trace") + @TestLogging(value = "org.elasticsearch.index:debug,org.elasticsearch.indices.recovery:trace") public void testRetentionLeasesSyncOnRecovery() throws Exception { final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2); internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java index 28444c7825e4d..c63b2ebb6645b 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java @@ -60,7 +60,9 @@ public void testSupersedesByPrimaryTerm() { final long higherPrimaryTerm = randomLongBetween(lowerPrimaryTerm + 1, Long.MAX_VALUE); final RetentionLeases right = new RetentionLeases(higherPrimaryTerm, randomLongBetween(1, Long.MAX_VALUE), Collections.emptyList()); assertTrue(right.supersedes(left)); + assertTrue(right.supersedes(left.primaryTerm(), left.version())); assertFalse(left.supersedes(right)); + assertFalse(left.supersedes(right.primaryTerm(), right.version())); } public void testSupersedesByVersion() { @@ -70,7 +72,9 @@ public void testSupersedesByVersion() { final RetentionLeases left = new RetentionLeases(primaryTerm, lowerVersion, Collections.emptyList()); final RetentionLeases right = new RetentionLeases(primaryTerm, higherVersion, Collections.emptyList()); assertTrue(right.supersedes(left)); + assertTrue(right.supersedes(left.primaryTerm(), left.version())); assertFalse(left.supersedes(right)); + assertFalse(left.supersedes(right.primaryTerm(), right.version())); } public void testRetentionLeasesRejectsDuplicates() { diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 0be7b4433fac3..64886af18332a 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -636,11 +636,13 @@ public void testOperationPermitsOnPrimaryShards() throws Exception { final ShardId shardId = new ShardId("test", "_na_", 0); final IndexShard indexShard; + final boolean isPrimaryMode; if (randomBoolean()) { // relocation target indexShard = newShard(newShardRouting(shardId, "local_node", "other node", true, 
ShardRoutingState.INITIALIZING, AllocationId.newRelocation(AllocationId.newInitializing()))); assertEquals(0, indexShard.getActiveOperationsCount()); + isPrimaryMode = false; } else if (randomBoolean()) { // simulate promotion indexShard = newStartedShard(false); @@ -660,21 +662,60 @@ public void testOperationPermitsOnPrimaryShards() throws Exception { if (randomBoolean()) { assertBusy(() -> assertEquals(0, indexShard.getActiveOperationsCount())); } + isPrimaryMode = true; } else { indexShard = newStartedShard(true); assertEquals(0, indexShard.getActiveOperationsCount()); + isPrimaryMode = true; } - final long primaryTerm = indexShard.getPendingPrimaryTerm(); - Releasable operation1 = acquirePrimaryOperationPermitBlockingly(indexShard); - assertEquals(1, indexShard.getActiveOperationsCount()); - Releasable operation2 = acquirePrimaryOperationPermitBlockingly(indexShard); - assertEquals(2, indexShard.getActiveOperationsCount()); + assert indexShard.getReplicationTracker().isPrimaryMode() == isPrimaryMode; + final long pendingPrimaryTerm = indexShard.getPendingPrimaryTerm(); + if (isPrimaryMode) { + Releasable operation1 = acquirePrimaryOperationPermitBlockingly(indexShard); + assertEquals(1, indexShard.getActiveOperationsCount()); + Releasable operation2 = acquirePrimaryOperationPermitBlockingly(indexShard); + assertEquals(2, indexShard.getActiveOperationsCount()); - Releasables.close(operation1, operation2); - assertEquals(0, indexShard.getActiveOperationsCount()); + Releasables.close(operation1, operation2); + assertEquals(0, indexShard.getActiveOperationsCount()); + } else { + indexShard.acquirePrimaryOperationPermit( + new ActionListener<>() { + @Override + public void onResponse(final Releasable releasable) { + throw new AssertionError(); + } + + @Override + public void onFailure(final Exception e) { + assertThat(e, instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e, hasToString(containsString("shard is not in primary mode"))); + } + }, + ThreadPool.Names.SAME, + "test"); + + final CountDownLatch latch = new CountDownLatch(1); + indexShard.acquireAllPrimaryOperationsPermits( + new ActionListener<>() { + @Override + public void onResponse(final Releasable releasable) { + throw new AssertionError(); + } + + @Override + public void onFailure(final Exception e) { + assertThat(e, instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e, hasToString(containsString("shard is not in primary mode"))); + latch.countDown(); + } + }, + TimeValue.timeValueSeconds(30)); + latch.await(); + } if (Assertions.ENABLED && indexShard.routingEntry().isRelocationTarget() == false) { - assertThat(expectThrows(AssertionError.class, () -> indexShard.acquireReplicaOperationPermit(primaryTerm, + assertThat(expectThrows(AssertionError.class, () -> indexShard.acquireReplicaOperationPermit(pendingPrimaryTerm, indexShard.getGlobalCheckpoint(), indexShard.getMaxSeqNoOfUpdatesOrDeletes(), new ActionListener() { @Override public void onResponse(Releasable releasable) { @@ -1688,10 +1729,9 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { // recovery can be now finalized recoveryThread.join(); assertTrue(shard.isRelocatedPrimary()); - try (Releasable ignored = acquirePrimaryOperationPermitBlockingly(shard)) { - // lock can again be acquired - assertTrue(shard.isRelocatedPrimary()); - } + final ExecutionException e = expectThrows(ExecutionException.class, () -> acquirePrimaryOperationPermitBlockingly(shard)); + assertThat(e.getCause(), 
instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); closeShards(shard); } @@ -1699,30 +1739,66 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { final IndexShard shard = newStartedShard(true); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); + final CountDownLatch startRecovery = new CountDownLatch(1); + final CountDownLatch relocationStarted = new CountDownLatch(1); Thread recoveryThread = new Thread(() -> { try { - shard.relocated(primaryContext -> {}); + startRecovery.await(); + shard.relocated(primaryContext -> relocationStarted.countDown()); } catch (InterruptedException e) { throw new RuntimeException(e); } }); recoveryThread.start(); - List> onLockAcquiredActions = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - PlainActionFuture onLockAcquired = new PlainActionFuture() { - @Override - public void onResponse(Releasable releasable) { - releasable.close(); - super.onResponse(releasable); - } - }; + + final int numberOfAcquisitions = randomIntBetween(1, 10); + final List assertions = new ArrayList<>(numberOfAcquisitions); + final int recoveryIndex = randomIntBetween(0, numberOfAcquisitions - 1); + + for (int i = 0; i < numberOfAcquisitions; i++) { + final PlainActionFuture onLockAcquired; + if (i < recoveryIndex) { + final AtomicBoolean invoked = new AtomicBoolean(); + onLockAcquired = new PlainActionFuture<>() { + + @Override + public void onResponse(Releasable releasable) { + invoked.set(true); + releasable.close(); + super.onResponse(releasable); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(); + } + + }; + assertions.add(() -> assertTrue(invoked.get())); + } else if (recoveryIndex == i) { + startRecovery.countDown(); + relocationStarted.await(); + onLockAcquired = new PlainActionFuture<>(); + assertions.add(() -> { + final ExecutionException e = expectThrows(ExecutionException.class, () -> onLockAcquired.get(30, TimeUnit.SECONDS)); + assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); + }); + } else { + onLockAcquired = new PlainActionFuture<>(); + assertions.add(() -> { + final ExecutionException e = expectThrows(ExecutionException.class, () -> onLockAcquired.get(30, TimeUnit.SECONDS)); + assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); + }); + } + shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.WRITE, "i_" + i); - onLockAcquiredActions.add(onLockAcquired); } - for (PlainActionFuture onLockAcquired : onLockAcquiredActions) { - assertNotNull(onLockAcquired.get(30, TimeUnit.SECONDS)); + for (final Runnable assertion : assertions) { + assertion.run(); } recoveryThread.join(); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index f31ac0627138e..6254449df05a6 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -98,19 +98,8 @@ public Map getMetadataMappers() { public void testBuiltinMappers() { IndicesModule module = new 
IndicesModule(Collections.emptyList()); { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_7_0_0.minimumCompatibilityVersion()); - assertFalse(module.getMapperRegistry().getMapperParsers().isEmpty()); - assertFalse(module.getMapperRegistry().getMetadataMapperParsers(version).isEmpty()); - Map metadataMapperParsers = - module.getMapperRegistry().getMetadataMapperParsers(version); - assertEquals(EXPECTED_METADATA_FIELDS_6x.length, metadataMapperParsers.size()); - int i = 0; - for (String field : metadataMapperParsers.keySet()) { - assertEquals(EXPECTED_METADATA_FIELDS_6x[i++], field); - } - } - { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), + Version.CURRENT.minimumIndexCompatibilityVersion(), Version.CURRENT); assertFalse(module.getMapperRegistry().getMapperParsers().isEmpty()); assertFalse(module.getMapperRegistry().getMetadataMapperParsers(version).isEmpty()); Map metadataMapperParsers = @@ -127,15 +116,12 @@ public void testBuiltinWithPlugins() { IndicesModule noPluginsModule = new IndicesModule(Collections.emptyList()); IndicesModule module = new IndicesModule(fakePlugins); MapperRegistry registry = module.getMapperRegistry(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_7_0_0.minimumCompatibilityVersion()); assertThat(registry.getMapperParsers().size(), greaterThan(noPluginsModule.getMapperRegistry().getMapperParsers().size())); - assertThat(registry.getMetadataMapperParsers(version).size(), - greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers(version).size())); - Map metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(version); + assertThat(registry.getMetadataMapperParsers(Version.CURRENT).size(), + greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers(Version.CURRENT).size())); + Map metadataMapperParsers = + module.getMapperRegistry().getMetadataMapperParsers(Version.CURRENT); Iterator iterator = metadataMapperParsers.keySet().iterator(); - if (version.before(Version.V_7_0_0)) { - assertEquals(AllFieldMapper.NAME, iterator.next()); - } assertEquals(IgnoredFieldMapper.NAME, iterator.next()); String last = null; while(iterator.hasNext()) { diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 1ccf858ed1590..4a77160ce36d0 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.cluster.ClusterName; @@ -385,18 +386,18 @@ public void testDanglingIndicesWithAliasConflict() throws Exception { .numberOfShards(1) .numberOfReplicas(0) .build(); - DanglingListener listener = new DanglingListener(); - dangling.allocateDangled(Arrays.asList(indexMetaData), listener); - listener.latch.await(); + CountDownLatch latch = new CountDownLatch(1); + dangling.allocateDangled(Arrays.asList(indexMetaData), ActionListener.wrap(latch::countDown)); + 
latch.await(); assertThat(clusterService.state(), equalTo(originalState)); // remove the alias client().admin().indices().prepareAliases().removeAlias(indexName, alias).get(); // now try importing a dangling index with the same name as the alias, it should succeed. - listener = new DanglingListener(); - dangling.allocateDangled(Arrays.asList(indexMetaData), listener); - listener.latch.await(); + latch = new CountDownLatch(1); + dangling.allocateDangled(Arrays.asList(indexMetaData), ActionListener.wrap(latch::countDown)); + latch.await(); assertThat(clusterService.state(), not(originalState)); assertNotNull(clusterService.state().getMetaData().index(alias)); } @@ -431,20 +432,6 @@ public void testIndexAndTombstoneWithSameNameOnStartup() throws Exception { indicesService.verifyIndexIsDeleted(tombstonedIndex, clusterState); } - private static class DanglingListener implements LocalAllocateDangledIndices.Listener { - final CountDownLatch latch = new CountDownLatch(1); - - @Override - public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) { - latch.countDown(); - } - - @Override - public void onFailure(Throwable e) { - latch.countDown(); - } - } - /** * Tests that teh {@link MapperService} created by {@link IndicesService#createIndexMapperService(IndexMetaData)} contains * custom types and similarities registered by plugins diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index b63c7a2e0e8f6..b49bef57aceb1 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -438,10 +438,12 @@ protected void failEngine(IOException cause) { handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), () -> 0); fail("exception index"); } catch (RuntimeException ex) { - assertNull(ExceptionsHelper.unwrapCorruption(ex)); + final IOException unwrappedCorruption = ExceptionsHelper.unwrapCorruption(ex); if (throwCorruptedIndexException) { + assertNotNull(unwrappedCorruption); assertEquals(ex.getMessage(), "[File corruption occurred on recovery but checksums are ok]"); } else { + assertNull(unwrappedCorruption); assertEquals(ex.getMessage(), "boom"); } } catch (CorruptIndexException ex) { diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index a3697af50b0b6..59e7c21a3e6e8 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -62,7 +62,6 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.util.ArrayList; @@ -1008,8 +1007,6 @@ private void assertCumulativeQueryCacheStats(IndicesStatsResponse response) { assertEquals(total, shardTotal); } - @TestLogging("_root:DEBUG") // this fails at a very low rate on CI: https://github.com/elastic/elasticsearch/issues/32506 - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/32506") public void testFilterCacheStats() throws Exception { Settings settings = Settings.builder().put(indexSettings()).put("number_of_replicas", 
0).build(); assertAcked(prepareCreate("index").setSettings(settings).get()); @@ -1034,7 +1031,6 @@ public void testFilterCacheStats() throws Exception { IndicesStatsResponse stats = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeQueryCacheStats(stats); assertThat(stats.getTotal().queryCache.getHitCount(), equalTo(0L)); - assertThat(stats.getTotal().queryCache.getEvictions(), equalTo(0L)); assertThat(stats.getTotal().queryCache.getMissCount(), greaterThan(0L)); assertThat(stats.getTotal().queryCache.getCacheSize(), greaterThan(0L)); }); @@ -1045,7 +1041,6 @@ public void testFilterCacheStats() throws Exception { IndicesStatsResponse stats = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeQueryCacheStats(stats); assertThat(stats.getTotal().queryCache.getHitCount(), greaterThan(0L)); - assertThat(stats.getTotal().queryCache.getEvictions(), equalTo(0L)); assertThat(stats.getTotal().queryCache.getMissCount(), greaterThan(0L)); assertThat(stats.getTotal().queryCache.getCacheSize(), greaterThan(0L)); }); diff --git a/server/src/test/java/org/elasticsearch/indices/stats/LegacyIndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/LegacyIndexStatsIT.java deleted file mode 100644 index c8ae3edb886ec..0000000000000 --- a/server/src/test/java/org/elasticsearch/indices/stats/LegacyIndexStatsIT.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.indices.stats; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.test.ESIntegTestCase; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; - -public class LegacyIndexStatsIT extends ESIntegTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - public void testFieldDataFieldsParam() { - assertAcked(client() - .admin() - .indices() - .prepareCreate("test1") - .setSettings(Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_0_0)) - .addMapping("_doc", "bar", "type=text,fielddata=true", "baz", "type=text,fielddata=true") - .get()); - - ensureGreen(); - - client().prepareIndex("test1", "_doc", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); - client().prepareIndex("test1", "_doc", Integer.toString(2)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); - refresh(); - - client().prepareSearch("_all").addSort("bar", SortOrder.ASC).addSort("baz", SortOrder.ASC).execute().actionGet(); - - final IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); - - { - final IndicesStatsResponse stats = builder.execute().actionGet(); - assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields(), is(nullValue())); - } - - { - final IndicesStatsResponse stats = builder.setFieldDataFields("bar").execute().actionGet(); - assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(false)); - } - - { - final IndicesStatsResponse stats = builder.setFieldDataFields("bar", "baz").execute().actionGet(); - assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("baz"), greaterThan(0L)); - } - - { - final IndicesStatsResponse stats = builder.setFieldDataFields("*").execute().actionGet(); - assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("baz"), greaterThan(0L)); - } - - { - final IndicesStatsResponse stats = builder.setFieldDataFields("*r").execute().actionGet(); - 
assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(false)); - } - - } - -} diff --git a/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java b/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java index 5fedfa7869e8b..7ac254f9948f8 100644 --- a/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java @@ -77,9 +77,10 @@ public void testNodeStats() throws Exception { public void testConcurrentAddingAndRemoving() throws Exception { String[] nodes = new String[] {"a", "b", "c", "d"}; - final CountDownLatch latch = new CountDownLatch(1); + final CountDownLatch latch = new CountDownLatch(5); Runnable f = () -> { + latch.countDown(); try { latch.await(); } catch (InterruptedException e) { diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java index aeb4d9b3a9bfb..2ea6567c9f8d0 100644 --- a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java +++ b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -23,9 +23,7 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.persistent.TestPersistentTasksPlugin; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; @@ -72,17 +70,7 @@ public void testEnableAssignmentAfterRestart() throws Exception { for (int i = 0; i < numberOfTasks; i++) { PersistentTasksService service = internalCluster().getInstance(PersistentTasksService.class); service.sendStartRequest("task_" + i, TestPersistentTasksExecutor.NAME, new TestParams(randomAlphaOfLength(10)), - new ActionListener>() { - @Override - public void onResponse(PersistentTask task) { - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - latch.countDown(); - } - }); + ActionListener.wrap(latch::countDown)); } latch.await(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java index 4c7fdccb64b00..aa1ff6f55af82 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java @@ -92,7 +92,9 @@ public void testFiltersSortedByKey() { public void testOtherBucket() throws IOException { XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); builder.startObject(); - builder.startArray("filters").endArray(); + builder.startArray("filters") + .startObject().startObject("term").field("field", "foo").endObject().endObject() + .endArray(); 
builder.endObject(); try (XContentParser parser = createParser(shuffleXContent(builder))) { parser.nextToken(); @@ -102,7 +104,9 @@ public void testOtherBucket() throws IOException { builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); builder.startObject(); - builder.startArray("filters").endArray(); + builder.startArray("filters") + .startObject().startObject("term").field("field", "foo").endObject().endObject() + .endArray(); builder.field("other_bucket_key", "some_key"); builder.endObject(); } @@ -114,7 +118,9 @@ public void testOtherBucket() throws IOException { builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); builder.startObject(); - builder.startArray("filters").endArray(); + builder.startArray("filters") + .startObject().startObject("term").field("field", "foo").endObject().endObject() + .endArray(); builder.field("other_bucket", false); builder.field("other_bucket_key", "some_key"); builder.endObject(); @@ -192,4 +198,30 @@ public void testRewritePreservesOtherBucket() throws IOException { assertEquals(originalFilters.otherBucket(), rewrittenFilters.otherBucket()); assertEquals(originalFilters.otherBucketKey(), rewrittenFilters.otherBucketKey()); } + + public void testEmptyFilters() throws IOException { + { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + builder.startObject(); + builder.startArray("filters").endArray(); // unkeyed array + builder.endObject(); + XContentParser parser = createParser(shuffleXContent(builder)); + parser.nextToken(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> FiltersAggregationBuilder.parse("agg_name", parser)); + assertThat(e.getMessage(), equalTo("[filters] cannot be empty.")); + } + + { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + builder.startObject(); + builder.startObject("filters").endObject(); // keyed object + builder.endObject(); + XContentParser parser = createParser(shuffleXContent(builder)); + parser.nextToken(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> FiltersAggregationBuilder.parse("agg_name", parser)); + assertThat(e.getMessage(), equalTo("[filters] cannot be empty.")); + } + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index 96d66c9e0c269..f3c1ea7ca529a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -37,7 +37,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -57,15 +56,14 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.InternalMax; +import org.elasticsearch.search.aggregations.metrics.InternalSum; import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import 
org.elasticsearch.search.aggregations.metrics.Min; import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.InternalSum; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.ValueType; -import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; @@ -356,7 +354,7 @@ public void testResetRootDocId() throws Exception { fieldType.setName(VALUE_FIELD_NAME); BooleanQuery.Builder bq = new BooleanQuery.Builder(); - bq.add(Queries.newNonNestedFilter(VersionUtils.randomVersion(random())), BooleanClause.Occur.MUST); + bq.add(Queries.newNonNestedFilter(), BooleanClause.Occur.MUST); bq.add(new TermQuery(new Term(IdFieldMapper.NAME, Uid.encodeId("2"))), BooleanClause.Occur.MUST_NOT); InternalNested nested = search(newSearcher(indexReader, false, true), @@ -638,7 +636,7 @@ public void testPreGetChildLeafCollectors() throws IOException { fieldType2.setHasDocValues(true); Filter filter = search(newSearcher(indexReader, false, true), - Queries.newNonNestedFilter(Version.CURRENT), filterAggregationBuilder, fieldType1, fieldType2); + Queries.newNonNestedFilter(), filterAggregationBuilder, fieldType1, fieldType2); assertEquals("filterAgg", filter.getName()); assertEquals(3L, filter.getDocCount()); diff --git a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java index bf053d34bff56..fffa501cc4be4 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java @@ -53,7 +53,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.IndexNumericFieldData; -import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.Rewriteable; @@ -63,6 +62,7 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; @@ -455,21 +455,6 @@ public void testToFilterDeprecationMessage() throws IOException { } } - public void testSerializationBackcompat() throws IOException { - SliceBuilder sliceBuilder = new SliceBuilder(1, 5); - assertEquals(IdFieldMapper.NAME, sliceBuilder.getField()); - - SliceBuilder copy62 = copyWriteable(sliceBuilder, - new NamedWriteableRegistry(Collections.emptyList()), - SliceBuilder::new, Version.V_6_2_0); - assertEquals(sliceBuilder, copy62); - - SliceBuilder copy63 = copyWriteable(copy62, - new NamedWriteableRegistry(Collections.emptyList()), - SliceBuilder::new, Version.V_6_3_0); - assertEquals(sliceBuilder, copy63); - } - public void testToFilterWithRouting() throws IOException { Directory dir = new RAMDirectory(); try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { @@ -489,15 +474,14 @@ public void testToFilterWithRouting() throws IOException { when(clusterService.operationRouting()).thenReturn(routing); when(clusterService.getSettings()).thenReturn(Settings.EMPTY); try (IndexReader 
reader = DirectoryReader.open(dir)) { - QueryShardContext context = createShardContext(Version.CURRENT, reader, "field", DocValuesType.SORTED, 5, 0); + Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + QueryShardContext context = createShardContext(version, reader, "field", DocValuesType.SORTED, 5, 0); SliceBuilder builder = new SliceBuilder("field", 6, 10); String[] routings = new String[] { "foo" }; - Query query = builder.toFilter(clusterService, createRequest(1, routings, null), context, Version.CURRENT); + Query query = builder.toFilter(clusterService, createRequest(1, routings, null), context, version); assertEquals(new DocValuesSliceQuery("field", 6, 10), query); - query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.CURRENT); + query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, version); assertEquals(new DocValuesSliceQuery("field", 6, 10), query); - query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.V_6_2_0); - assertEquals(new DocValuesSliceQuery("field", 1, 2), query); } } } diff --git a/server/src/test/resources/org/elasticsearch/env/testReadsFormatWithoutVersion.binary b/server/src/test/resources/org/elasticsearch/env/testReadsFormatWithoutVersion.binary new file mode 100644 index 0000000000000..3a8bb297e7449 Binary files /dev/null and b/server/src/test/resources/org/elasticsearch/env/testReadsFormatWithoutVersion.binary differ diff --git a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java index 990ae8e1f09a2..8bfa0becaee94 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java @@ -255,4 +255,14 @@ public static Version maxCompatibleVersion(Version version) { public static Version randomIndexCompatibleVersion(Random random) { return randomVersionBetween(random, Version.CURRENT.minimumIndexCompatibilityVersion(), Version.CURRENT); } + + /** + * Returns a random version index compatible with the given version, but not the given version. + */ + public static Version randomPreviousCompatibleVersion(Random random, Version version) { + // TODO: change this to minimumCompatibilityVersion(), but first need to remove released/unreleased + // versions so getPreviousVerison returns the *actual* previous version. 
Otherwise eg 8.0.0 returns say 7.0.2 for previous, + // but 7.2.0 for minimum compat + return randomVersionBetween(random, version.minimumIndexCompatibilityVersion(), getPreviousVersion(version)); + } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java index 0143dad55b0e0..cab910ad6c430 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java @@ -76,8 +76,8 @@ public void testRandomVersionBetween() { assertTrue(got.onOrBefore(VersionUtils.allReleasedVersions().get(0))); // unbounded upper - got = VersionUtils.randomVersionBetween(random(), fromId(7000099), null); - assertTrue(got.onOrAfter(fromId(7000099))); + got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), null); + assertTrue(got.onOrAfter(VersionUtils.getFirstVersion())); assertTrue(got.onOrBefore(Version.CURRENT)); got = VersionUtils.randomVersionBetween(random(), VersionUtils.getPreviousVersion(), null); assertTrue(got.onOrAfter(VersionUtils.getPreviousVersion())); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index 7d7263699be88..fac390b6e7692 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -108,9 +108,8 @@ public void testParseTestSectionWithDoSetAndSkipSectionsNoSkip() throws Exceptio assertThat(testSection, notNullValue()); assertThat(testSection.getName(), equalTo("First test section")); assertThat(testSection.getSkipSection(), notNullValue()); - assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.V_6_0_0)); - assertThat(testSection.getSkipSection().getUpperVersion(), - equalTo(Version.V_6_2_0)); + assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.fromString("6.0.0"))); + assertThat(testSection.getSkipSection().getUpperVersion(), equalTo(Version.fromString("6.2.0"))); assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(testSection.getExecutableSections().size(), equalTo(2)); DoSection doSection = (DoSection)testSection.getExecutableSections().get(0); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index da485a8430e28..48655a61813d8 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -148,7 +148,7 @@ public void testParseTestSetupTeardownAndSections() throws Exception { assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getReason(), equalTo("for newer versions the index name is always returned")); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(), - equalTo(Version.V_6_0_0)); + equalTo(Version.fromString("6.0.0"))); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getUpperVersion(), 
equalTo(Version.CURRENT)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(3)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class)); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java index e2d30d0bc2099..bf73f2efba42a 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java @@ -118,7 +118,7 @@ public void testParseSetupAndSkipSectionNoSkip() throws Exception { assertThat(setupSection, notNullValue()); assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false)); assertThat(setupSection.getSkipSection(), notNullValue()); - assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_6_0_0)); + assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.fromString("6.0.0"))); assertThat(setupSection.getSkipSection().getUpperVersion(), equalTo(Version.V_6_3_0)); assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java index e5e466a82cc18..e92ef2ce13576 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java @@ -37,7 +37,7 @@ public void testSkip() { SkipSection section = new SkipSection("6.0.0 - 6.1.0", randomBoolean() ? Collections.emptyList() : Collections.singletonList("warnings"), "foobar"); assertFalse(section.skip(Version.CURRENT)); - assertTrue(section.skip(Version.V_6_0_0)); + assertTrue(section.skip(Version.fromString("6.0.0"))); section = new SkipSection(randomBoolean() ? 
null : "6.0.0 - 6.1.0", Collections.singletonList("boom"), "foobar"); assertTrue(section.skip(Version.CURRENT)); @@ -54,15 +54,16 @@ public void testMessage() { } public void testParseSkipSectionVersionNoFeature() throws Exception { + Version version = VersionUtils.randomVersion(random()); parser = createParser(YamlXContent.yamlXContent, - "version: \" - 6.1.1\"\n" + + "version: \" - " + version + "\"\n" + "reason: Delete ignores the parent param" ); SkipSection skipSection = SkipSection.parse(parser); assertThat(skipSection, notNullValue()); assertThat(skipSection.getLowerVersion(), equalTo(VersionUtils.getFirstVersion())); - assertThat(skipSection.getUpperVersion(), equalTo(Version.V_6_1_1)); + assertThat(skipSection.getUpperVersion(), equalTo(version)); assertThat(skipSection.getFeatures().size(), equalTo(0)); assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param")); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java index 96bff85389c8a..b2baf40267287 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java @@ -75,8 +75,8 @@ public void testParseWithSkip() throws Exception { TeardownSection section = TeardownSection.parse(parser); assertThat(section, notNullValue()); assertThat(section.getSkipSection().isEmpty(), equalTo(false)); - assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.V_6_0_0)); - assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.V_6_3_0)); + assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.fromString("6.0.0"))); + assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.fromString("6.3.0"))); assertThat(section.getSkipSection().getReason(), equalTo("there is a reason")); assertThat(section.getDoSections().size(), equalTo(2)); assertThat(((DoSection)section.getDoSections().get(0)).getApiCallSection().getApi(), equalTo("delete")); diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index d7517d007d7c8..0075b4989e69f 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -73,6 +73,7 @@ File xpackResources = new File(xpackProject('plugin').projectDir, 'src/test/reso project.copyRestSpec.from(xpackResources) { include 'rest-api-spec/api/**' } +File jwks = new File(xpackProject('test:idp-fixture').projectDir, 'oidc/op-jwks.json') integTestCluster { setting 'xpack.security.enabled', 'true' setting 'xpack.security.authc.api_key.enabled', 'true' @@ -81,9 +82,22 @@ integTestCluster { setting 'xpack.monitoring.exporters._local.type', 'local' setting 'xpack.monitoring.exporters._local.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.authc.realms.file.file.order', '0' + setting 'xpack.security.authc.realms.native.native.order', '1' + setting 'xpack.security.authc.realms.oidc.oidc1.order', '2' + setting 'xpack.security.authc.realms.oidc.oidc1.op.issuer', 'http://127.0.0.1:8080' + setting 'xpack.security.authc.realms.oidc.oidc1.op.authorization_endpoint', "http://127.0.0.1:8080/c2id-login" + setting 'xpack.security.authc.realms.oidc.oidc1.op.token_endpoint', "http://127.0.0.1:8080/c2id/token" + setting 'xpack.security.authc.realms.oidc.oidc1.op.jwkset_path', 'op-jwks.json' + setting 
'xpack.security.authc.realms.oidc.oidc1.rp.redirect_uri', 'https://my.fantastic.rp/cb' + setting 'xpack.security.authc.realms.oidc.oidc1.rp.client_id', 'elasticsearch-rp' + keystoreSetting 'xpack.security.authc.realms.oidc.oidc1.rp.client_secret', 'b07efb7a1cf6ec9462afe7b6d3ab55c6c7880262aa61ac28dded292aca47c9a2' + setting 'xpack.security.authc.realms.oidc.oidc1.rp.response_type', 'id_token' + setting 'xpack.security.authc.realms.oidc.oidc1.claims.principal', 'sub' setupCommand 'setupTestAdmin', 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' waitCondition = waitWithAuth + extraConfigFile 'op-jwks.json', jwks } diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index c04bae90801ee..abad1e38d77fd 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -76,6 +76,8 @@ native realm: * <> * <> +[float] +[[security-openid-apis]] === OpenID Connect You can use the following APIs to authenticate users against an OpenID Connect @@ -110,7 +112,7 @@ include::security/get-users.asciidoc[] include::security/has-privileges.asciidoc[] include::security/invalidate-api-keys.asciidoc[] include::security/invalidate-tokens.asciidoc[] -include::security/ssl.asciidoc[] include::security/oidc-prepare-authentication-api.asciidoc[] include::security/oidc-authenticate-api.asciidoc[] include::security/oidc-logout-api.asciidoc[] +include::security/ssl.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/authenticate.asciidoc b/x-pack/docs/en/rest-api/security/authenticate.asciidoc index 51b0d64419453..d23c410a62389 100644 --- a/x-pack/docs/en/rest-api/security/authenticate.asciidoc +++ b/x-pack/docs/en/rest-api/security/authenticate.asciidoc @@ -46,11 +46,11 @@ The following example output provides information about the "rdeniro" user: "metadata": { }, "enabled": true, "authentication_realm": { - "name" : "default_file", + "name" : "file", "type" : "file" }, "lookup_realm": { - "name" : "default_file", + "name" : "file", "type" : "file" } } diff --git a/x-pack/docs/en/rest-api/security/oidc-authenticate-api.asciidoc b/x-pack/docs/en/rest-api/security/oidc-authenticate-api.asciidoc index 0efb2b23145f7..bc60e4fbf231d 100644 --- a/x-pack/docs/en/rest-api/security/oidc-authenticate-api.asciidoc +++ b/x-pack/docs/en/rest-api/security/oidc-authenticate-api.asciidoc @@ -51,7 +51,7 @@ POST /_security/oidc/authenticate } -------------------------------------------------- // CONSOLE -// TEST[skip:These are properly tested in the OpenIDConnectIT suite] +// TEST[catch:unauthorized] The following example output contains the access token that was generated in response, the amount of time (in seconds) that the token expires in, the type, and the refresh token: diff --git a/x-pack/docs/en/rest-api/security/oidc-logout-api.asciidoc b/x-pack/docs/en/rest-api/security/oidc-logout-api.asciidoc index 6f5288a135f2a..cb8840ca53590 100644 --- a/x-pack/docs/en/rest-api/security/oidc-logout-api.asciidoc +++ b/x-pack/docs/en/rest-api/security/oidc-logout-api.asciidoc @@ -39,7 +39,7 @@ POST /_security/oidc/logout } -------------------------------------------------- // CONSOLE -// TEST[skip:These are properly tested in the OpenIDConnectIT suite] +// TEST[catch:unauthorized] The following example output of the response contains the URI pointing to the End Session Endpoint of the OpenID Connect Provider with all the parameters of the Logout Request, as HTTP GET parameters diff --git 
a/x-pack/docs/en/rest-api/security/oidc-prepare-authentication-api.asciidoc b/x-pack/docs/en/rest-api/security/oidc-prepare-authentication-api.asciidoc index aeb400ce97ef1..a6ce410be6ee6 100644 --- a/x-pack/docs/en/rest-api/security/oidc-prepare-authentication-api.asciidoc +++ b/x-pack/docs/en/rest-api/security/oidc-prepare-authentication-api.asciidoc @@ -57,20 +57,19 @@ POST /_security/oidc/prepare } -------------------------------------------------- // CONSOLE -// TEST[skip:These are properly tested in the OpenIDConnectIT suite] - The following example output of the response contains the URI pointing to the Authorization Endpoint of the OpenID Connect Provider with all the parameters of the Authentication Request, as HTTP GET parameters [source,js] -------------------------------------------------- { - "redirect" : "https://op-provider.org/login?scope=openid&response_type=code&redirect_uri=http%3A%2F%2Foidc-kibana.elastic.co%3A5603%2Fkmi%2Fapi%2Fsecurity%2Fv1%2Foidc&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I&nonce=WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM&client_id=0o43gasov3TxMWJOt839", + "redirect" : "http://127.0.0.1:8080/c2id-login?scope=openid&response_type=id_token&redirect_uri=https%3A%2F%2Fmy.fantastic.rp%2Fcb&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I&nonce=WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM&client_id=elasticsearch-rp", "state" : "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", "nonce" : "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM" } -------------------------------------------------- -// NOTCONSOLE +// TESTRESPONSE[s/4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I/\$\{body.state\}/] +// TESTRESPONSE[s/WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM/\$\{body.nonce\}/] The following example generates an authentication request for the OpenID Connect Realm `oidc1`, where the values for the state and the nonce have been generated by the client @@ -85,7 +84,6 @@ POST /_security/oidc/prepare } -------------------------------------------------- // CONSOLE -// TEST[skip:These are properly tested in the OpenIDConnectIT suite] The following example output of the response contains the URI pointing to the Authorization Endpoint of the OpenID Connect Provider with all the parameters of the Authentication Request, as HTTP GET parameters @@ -93,12 +91,12 @@ OpenID Connect Provider with all the parameters of the Authentication Request, a [source,js] -------------------------------------------------- { - "redirect" : "https://op-provider.org/login?scope=openid&response_type=code&redirect_uri=http%3A%2F%2Foidc-kibana.elastic.co%3A5603%2Fkmi%2Fapi%2Fsecurity%2Fv1%2Foidc&state=lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO&nonce=zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5&client_id=0o43gasov3TxMWJOt839", + "redirect" : "http://127.0.0.1:8080/c2id-login?scope=openid&response_type=id_token&redirect_uri=https%3A%2F%2Fmy.fantastic.rp%2Fcb&state=lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO&nonce=zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5&client_id=elasticsearch-rp", "state" : "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO", "nonce" : "zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5" } -------------------------------------------------- -// NOTCONSOLE +// TESTRESPONSE The following example generates an authentication request for a 3rd party initiated single sign on, specifying the issuer that should be used for matching the appropriate OpenID Connect Authentication realm @@ -107,12 +105,11 @@ issuer that should be used for matching the appropriate OpenID Connect Authentic 
-------------------------------------------------- POST /_security/oidc/prepare { - "issuer" : "https://op-issuer.org:8800", + "iss" : "http://127.0.0.1:8080", "login_hint": "this_is_an_opaque_string" } -------------------------------------------------- // CONSOLE -// TEST[skip:These are properly tested in the OpenIDConnectIT suite] The following example output of the response contains the URI pointing to the Authorization Endpoint of the OpenID Connect Provider with all the parameters of the Authentication Request, as HTTP GET parameters @@ -120,9 +117,10 @@ OpenID Connect Provider with all the parameters of the Authentication Request, a [source,js] -------------------------------------------------- { - "redirect" : "https://op-provider.org/login?scope=openid&response_type=code&redirect_uri=http%3A%2F%2Foidc-kibana.elastic.co%3A5603%2Fkmi%2Fapi%2Fsecurity%2Fv1%2Foidc&state=lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO&nonce=zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5&client_id=0o43gasov3TxMWJOt839&login_hint=this_is_an_opaque_string", + "redirect" : "http://127.0.0.1:8080/c2id-login?login_hint=this_is_an_opaque_string&scope=openid&response_type=id_token&redirect_uri=https%3A%2F%2Fmy.fantastic.rp%2Fcb&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I&nonce=WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM&client_id=elasticsearch-rp", "state" : "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", "nonce" : "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM" } -------------------------------------------------- -// NOTCONSOLE \ No newline at end of file +// TESTRESPONSE[s/4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I/\$\{body.state\}/] +// TESTRESPONSE[s/WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM/\$\{body.nonce\}/] \ No newline at end of file diff --git a/x-pack/docs/en/security/authentication/oidc-guide.asciidoc b/x-pack/docs/en/security/authentication/oidc-guide.asciidoc new file mode 100644 index 0000000000000..df5ce11c63c14 --- /dev/null +++ b/x-pack/docs/en/security/authentication/oidc-guide.asciidoc @@ -0,0 +1,649 @@ +[role="xpack"] +[[oidc-guide]] + +== Configuring single sign-on to the {stack} using OpenID Connect + +The Elastic Stack supports single sign-on (SSO) using OpenID Connect via {kib} using +{es} as the backend service that holds most of the functionality. {kib} and {es} +together represent an OpenID Connect Relying Party (RP) that supports the Authorization +Code Flow as this is defined in the OpenID Connect specification. + +This guide assumes that you have an OpenID Connect Provider where the +Elastic Stack Relying Party will be registered. + +NOTE: The OpenID Connect realm support in {kib} is designed with the expectation that it +will be the primary authentication method for the users of that {kib} instance. The +<> section describes what this entails and how you can set it up to support +other realms if necessary. + +[[oidc-guide-op]] +=== The OpenID Connect Provider + +The OpenID Connect Provider (OP) is the entity in OpenID Connect that is responsible for +authenticating the user and for granting the necessary tokens with the authentication and +user information to be consumed by the Relying Parties. + +In order for the Elastic Stack to be able use your OpenID Connect Provider for authentication, +a trust relationship needs to be established between the OP and the RP. In the OpenID Connect +Provider, this means registering the RP as a client. 
OpenID Connect defines a dynamic client +registration protocol but this is usually geared towards real-time client registration and +not the trust establishment process for cross security domain single sign on. All OPs will +also allow for the manual registration of an RP as a client, via a user interface or (less often) +via the consumption of a metadata document. + +The process for registering the Elastic Stack RP will be different from OP to OP and following +the provider's relevant documentation is prudent. The information for the +RP that you commonly need to provide for registration are the following: + +- `Relying Party Name`: An arbitrary identifier for the relying party. Neither the specification +nor the Elastic Stack implementation impose any constraints on this value. +- `Redirect URI`: This is the URI where the OP will redirect the user's browser after authentication. The +appropriate value for this will depend on your setup and whether or not {kib} sits behind a proxy or +load balancer. It will typically be +$\{kibana-url}/api/security/v1/oidc+ where _$\{kibana-url}_ +is the base URL for your {kib} instance. You might also see this called `Callback URI`. + +At the end of the registration process, the OP will assign a Client Identifier and a Client Secret for the RP ({stack}) to use. +Note these two values as they will be used in the {es} configuration. + +[[oidc-guide-authentication]] +=== Configure {es} for OpenID Connect authentication + +The following is a summary of the configuration steps required in order to enable authentication +using OpenID Connect in {es}: + +. <> +. <> +. <> +. <> + +[[oidc-enable-http]] +==== Enable TLS for HTTP + +If your {es} cluster is operating in production mode, then you must +configure the HTTP interface to use SSL/TLS before you can enable OpenID Connect +authentication. + +For more information, see +{ref}/configuring-tls.html#tls-http[Encrypting HTTP Client Communications]. + +[[oidc-enable-token]] +==== Enable the token service + +The {es} OpenID Connect implementation makes use of the {es} Token Service. This service +is automatically enabled if you configure TLS on the HTTP interface, and can be +explicitly configured by including the following in your `elasticsearch.yml` file: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.token.enabled: true +------------------------------------------------------------ + +[[oidc-create-realm]] +==== Create an OpenID Connect realm + +OpenID Connect based authentication is enabled by configuring the appropriate realm within +the authentication chain for {es}. + +This realm has a few mandatory settings, and a number of optional settings. +The available settings are described in detail in the +{ref}/security-settings.html#ref-oidc-settings[Security settings in {es}]. This +guide will explore the most common settings. + +Create an OpenID Connect (the realm type is `oidc`) realm in your `elasticsearch.yml` file +similar to what is shown below: + +NOTE: The values used below are meant to be an example and are not intended to apply to +every use case. The details below the configuration snippet provide insights and suggestions +to help you pick the proper values, depending on your OP configuration. 
+ +[source, yaml] +------------------------------------------------------------------------------------- +xpack.security.authc.realms.oidc.oidc1: + order: 2 + rp.client_id: "the_client_id" + rp.response_type: code + rp.redirect_uri: "https://kibana.example.org:5601/api/security/v1/oidc" + op.authorization_endpoint: "https://op.example.org/oauth2/v1/authorize" + op.token_endpoint: "https://op.example.org/oauth2/v1/token" + op.userinfo_endpoint: "https://op.example.org/oauth2/v1/userinfo" + op.endsession_endpoint: "https://op.example.org/oauth2/v1/logout" + op.issuer: "https://op.example.org" + op.jwkset_path: oidc/jwkset.json + claims.principal: sub + claims.groups: "http://example.info/claims/groups" +------------------------------------------------------------------------------------- + +The configuration values used in the example above are: + +xpack.security.authc.realms.oidc.oidc1:: + This defines a new `oidc` authentication realm named "oidc1". + See <> for more explanation of realms. + +order:: + You should define a unique order on each realm in your authentication chain. + It is recommended that the OpenID Connect realm be at the bottom of your authentication + chain (that is, that it has the _highest_ order). + +rp.client_id:: + This, usually opaque, arbitrary string, is the Client Identifier that was assigned to the Elastic Stack RP by the OP upon + registration. + +rp.response_type:: + This is an identifier that controls which OpenID Connect authentication flow this RP supports and also + which flow this RP requests the OP should follow. Supported values are + - `code`, which means that the RP wants to use the Authorization Code flow. If your OP supports the + Authorization Code flow, you should select this instead of the Implicit Flow. + - `id_token token` which means that the RP wants to use the Implicit flow and we also request an oAuth2 + access token from the OP, that we can potentially use for follow up requests ( UserInfo ). This + should be selected if the OP offers a UserInfo endpoint in its configuration, or if you know that + the claims you will need to use for role mapping are not available in the ID Token. + - `id_token` which means that the RP wants to use the Implicit flow, but is not interested in getting + an oAuth2 token too. Select this if you are certain that all necessary claims will be contained in + the ID Token or if the OP doesn't offer a User Info endpoint. + +rp.redirect_uri:: + The redirect URI where the OP will redirect the browser after authentication. This needs to be + _exactly_ the same as the one <> and will + typically be +$\{kibana-url}/api/security/v1/oidc+ where _$\{kibana-url}_ is the base URL for your {kib} instance + +op.authorization_endpoint:: + The URL for the Authorization Endpoint in the OP. This is where the user's browser + will be redirected to start the authentication process. The value for this setting should be provided by your + OpenID Connect Provider. + +op.token_endpoint:: + The URL for the Token Endpoint in the OpenID Connect Provider. This is the endpoint where + {es} will send a request to exchange the code for an ID Token, in the case where the Authorization Code + flow is used. The value for this setting should be provided by your OpenID Connect Provider. + +op.userinfo_endpoint:: + (Optional) The URL for the UserInfo Endpoint in the OpenID Connect Provider. This is the endpoint of the OP that + can be queried to get further user information, if required. 
The value for this setting should be provided by your + OpenID Connect Provider. + +op.endsession_endpoint:: + (Optional) The URL to the End Session Endpoint in the OpenID Connect Provider. This is the endpoint where the user's + browser will be redirected after local logout, if the realm is configured for RP initiated Single Logout and + the OP supports it. The value for this setting should be provided by your OpenID Connect Provider. + +op.jwkset_path:: + The path to a file containing a JSON Web Key Set with the key material that the OpenID Connect + Provider uses for signing tokens and claims responses. The path is resolved relative to the {es} + config directory. + {es} will automatically monitor this file for changes and will reload the configuration whenever + it is updated. Your OpenID Connect Provider should provide you with this file. + +claims.principal:: See <>. +claims.groups:: See <>. + +A final piece of configuration of the OpenID Connect realm is to set the `Client Secret` that was assigned +to the RP during registration in the OP. This is a secure setting and as such is not defined in the realm +configuration in `elasticsearch.yml` but added to the {ref}/secure-settings.html[elasticsearch keystore]. +For instance + + +[source,sh] +---- +bin/elasticsearch-keystore add xpack.security.authc.realms.oidc.oidc1.rp.client_secret +---- + + +NOTE: According to the OpenID Connect specification, the OP should also make their configuration +available at a well known URL, which is the concatenation of their `Issuer` value with the +`.well-known/openid-configuration` string. For example: `https://op.org.com/.well-known/openid-configuration` +That document should contain all the necessary information to configure the OpenID Connect realm in {es}. + + +[[oidc-claims-mapping]] +==== Claims mapping + +===== Claims and scopes + +When authenticating to {kib} using OpenID Connect, the OP will provide information about the user +in the form of OpenID Connect Claims, that can be included either in the ID Token, or be retrieved from the +UserInfo endpoint of the OP. The claim is defined as a piece of information asserted by the OP +for the authenticated user. Simply put, a claim is a name/value pair that contains information about +the user. Related to claims, we also have the notion of OpenID Connect Scopes. Scopes are identifiers +that are used to request access to specific lists of claims. The standard defines a set of scope +identifiers that can be requested. The only mandatory one is `openid`, while commonly used ones are +`profile` and `email`. The `profile` scope requests access to the `name`,`family_name`,`given_name`,`middle_name`,`nickname`, +`preferred_username`,`profile`,`picture`,`website`,`gender`,`birthdate`,`zoneinfo`,`locale`, and `updated_at` claims. +The `email` scope requests access to the `email` and `email_verified` claims. The process is that +the RP requests specific scopes during the authentication request. If the OP Privacy Policy +allows it and the authenticating user consents to it, the related claims are returned to the +RP (either in the ID Token or as a UserInfo response). + +The list of the supported claims will vary depending on the OP you are using, but you can expect +the https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims[Standard Claims] to be +largely supported. 
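+
+For illustration only, the following shows a hypothetical set of claims (the values are made
+up and do not come from any specific OP) that an OP might release for a user after the RP has
+requested the `openid`, `profile`, and `email` scopes. It is meant only to show the name/value
+structure of claims as they would appear in a decoded ID Token or in a UserInfo response:
+
+[source,js]
+--------------------------------------------------
+{
+  "iss": "https://op.example.org",
+  "sub": "248289761001",
+  "aud": "the_client_id",
+  "name": "James Wong",
+  "given_name": "James",
+  "family_name": "Wong",
+  "preferred_username": "james.wong",
+  "email": "james.wong@staff.example.com",
+  "email_verified": true,
+  "http://example.info/claims/groups": ["group1", "group2"]
+}
+--------------------------------------------------
+// NOTCONSOLE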
+
+[[oidc-claim-to-property]]
+===== Mapping claims to user properties
+
+The goal of claims mapping is to configure {es} so that it can map the values of
+specified returned claims to one of the <> that are supported
+by {es}. These user properties are then used to identify the user in the {kib} UI or the audit
+logs, and can also be used to create <> rules.
+
+The recommended steps for configuring OpenID Connect claims mapping are as follows:
+
+. Consult your OP configuration to see what claims it might support. Note that
+  the list provided in the OP's metadata or in the configuration page of the OP
+  is a list of potentially supported claims. However, for privacy reasons it might
+  not be complete, and not all supported claims will necessarily be available for all
+  authenticated users.
+
+. Read through the list of <> that {es}
+  supports, and decide which of them are useful to you and can be provided by
+  your OP in the form of claims. At a _minimum_, the `principal` user property
+  is required.
+
+. Configure your OP to "release" those claims to your {stack} Relying
+  Party. This process varies greatly by provider. Some providers use a static
+  configuration, while others allow the RP to request, at authentication time, the scopes
+  that correspond to the claims to be "released". See
+  {ref}/security-settings.html#ref-oidc-settings[`rp.requested_scopes`] for details about how
+  to configure the scopes to request. To ensure interoperability and minimize
+  errors, you should only request scopes that the OP supports and which you
+  intend to map to {es} user properties.
+
+. Configure the OpenID Connect realm in {es} to associate the {es} user properties (see
+  <> below) with the names of the claims that your
+  OP will release. In the example above, we have configured the `principal` and
+  `groups` user properties as follows:
+
+  .. `claims.principal: sub`: This instructs {es} to look for the OpenID Connect claim named `sub`
+     in the ID Token that the OP issued for the user (or in the UserInfo response) and assign the
+     value of this claim to the `principal` user property. `sub` is a commonly used claim for the
+     principal property as it is an identifier of the user in the OP and it is also a required
+     claim of the ID Token, thus offering guarantees that it will be available. It is, however,
+     only used as an example here; the OP may provide another claim that is a better fit for your needs.
+
+  .. `claims.groups: "http://example.info/claims/groups"`: Similarly, this instructs {es} to look
+     for the claim with the name `http://example.info/claims/groups` (note that this is a URI, an
+     identifier treated as a string, and not a URL pointing to a location that will be retrieved)
+     either in the ID Token or in the UserInfo response, and map the value(s) of it to the user
+     property `groups` in {es}. There is no standard claim in the specification that is used for
+     expressing roles or group memberships of the authenticated user in the OP, so the name of the
+     claim that should be mapped here will vary greatly between providers. Consult your OP
+     documentation for more details.
+
+[[oidc-user-properties]]
+===== {es} user properties
+
+The {es} OpenID Connect realm can be configured to map OpenID Connect claims to the
+following properties on the authenticated user:
+
+principal:: _(Required)_
+    This is the _username_ that will be applied to a user that authenticates
+    against this realm.
+    The `principal` appears in places such as the {es} audit logs.
+ +NOTE: If the principal property fails to be mapped from a claim, the authentication fails. + +groups:: _(Recommended)_ + If you wish to use your OP's concept of groups or roles as the basis for a + user's {es} privileges, you should map them with this property. + The `groups` are passed directly to your <>. + +name:: _(Optional)_ The user's full name. +mail:: _(Optional)_ The user's email address. +dn:: _(Optional)_ The user's X.500 _Distinguished Name_. + + +===== Extracting partial values from OpenID Connect claims + +There are some occasions where the value of a claim may contain more information +than you wish to use within {es}. A common example of this is one where the +OP works exclusively with email addresses, but you would like the user's +`principal` to use the _local-name_ part of the email address. +For example if their email address was `james.wong@staff.example.com`, then you +would like their principal to simply be `james.wong`. + +This can be achieved using the `claim_patterns` setting in the {es} +realm, as demonstrated in the realm configuration below: + +[source, yaml] +------------------------------------------------------------------------------------- +xpack.security.authc.realms.oidc.oidc1: + rp.client_id: "the_client_id" + rp.response_type: code + rp.redirect_uri: "https://kibana.example.org:5601/api/security/v1/oidc" + op.authorization_endpoint: "https://op.example.org/oauth2/v1/authorize" + op.token_endpoint: "https://op.example.org/oauth2/v1/token" + op.userinfo_endpoint: "https://op.example.org/oauth2/v1/userinfo" + op.endsession_endpoint: "https://op.example.org/oauth2/v1/logout" + op.issuer: "https://op.example.org" + op.jwkset_path: oidc/jwkset.json + claims.principal: email_verified + claim_patterns.principal: "^([^@]+)@staff\\.example\\.com$" +------------------------------------------------------------------------------------- + +In this case, the user's `principal` is mapped from the `email_verified` claim, but a +regular expression is applied to the value before it is assigned to the user. +If the regular expression matches, then the result of the first group is used as the +effective value. If the regular expression does not match then the claim +mapping fails. + +In this example, the email address must belong to the `staff.example.com` domain, +and then the local-part (anything before the `@`) is used as the principal. +Any users who try to login using a different email domain will fail because the +regular expression will not match against their email address, and thus their +principal user property - which is mandatory - will not be populated. + +IMPORTANT: Small mistakes in these regular expressions can have significant +security consequences. For example, if we accidentally left off the trailing +`$` from the example above, then we would match any email address where the +domain starts with `staff.example.com`, and this would accept an email +address such as `admin@staff.example.com.attacker.net`. It is important that +you make sure your regular expressions are as precise as possible so that +you do not inadvertently open an avenue for user impersonation attacks. + +[[third-party-login]] +==== Third party initiated single sign-on + +The Open ID Connect realm in {es} supports 3rd party initiated login as described in the +https://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin[relevant specification]. 
+
+This allows the OP itself, or a third party other than the RP, to initiate the authentication
+process while requesting that the OP be used for the authentication. Note that the Elastic
+Stack RP must already be configured for this OP in order for this process to succeed.
+
+
+[[oidc-logout]]
+==== OpenID Connect Logout
+
+The OpenID Connect realm in {es} supports RP-Initiated Logout Functionality as
+described in the
+https://openid.net/specs/openid-connect-session-1_0.html#RPLogout[relevant part of the specification].
+
+In this process, the OpenID Connect RP (the Elastic Stack in this case) will redirect the user's
+browser to a predefined URL of the OP after successfully completing a local logout. Depending on its
+configuration, the OP can then also log out the user, and should finally redirect the user's browser
+back to the RP. The `op.endsession_endpoint` in the realm configuration determines the URL in the OP that the browser
+will be redirected to. The `rp.post_logout_redirect_uri` setting determines the URL to redirect
+the user back to after the OP logs them out.
+
+When configuring `rp.post_logout_redirect_uri`, take care not to point this to a URL that
+will trigger re-authentication of the user. For instance, when using OpenID Connect to support
+single sign-on to {kib}, this could be set to +$\{kibana-url}/logged_out+, which will show a user-
+friendly message to the user.
+
+[[oidc-role-mapping]]
+=== Configuring role mappings
+
+When a user authenticates using OpenID Connect, they are identified to the Elastic Stack,
+but this does not automatically grant them access to perform any actions or
+access any data.
+
+Your OpenID Connect users cannot do anything until they are assigned roles. This can be done
+through either the
+{ref}/security-api-put-role-mapping.html[add role mapping API], or with
+<>.
+
+NOTE: You cannot use {stack-ov}/mapping-roles.html#mapping-roles-file[role mapping files]
+to grant roles to users authenticating via OpenID Connect.
+
+This is an example of a simple role mapping that grants the `kibana_user` role
+to any user who authenticates against the `oidc1` OpenID Connect realm:
+
+[source,js]
+--------------------------------------------------
+PUT /_security/role_mapping/oidc-kibana
+{
+  "roles": [ "kibana_user" ],
+  "enabled": true,
+  "rules": {
+    "field": { "realm.name": "oidc1" }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+
+The user properties that are mapped via the realm configuration are used to process
+role mapping rules, and these rules determine which roles a user is granted.
+
+The user fields that are provided to the role
+mapping are derived from the OpenID Connect claims as follows:
+
+- `username`: The `principal` user property
+- `dn`: The `dn` user property
+- `groups`: The `groups` user property
+- `metadata`: See <>
+
+For more information, see <> and
+{ref}/security-api.html#security-role-mapping-apis[role mapping APIs].
+
+If your OP can provide groups or roles to RPs via the use of
+an OpenID Connect claim, then you should map this claim with the `claims.groups` setting in
+the {es} realm (see <>), and then make use of it in a role mapping
+as per the example below.
+
+This mapping grants the {es} `finance_data` role to any users who authenticate
+via the `oidc1` realm with the `finance-team` group membership.
+
+[source,js]
+--------------------------------------------------
+PUT /_security/role_mapping/oidc-finance
+{
+  "roles": [ "finance_data" ],
+  "enabled": true,
+  "rules": { "all": [
+        { "field": { "realm.name": "oidc1" } },
+        { "field": { "groups": "finance-team" } }
+  ] }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+If your users also exist in a repository that can be directly accessed by {es}
+(such as an LDAP directory) then you can use
+<> instead of role mappings.
+
+In this case, you perform the following steps:
+
+1. In your OpenID Connect realm, assign a claim to act as the lookup user ID,
+   by configuring the `claims.principal` setting.
+2. Create a new realm that can look up users from your local repository (e.g. an
+   `ldap` realm).
+3. In your OpenID Connect realm, set `authorization_realms` to the name of the realm you
+   created in step 2.
+
+[[oidc-user-metadata]]
+=== User metadata
+
+By default, users who authenticate via OpenID Connect will have some additional metadata
+fields. These fields will include every OpenID Connect claim that is provided in the authentication response
+(regardless of whether it is mapped to an {es} user property). Each claim is stored in a
+metadata field named `oidc(claim_name)`, where "claim_name" is the name of the
+claim as it was contained in the ID Token or in the UserInfo response. Note that these will
+include all the https://openid.net/specs/openid-connect-core-1_0.html#IDToken[ID Token claims]
+that pertain to the authentication event, rather than the user themselves.
+
+This behaviour can be disabled by adding `populate_user_metadata: false` as
+a setting in the oidc realm.
+
+[[oidc-kibana]]
+=== Configuring {kib}
+
+OpenID Connect authentication in {kib} requires a small number of settings
+in addition to the standard {kib} security configuration. The
+{kibana-ref}/using-kibana-with-security.html[{kib} security documentation]
+provides details on the available configuration options that you can apply.
+
+In particular, since your {es} nodes have been configured to use TLS on the HTTP
+interface, you must configure {kib} to use a `https` URL to connect to {es}, and
+you may need to configure `elasticsearch.ssl.certificateAuthorities` to trust
+the certificates that {es} has been configured to use.
+
+OpenID Connect authentication in {kib} is also subject to the
+`xpack.security.sessionTimeout` setting that is described in the {kib} security
+documentation, and you may wish to adjust this timeout to meet your local needs.
+
+The three additional settings that are required for OpenID Connect support are shown below:
+
+[source, yaml]
+------------------------------------------------------------
+xpack.security.authProviders: [oidc]
+xpack.security.auth.oidc.realm: "oidc1"
+server.xsrf.whitelist: [/api/security/v1/oidc]
+------------------------------------------------------------
+
+The configuration values used in the example above are:
+
+`xpack.security.authProviders`::
+Set this to `[ oidc ]` to instruct {kib} to use OpenID Connect single sign-on as the
+authentication method. This instructs Kibana to attempt to initiate an SSO flow
+every time a user attempts to access a URL in Kibana, if the user is not already
+authenticated. If you also want to allow users to log in with a username and password,
+you must enable the `basic` authProvider too.
For example:
+
+[source, yaml]
+------------------------------------------------------------
+xpack.security.authProviders: [oidc, basic]
+------------------------------------------------------------
+
+This will allow users that haven't already authenticated with OpenID Connect to
+navigate directly to the `/login` page in {kib} in order to use the login form.
+
+`xpack.security.auth.oidc.realm`::
+The name of the OpenID Connect realm in {es} that should handle authentication
+for this Kibana instance.
+
+`server.xsrf.whitelist`::
+{kib} has in-built protection against _Cross Site Request Forgery_ attacks, which
+is designed to prevent the {kib} server from processing requests that
+originated from outside the {kib} application.
+In order to support OpenID Connect messages that originate from your
+OP or a third party (see <>), we need to explicitly _whitelist_ the
+OpenID Connect authentication endpoint within {kib}, so that the {kib} server will
+not reject these external messages.
+
+
+=== OpenID Connect without {kib}
+
+The OpenID Connect realm is designed to allow users to authenticate to {kib}, and as
+such, most parts of the guide above assume that {kib} is used.
+This section describes how a custom web application could use the relevant OpenID
+Connect REST APIs in order to authenticate its users to {es} with OpenID Connect.
+
+Single sign-on realms such as OpenID Connect and SAML make use of the Token Service in
+{es} and in principle exchange a SAML or OpenID Connect Authentication response for
+an {es} access token and a refresh token. The access token is used as credentials for subsequent calls to {es}. The
+refresh token enables the user to get new {es} access tokens after the current one
+expires.
+
+NOTE: The {es} Token Service can be seen as a minimal oAuth2 authorization server,
+and the access token and refresh token mentioned above are tokens that pertain
+_only_ to this authorization server. They are generated and consumed _only_ by {es}
+and are in no way related to the tokens (access token and ID Token) that the
+OpenID Connect Provider issues.
+
+==== Register the RP with an OpenID Connect Provider
+
+The Relying Party ({es} and the custom web app) will need to be registered as
+a client with the OpenID Connect Provider. Note that when registering the
+`Redirect URI`, it needs to be a URL in the custom web app.
+
+==== OpenID Connect Realm
+
+An OpenID Connect realm needs to be created and configured accordingly
+in {es}. See <>.
+
+==== Service Account user for accessing the APIs
+
+The realm is designed with the assumption that there needs to be a privileged entity
+acting as an authentication proxy. In this case, the custom web application is the
+authentication proxy handling the authentication of end users (more correctly,
+"delegating" the authentication to the OpenID Connect Provider). The OpenID Connect
+APIs require authentication and the necessary authorization level for the authenticated
+user. For this reason, a Service Account user needs to be created and assigned a role
+that gives them the `manage_oidc` cluster privilege. The use of the `manage_token`
+cluster privilege will be necessary after the authentication takes place, so that the
+user can maintain access or be subsequently logged out.
+ +[source,js] +-------------------------------------------------- +POST /_security/role/facilitator-role +{ + "cluster" : ["manage_oidc", "manage_token"] +} +-------------------------------------------------- +// CONSOLE + + +[source,js] +-------------------------------------------------- +POST /_security/user/facilitator +{ + "password" : "", + "roles" : [ "facilitator-role"] +} +-------------------------------------------------- +// CONSOLE + + +==== Handling the authentication flow + +On a high level, the custom web application would need to perform the following steps in order to +authenticate a user with OpenID Connect: + +. Make an HTTP POST request to `_security/oidc/prepare`, authenticating as the `facilitator` user, using the name of the +OpenID Connect realm in the {es} configuration in the request body. See the +{ref}/security-api-oidc-prepare-authentication.html[OIDC Prepare Authentication API] for more details ++ +[source,js] +-------------------------------------------------- +POST /_security/oidc/prepare +{ + "realm" : "oidc1" +} +-------------------------------------------------- +// CONSOLE ++ +. Handle the response to `/_security/oidc/prepare`. The response from {es} will contain 3 parameters: + `redirect`, `state`, `nonce`. The custom web application would need to store the values for `state` + and `nonce` in the user's session (client side in a cookie or server side if session information is + persisted this way) and redirect the user's browser to the URL that will be contained in the + `redirect` value. +. Handle a subsequent response from the OP. After the user is successfully authenticated with the + OpenID Connect Provider, they will be redirected back to the callback/redirect URI. Upon receiving + this HTTP GET request, the custom web app will need to make an HTTP POST request to + `_security/oidc/authenticate`, again - authenticating as the `facilitator` user - passing the URL + where the user's browser was redirected to, as a parameter, along with the + values for `nonce` and `state` it had saved in the user's session previously. + See {ref}/security-api-oidc-authenticate.html[OIDC Authenticate API] for more details ++ +[source,js] +----------------------------------------------------------------------- +POST /_security/oidc/authenticate +{ + "redirect_uri" : "https://oidc-kibana.elastic.co:5603/api/security/v1/oidc?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", + "state" : "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", + "nonce" : "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM" +} +----------------------------------------------------------------------- +// CONSOLE +// TEST[catch:unauthorized] ++ +Elasticsearch will validate this and if all is correct will respond with an access token that can be used + as a `Bearer` token for subsequent requests and a refresh token that can be later used to refresh the given + access token as described in {ref}/security-api-get-token.html[get token API]. +. At some point, if necessary, the custom web application can log the user out by using the + {ref}/security-api-oidc-logout.html[OIDC Logout API] passing the access token and refresh token as parameters. 
For example: ++ +[source,js] +-------------------------------------------------- +POST /_security/oidc/logout +{ + "token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", + "refresh_token": "vLBPvmAB6KvwvJZr27cS" +} +-------------------------------------------------- +// CONSOLE +// TEST[catch:unauthorized] ++ +If the realm is configured accordingly, this may result in a response with a `redirect` parameter indicating where +the user needs to be redirected in the OP in order to complete the logout process. diff --git a/x-pack/docs/en/security/securing-communications.asciidoc b/x-pack/docs/en/security/securing-communications.asciidoc index 63fded729eb8c..6672c0316493e 100644 --- a/x-pack/docs/en/security/securing-communications.asciidoc +++ b/x-pack/docs/en/security/securing-communications.asciidoc @@ -5,8 +5,7 @@ Elasticsearch nodes store data that may be confidential. Attacks on the data may come from the network. These attacks could include sniffing of the data, manipulation of the data, and attempts to gain access to the server and thus the -files storing the data. Securing your nodes is required in order to use a production -license that enables {security-features} and helps reduce the risk from +files storing the data. Securing your nodes helps reduce the risk from network-based attacks. This section shows how to: diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java index 62ffd76e8ea05..e39b5b7dcc196 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java @@ -780,22 +780,4 @@ public Builder validate() { } } - /** - * Returns true iff the license is a production licnese - */ - public boolean isProductionLicense() { - switch (operationMode()) { - case MISSING: - case TRIAL: - case BASIC: - return false; - case STANDARD: - case GOLD: - case PLATINUM: - return true; - default: - throw new AssertionError("unknown operation mode: " + operationMode()); - - } - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index 837caf2da070b..f750d1349a0ad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -218,10 +218,13 @@ public void registerLicense(final PutLicenseRequest request, final ActionListene } } + // This check would be incorrect if "basic" licenses were allowed here + // because the defaults there mean that security can be "off", even if the setting is "on" + // BUT basic licenses are explicitly excluded earlier in this method, so we don't need to worry if (XPackSettings.SECURITY_ENABLED.get(settings)) { // TODO we should really validate that all nodes have xpack installed and are consistently configured but this // should happen on a different level and not in this code - if (newLicense.isProductionLicense() + if (XPackLicenseState.isTransportTlsRequired(newLicense, settings) && XPackSettings.TRANSPORT_SSL_ENABLED.get(settings) == false && isProductionMode(settings, clusterService.localNode())) { // security is on but TLS is not configured we gonna fail the entire request and throw an exception diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java index 6d001dea516ac..f131b24252e5b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java @@ -166,13 +166,11 @@ public void writeTo(StreamOutput streamOutput) throws IOException { streamOutput.writeBoolean(true); // has a license license.writeTo(streamOutput); } - if (streamOutput.getVersion().onOrAfter(Version.V_6_1_0)) { - if (trialVersion == null) { - streamOutput.writeBoolean(false); - } else { - streamOutput.writeBoolean(true); - Version.writeVersion(trialVersion, streamOutput); - } + if (trialVersion == null) { + streamOutput.writeBoolean(false); + } else { + streamOutput.writeBoolean(true); + Version.writeVersion(trialVersion, streamOutput); } } @@ -182,11 +180,9 @@ public LicensesMetaData(StreamInput streamInput) throws IOException { } else { license = LICENSE_TOMBSTONE; } - if (streamInput.getVersion().onOrAfter(Version.V_6_1_0)) { - boolean hasExercisedTrial = streamInput.readBoolean(); - if (hasExercisedTrial) { - this.trialVersion = Version.readVersion(streamInput); - } + boolean hasExercisedTrial = streamInput.readBoolean(); + if (hasExercisedTrial) { + this.trialVersion = Version.readVersion(streamInput); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 131069d27f628..e206ed3db5149 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -282,7 +282,7 @@ private static class Status { public XPackLicenseState(Settings settings) { this.listeners = new CopyOnWriteArrayList<>(); this.isSecurityEnabled = XPackSettings.SECURITY_ENABLED.get(settings); - this.isSecurityExplicitlyEnabled = isSecurityEnabled && settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()); + this.isSecurityExplicitlyEnabled = isSecurityEnabled && isSecurityExplicitlyEnabled(settings); } private XPackLicenseState(XPackLicenseState xPackLicenseState) { @@ -292,6 +292,10 @@ private XPackLicenseState(XPackLicenseState xPackLicenseState) { this.status = xPackLicenseState.status; } + private static boolean isSecurityExplicitlyEnabled(Settings settings) { + return settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()); + } + /** * Updates the current state of the license, which will change what features are available. 
* @@ -727,6 +731,25 @@ public synchronized boolean isSecurityDisabledByLicenseDefaults() { return false; } + public static boolean isTransportTlsRequired(License license, Settings settings) { + if (license == null) { + return false; + } + switch (license.operationMode()) { + case STANDARD: + case GOLD: + case PLATINUM: + return XPackSettings.SECURITY_ENABLED.get(settings); + case BASIC: + return XPackSettings.SECURITY_ENABLED.get(settings) && isSecurityExplicitlyEnabled(settings); + case MISSING: + case TRIAL: + return false; + default: + throw new AssertionError("unknown operation mode [" + license.operationMode() + "]"); + } + } + private static boolean isSecurityEnabled(final OperationMode mode, final boolean isSecurityExplicitlyEnabled, final boolean isSecurityEnabled) { switch (mode) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index 92f8915e922f1..848aacd621111 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.xpack.core.security.SecurityField; @@ -117,7 +118,7 @@ private XPackSettings() { * SSL settings. These are the settings that are specifically registered for SSL. Many are private as we do not explicitly use them * but instead parse based on a prefix (eg *.ssl.*) */ - public static final List DEFAULT_CIPHERS = List.of( + private static final List JDK11_CIPHERS = List.of( "TLS_AES_256_GCM_SHA384", "TLS_AES_128_GCM_SHA256", // TLSv1.3 cipher has PFS, AEAD, hardware support "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support @@ -129,6 +130,23 @@ private XPackSettings() { "TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA256", // hardware support "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA"); // hardware support + private static final List JDK12_CIPHERS = List.of( + "TLS_AES_256_GCM_SHA384", "TLS_AES_128_GCM_SHA256", // TLSv1.3 cipher has PFS, AEAD, hardware support + "TLS_CHACHA20_POLY1305_SHA256", // TLSv1.3 cipher has PFS, AEAD + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", // PFS, AEAD + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", // PFS, hardware support + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", // PFS, hardware support + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", // PFS, hardware support + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", // PFS, hardware support + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", // AEAD, hardware support + "TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA256", // hardware support + 
"TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA"); // hardware support + + public static final List DEFAULT_CIPHERS = + JavaVersion.current().compareTo(JavaVersion.parse("12")) > -1 ? JDK12_CIPHERS : JDK11_CIPHERS; + /* * Do not allow insecure hashing algorithms to be used for password hashing */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java index c2981c40dfdc1..8f83fd375490d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java @@ -109,15 +109,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(transformId); } - /** - * Get the persisted stats document name from the Data Frame Transformer Id. - * - * @return The id of document the where the transform stats are persisted - */ - public static String documentId(String transformId) { - return NAME + "-" + transformId; - } - @Nullable public String getTransformId() { return transformId; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformProgress.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformProgress.java index 5b7346bca2a38..0741be296ed4d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformProgress.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformProgress.java @@ -23,9 +23,9 @@ public class DataFrameTransformProgress implements Writeable, ToXContentObject { - private static final ParseField TOTAL_DOCS = new ParseField("total_docs"); - private static final ParseField DOCS_REMAINING = new ParseField("docs_remaining"); - private static final String PERCENT_COMPLETE = "percent_complete"; + public static final ParseField TOTAL_DOCS = new ParseField("total_docs"); + public static final ParseField DOCS_REMAINING = new ParseField("docs_remaining"); + public static final String PERCENT_COMPLETE = "percent_complete"; public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "data_frame_transform_progress", diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java index bc1b710cd2e6f..d4480caa0b9a4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java @@ -42,12 +42,12 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState @Nullable private final String reason; - private static final ParseField TASK_STATE = new ParseField("task_state"); - private static final ParseField INDEXER_STATE = new ParseField("indexer_state"); - private static final ParseField CURRENT_POSITION = new ParseField("current_position"); - private static final ParseField CHECKPOINT = new ParseField("checkpoint"); - private static final ParseField REASON = new ParseField("reason"); 
- private static final ParseField PROGRESS = new ParseField("progress"); + public static final ParseField TASK_STATE = new ParseField("task_state"); + public static final ParseField INDEXER_STATE = new ParseField("indexer_state"); + public static final ParseField CURRENT_POSITION = new ParseField("current_position"); + public static final ParseField CHECKPOINT = new ParseField("checkpoint"); + public static final ParseField REASON = new ParseField("reason"); + public static final ParseField PROGRESS = new ParseField("progress"); @SuppressWarnings("unchecked") public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java index 2a145ba260f4e..d28d64bdb1e82 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.dataframe.transforms; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -14,6 +15,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.indexing.IndexerState; @@ -22,7 +24,7 @@ public class DataFrameTransformStateAndStats implements Writeable, ToXContentObject { - private static final String NAME = "data_frame_transform_state_and_stats"; + public static final String NAME = "data_frame_transform_state_and_stats"; public static final ParseField STATE_FIELD = new ParseField("state"); public static final ParseField CHECKPOINTING_INFO_FIELD = new ParseField("checkpointing"); @@ -47,6 +49,10 @@ public class DataFrameTransformStateAndStats implements Writeable, ToXContentObj (p, c) -> DataFrameTransformCheckpointingInfo.fromXContent(p), CHECKPOINTING_INFO_FIELD); } + public static DataFrameTransformStateAndStats fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + public static DataFrameTransformStateAndStats initialStateAndStats(String id) { return initialStateAndStats(id, new DataFrameIndexerTransformStats(id)); } @@ -58,6 +64,15 @@ public static DataFrameTransformStateAndStats initialStateAndStats(String id, Da DataFrameTransformCheckpointingInfo.EMPTY); } + /** + * Get the persisted state and stats document name from the Data Frame Transform Id. 
+ * + * @return The id of document the where the transform stats are persisted + */ + public static String documentId(String transformId) { + return NAME + "-" + transformId; + } + public DataFrameTransformStateAndStats(String id, DataFrameTransformState state, DataFrameIndexerTransformStats stats, DataFrameTransformCheckpointingInfo checkpointingInfo) { this.id = Objects.requireNonNull(id); @@ -73,6 +88,11 @@ public DataFrameTransformStateAndStats(StreamInput in) throws IOException { this.checkpointingInfo = new DataFrameTransformCheckpointingInfo(in); } + @Nullable + public String getTransformId() { + return transformStats.getTransformId(); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -80,6 +100,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(STATE_FIELD.getPreferredName(), transformState, params); builder.field(DataFrameField.STATS_FIELD.getPreferredName(), transformStats, params); builder.field(CHECKPOINTING_INFO_FIELD.getPreferredName(), checkpointingInfo, params); + if (params.paramAsBoolean(DataFrameField.FOR_INTERNAL_STORAGE, false)) { + builder.field(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), NAME); + } builder.endObject(); return builder; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java index ccf075b13ae5a..80b0378ae35ff 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java @@ -16,6 +16,7 @@ import java.util.List; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; /** @@ -94,16 +95,21 @@ public synchronized IndexerState start() { * @return The new state for the indexer (STOPPED, STOPPING or ABORTING if the job was already aborted). 
*/ public synchronized IndexerState stop() { + AtomicBoolean wasStartedAndSetStopped = new AtomicBoolean(false); IndexerState currentState = state.updateAndGet(previousState -> { if (previousState == IndexerState.INDEXING) { return IndexerState.STOPPING; } else if (previousState == IndexerState.STARTED) { - onStop(); + wasStartedAndSetStopped.set(true); return IndexerState.STOPPED; } else { return previousState; } }); + + if (wasStartedAndSetStopped.get()) { + onStop(); + } return currentState; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java index 755d6faef0ba2..da38d1d2903a8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -44,11 +43,7 @@ public MachineLearningFeatureSetUsage(StreamInput in) throws IOException { super(in); this.jobsUsage = in.readMap(); this.datafeedsUsage = in.readMap(); - if (in.getVersion().onOrAfter(Version.V_6_5_0)) { - this.nodeCount = in.readInt(); - } else { - this.nodeCount = -1; - } + this.nodeCount = in.readInt(); } @Override @@ -56,9 +51,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeMap(jobsUsage); out.writeMap(datafeedsUsage); - if (out.getVersion().onOrAfter(Version.V_6_5_0)) { - out.writeInt(nodeCount); - } + out.writeInt(nodeCount); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java index 9bc413b2e220f..95ec597bb9cd8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.tasks.BaseTasksRequest; @@ -91,9 +90,7 @@ public Request(StreamInput in) throws IOException { force = in.readBoolean(); openJobIds = in.readStringArray(); local = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - allowNoJobs = in.readBoolean(); - } + allowNoJobs = in.readBoolean(); } @Override @@ -104,9 +101,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(force); out.writeStringArray(openJobIds); out.writeBoolean(local); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(allowNoJobs); - } + out.writeBoolean(allowNoJobs); } public Request(String jobId) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java index 59589fa34ef9d..950fa58af95c8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; @@ -58,18 +57,14 @@ public Request() { public Request(StreamInput in) throws IOException { super(in); datafeedId = in.readString(); - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - allowNoDatafeeds = in.readBoolean(); - } + allowNoDatafeeds = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(datafeedId); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(allowNoDatafeeds); - } + out.writeBoolean(allowNoDatafeeds); } public String getDatafeedId() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java index 6dbb86fbcd082..39055501444f5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; @@ -62,18 +61,14 @@ public Request() {} public Request(StreamInput in) throws IOException { super(in); datafeedId = in.readString(); - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - allowNoDatafeeds = in.readBoolean(); - } + allowNoDatafeeds = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(datafeedId); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(allowNoDatafeeds); - } + out.writeBoolean(allowNoDatafeeds); } public String getDatafeedId() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java index 18428eff13758..98b1eb7a118f9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; @@ -56,18 +55,14 @@ public Request() { public Request(StreamInput in) throws IOException { super(in); jobId = in.readString(); - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - allowNoJobs = in.readBoolean(); - } + allowNoJobs = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(jobId); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(allowNoJobs); - } + out.writeBoolean(allowNoJobs); } public void setAllowNoJobs(boolean allowNoJobs) { diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java index d4c7124af3238..17de9dfc3522c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -85,9 +85,7 @@ public Request(StreamInput in) throws IOException { super(in); jobId = in.readString(); expandedJobsIds = in.readStringList(); - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - allowNoJobs = in.readBoolean(); - } + allowNoJobs = in.readBoolean(); } @Override @@ -95,9 +93,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(jobId); out.writeStringCollection(expandedJobsIds); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(allowNoJobs); - } + out.writeBoolean(allowNoJobs); } public List getExpandedJobsIds() { return expandedJobsIds; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java index c914150173b4a..0021040c69801 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; @@ -96,9 +95,7 @@ public Request(StreamInput in) throws IOException { resolvedStartedDatafeedIds = in.readStringArray(); stopTimeout = in.readTimeValue(); force = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - allowNoDatafeeds = in.readBoolean(); - } + allowNoDatafeeds = in.readBoolean(); } public String getDatafeedId() { @@ -160,9 +157,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(resolvedStartedDatafeedIds); out.writeTimeValue(stopTimeout); out.writeBoolean(force); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(allowNoDatafeeds); - } + out.writeBoolean(allowNoDatafeeds); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java index 85e1615c0dfe0..6ecee409c30f1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java @@ -92,11 +92,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); jobId = in.readString(); update = new JobUpdate(in); - if (in.getVersion().onOrAfter(Version.V_6_2_2)) { - isInternal = in.readBoolean(); - } else { - isInternal = false; - } + isInternal = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_6_3_0) && in.getVersion().before(Version.V_7_0_0)) { in.readBoolean(); // was waitForAck } @@ -107,9 +103,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(jobId); update.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_6_2_2)) { - 
out.writeBoolean(isInternal); - } + out.writeBoolean(isInternal); if (out.getVersion().onOrAfter(Version.V_6_3_0) && out.getVersion().before(Version.V_7_0_0)) { out.writeBoolean(false); // was waitForAck } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java index 5091ff1f968f1..6a8e1703ad1f2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.tasks.BaseTasksResponse; @@ -121,10 +120,8 @@ public Request(StreamInput in) throws IOException { if (in.readBoolean()) { detectorUpdates = in.readList(JobUpdate.DetectorUpdate::new); } - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - filter = in.readOptionalWriteable(MlFilter::new); - updateScheduledEvents = in.readBoolean(); - } + filter = in.readOptionalWriteable(MlFilter::new); + updateScheduledEvents = in.readBoolean(); } @Override @@ -136,10 +133,8 @@ public void writeTo(StreamOutput out) throws IOException { if (hasDetectorUpdates) { out.writeList(detectorUpdates); } - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeOptionalWriteable(filter); - out.writeBoolean(updateScheduledEvents); - } + out.writeOptionalWriteable(filter); + out.writeBoolean(updateScheduledEvents); } public Request(String jobId, ModelPlotConfig modelPlotConfig, List detectorUpdates, MlFilter filter, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java index 8585e4122e673..1c39c6d985d45 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java @@ -7,8 +7,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -68,14 +66,7 @@ static AggProvider fromParsedAggs(AggregatorFactories.Builder parsedAggs) throws } static AggProvider fromStream(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { // Has our bug fix for query/agg providers - return new AggProvider(in.readMap(), in.readOptionalWriteable(AggregatorFactories.Builder::new), in.readException()); - } else if (in.getVersion().onOrAfter(Version.V_6_6_0)) { // Has the bug, but supports lazy objects - return new AggProvider(in.readMap(), null, null); - } else { // only supports eagerly parsed objects - // Upstream, we have read the bool already and know for sure that we have parsed aggs in the stream - return AggProvider.fromParsedAggs(new AggregatorFactories.Builder(in)); - } + return new AggProvider(in.readMap(), in.readOptionalWriteable(AggregatorFactories.Builder::new), in.readException()); } AggProvider(Map aggs, AggregatorFactories.Builder parsedAggs, Exception 
parsingException) { @@ -92,29 +83,9 @@ static AggProvider fromStream(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { // Has our bug fix for query/agg providers - out.writeMap(aggs); - out.writeOptionalWriteable(parsedAggs); - out.writeException(parsingException); - } else if (out.getVersion().onOrAfter(Version.V_6_6_0)) { // Has the bug, but supports lazy objects - // We allow the lazy parsing nodes that have the bug throw any parsing errors themselves as - // they already have the ability to fully parse the passed Maps - out.writeMap(aggs); - } else { // only supports eagerly parsed objects - if (parsingException != null) { - if (parsingException instanceof IOException) { - throw (IOException) parsingException; - } else { - throw new ElasticsearchException(parsingException); - } - } else if (parsedAggs == null) { - // This is an admittedly rare case but we should fail early instead of writing null when there - // actually are aggregations defined - throw new ElasticsearchException("Unsupported operation: parsed aggregations are null"); - } - // Upstream we already verified that this calling object is not null, no need to write a second boolean to the stream - parsedAggs.writeTo(out); - } + out.writeMap(aggs); + out.writeOptionalWriteable(parsedAggs); + out.writeException(parsingException); } public Exception getParsingException() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index 810d97df34636..f08c4a9d7391d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -222,11 +222,7 @@ public DatafeedConfig(StreamInput in) throws IOException { } this.scrollSize = in.readOptionalVInt(); this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new); - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); - } else { - this.headers = Collections.emptyMap(); - } + this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); if (in.getVersion().onOrAfter(Version.V_6_6_0)) { delayedDataCheckConfig = in.readOptionalWriteable(DelayedDataCheckConfig::new); } else { @@ -432,9 +428,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalVInt(scrollSize); out.writeOptionalWriteable(chunkingConfig); - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); - } + out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeOptionalWriteable(delayedDataCheckConfig); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProvider.java index ff6d2f595af81..755c5a3526d01 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProvider.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProvider.java @@ -7,8 +7,6 @@ import org.apache.logging.log4j.LogManager; import 
org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -74,13 +72,7 @@ static QueryProvider fromParsedQuery(QueryBuilder parsedQuery) throws IOExceptio } static QueryProvider fromStream(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { // Has our bug fix for query/agg providers - return new QueryProvider(in.readMap(), in.readOptionalNamedWriteable(QueryBuilder.class), in.readException()); - } else if (in.getVersion().onOrAfter(Version.V_6_6_0)) { // Has the bug, but supports lazy objects - return new QueryProvider(in.readMap(), null, null); - } else { // only supports eagerly parsed objects - return QueryProvider.fromParsedQuery(in.readNamedWriteable(QueryBuilder.class)); - } + return new QueryProvider(in.readMap(), in.readOptionalNamedWriteable(QueryBuilder.class), in.readException()); } QueryProvider(Map query, QueryBuilder parsedQuery, Exception parsingException) { @@ -95,28 +87,9 @@ static QueryProvider fromStream(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { // Has our bug fix for query/agg providers - out.writeMap(query); - out.writeOptionalNamedWriteable(parsedQuery); - out.writeException(parsingException); - } else if (out.getVersion().onOrAfter(Version.V_6_6_0)) { // Has the bug, but supports lazy objects - // We allow the lazy parsing nodes that have the bug throw any parsing errors themselves as - // they already have the ability to fully parse the passed Maps - out.writeMap(query); - } else { // only supports eagerly parsed objects - if (parsingException != null) { // Do we have a parsing error? Throw it - if (parsingException instanceof IOException) { - throw (IOException) parsingException; - } else { - throw new ElasticsearchException(parsingException); - } - } else if (parsedQuery == null) { // Do we have a query defined but not parsed? - // This is an admittedly rare case but we should fail early instead of writing null when there - // actually is a query defined - throw new ElasticsearchException("Unsupported operation: parsed query is null"); - } - out.writeNamedWriteable(parsedQuery); - } + out.writeMap(query); + out.writeOptionalNamedWriteable(parsedQuery); + out.writeException(parsingException); } public Exception getParsingException() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java index 933188c8221eb..9e01cd21e2b90 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.config; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -126,11 +125,7 @@ public AnalysisConfig(StreamInput in) throws IOException { bucketSpan = in.readTimeValue(); categorizationFieldName = in.readOptionalString(); categorizationFilters = in.readBoolean() ? 
Collections.unmodifiableList(in.readStringList()) : null; - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - categorizationAnalyzerConfig = in.readOptionalWriteable(CategorizationAnalyzerConfig::new); - } else { - categorizationAnalyzerConfig = null; - } + categorizationAnalyzerConfig = in.readOptionalWriteable(CategorizationAnalyzerConfig::new); latency = in.readOptionalTimeValue(); summaryCountFieldName = in.readOptionalString(); detectors = Collections.unmodifiableList(in.readList(Detector::new)); @@ -149,9 +144,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeOptionalWriteable(categorizationAnalyzerConfig); - } + out.writeOptionalWriteable(categorizationAnalyzerConfig); out.writeOptionalTimeValue(latency); out.writeOptionalString(summaryCountFieldName); out.writeList(detectors); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index de37702fe5246..1cb44f9625cb5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -186,11 +186,7 @@ public Job(StreamInput in) throws IOException { jobId = in.readString(); jobType = in.readString(); jobVersion = in.readBoolean() ? Version.readVersion(in) : null; - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - groups = Collections.unmodifiableList(in.readStringList()); - } else { - groups = Collections.emptyList(); - } + groups = Collections.unmodifiableList(in.readStringList()); description = in.readOptionalString(); createTime = new Date(in.readVLong()); finishedTime = in.readBoolean() ? new Date(in.readVLong()) : null; @@ -200,10 +196,6 @@ public Job(StreamInput in) throws IOException { in.readVLong(); } } - // for removed establishedModelMemory field - if (in.getVersion().onOrAfter(Version.V_6_1_0) && in.getVersion().before(Version.V_7_0_0)) { - in.readOptionalLong(); - } analysisConfig = new AnalysisConfig(in); analysisLimits = in.readOptionalWriteable(AnalysisLimits::new); dataDescription = in.readOptionalWriteable(DataDescription::new); @@ -449,9 +441,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeStringCollection(groups); - } + out.writeStringCollection(groups); out.writeOptionalString(description); out.writeVLong(createTime.getTime()); if (finishedTime != null) { @@ -464,10 +454,6 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().before(Version.V_7_0_0)) { out.writeBoolean(false); } - // for removed establishedModelMemory field - if (out.getVersion().onOrAfter(Version.V_6_1_0) && out.getVersion().before(Version.V_7_0_0)) { - out.writeOptionalLong(null); - } analysisConfig.writeTo(out); out.writeOptionalWriteable(analysisLimits); out.writeOptionalWriteable(dataDescription); @@ -676,11 +662,7 @@ public Builder(StreamInput in) throws IOException { id = in.readOptionalString(); jobType = in.readString(); jobVersion = in.readBoolean() ? Version.readVersion(in) : null; - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - groups = in.readStringList(); - } else { - groups = Collections.emptyList(); - } + groups = in.readStringList(); description = in.readOptionalString(); createTime = in.readBoolean() ? 
new Date(in.readVLong()) : null; finishedTime = in.readBoolean() ? new Date(in.readVLong()) : null; @@ -690,10 +672,6 @@ public Builder(StreamInput in) throws IOException { in.readVLong(); } } - // for removed establishedModelMemory field - if (in.getVersion().onOrAfter(Version.V_6_1_0) && in.getVersion().before(Version.V_7_0_0)) { - in.readOptionalLong(); - } analysisConfig = in.readOptionalWriteable(AnalysisConfig::new); analysisLimits = in.readOptionalWriteable(AnalysisLimits::new); dataDescription = in.readOptionalWriteable(DataDescription::new); @@ -861,9 +839,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeStringCollection(groups); - } + out.writeStringCollection(groups); out.writeOptionalString(description); if (createTime != null) { out.writeBoolean(true); @@ -881,10 +857,6 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().before(Version.V_7_0_0)) { out.writeBoolean(false); } - // for removed establishedModelMemory field - if (out.getVersion().onOrAfter(Version.V_6_1_0) && out.getVersion().before(Version.V_7_0_0)) { - out.writeOptionalLong(null); - } out.writeOptionalWriteable(analysisConfig); out.writeOptionalWriteable(analysisLimits); out.writeOptionalWriteable(dataDescription); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index 36e1fc1096675..81a0e017c6584 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -106,12 +106,8 @@ private JobUpdate(String jobId, @Nullable List groups, @Nullable String public JobUpdate(StreamInput in) throws IOException { jobId = in.readString(); - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - String[] groupsArray = in.readOptionalStringArray(); - groups = groupsArray == null ? null : Arrays.asList(groupsArray); - } else { - groups = null; - } + String[] groupsArray = in.readOptionalStringArray(); + groups = groupsArray == null ? null : Arrays.asList(groupsArray); description = in.readOptionalString(); if (in.readBoolean()) { detectorUpdates = in.readList(DetectorUpdate::new); @@ -131,10 +127,6 @@ public JobUpdate(StreamInput in) throws IOException { } customSettings = in.readMap(); modelSnapshotId = in.readOptionalString(); - // was establishedModelMemory - if (in.getVersion().onOrAfter(Version.V_6_1_0) && in.getVersion().before(Version.V_7_0_0)) { - in.readOptionalLong(); - } if (in.getVersion().onOrAfter(Version.V_6_3_0) && in.readBoolean()) { jobVersion = Version.readVersion(in); } else { @@ -155,10 +147,8 @@ public JobUpdate(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - String[] groupsArray = groups == null ? null : groups.toArray(new String[groups.size()]); - out.writeOptionalStringArray(groupsArray); - } + String[] groupsArray = groups == null ? 
null : groups.toArray(new String[groups.size()]); + out.writeOptionalStringArray(groupsArray); out.writeOptionalString(description); out.writeBoolean(detectorUpdates != null); if (detectorUpdates != null) { @@ -176,10 +166,6 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeMap(customSettings); out.writeOptionalString(modelSnapshotId); - // was establishedModelMemory - if (out.getVersion().onOrAfter(Version.V_6_1_0) && out.getVersion().before(Version.V_7_0_0)) { - out.writeOptionalLong(null); - } if (out.getVersion().onOrAfter(Version.V_6_3_0)) { if (jobVersion != null) { out.writeBoolean(true); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index 42ce9eb4273a1..22eb0dc357bed 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -91,8 +91,11 @@ public final class Messages { public static final String JOB_AUDIT_CALENDARS_UPDATED_ON_PROCESS = "Updated calendars in running process"; public static final String JOB_AUDIT_MEMORY_STATUS_SOFT_LIMIT = "Job memory status changed to soft_limit; memory pruning will now be " + "more aggressive"; - public static final String JOB_AUDIT_MEMORY_STATUS_HARD_LIMIT = "Job memory status changed to hard_limit at {0}; adjust the " + - "analysis_limits.model_memory_limit setting to ensure all data is analyzed"; + public static final String JOB_AUDIT_MEMORY_STATUS_HARD_LIMIT = "Job memory status changed to hard_limit; " + + "job exceeded model memory limit {0} by {1}. " + + "Adjust the analysis_limits.model_memory_limit setting to ensure all data is analyzed"; + public static final String JOB_AUDIT_MEMORY_STATUS_HARD_LIMIT_PRE_7_2 = "Job memory status changed to hard_limit at {0}; adjust the " + + "analysis_limits.model_memory_limit setting to ensure all data is analyzed"; public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_DUPLICATES = "categorization_filters contain duplicates"; public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_EMPTY = diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStats.java index f5d7cebbc4c52..f02120433efc4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStats.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -38,6 +39,8 @@ public class ModelSizeStats implements ToXContentObject, Writeable { * Field Names */ public static final ParseField MODEL_BYTES_FIELD = new ParseField("model_bytes"); + public static final ParseField MODEL_BYTES_EXCEEDED_FIELD = new ParseField("model_bytes_exceeded"); + public static final ParseField MODEL_BYTES_MEMORY_LIMIT_FIELD = new ParseField("model_bytes_memory_limit"); public static final ParseField TOTAL_BY_FIELD_COUNT_FIELD = new 
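
The reworded hard_limit audit message above now takes two placeholders, the configured model memory limit and the amount by which it was exceeded, while the old single-placeholder wording is kept as JOB_AUDIT_MEMORY_STATUS_HARD_LIMIT_PRE_7_2. A quick illustration of how a MessageFormat-style template with those placeholders renders; the byte values are invented and the plugin itself formats these strings through its Messages helper:

import java.text.MessageFormat;

public final class HardLimitMessageExample {
    public static void main(String[] args) {
        String template = "Job memory status changed to hard_limit; "
                + "job exceeded model memory limit {0} by {1}. "
                + "Adjust the analysis_limits.model_memory_limit setting to ensure all data is analyzed";
        // Prints: Job memory status changed to hard_limit; job exceeded model memory limit 1024mb by 10mb. Adjust ...
        System.out.println(MessageFormat.format(template, "1024mb", "10mb"));
    }
}
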
ParseField("total_by_field_count"); public static final ParseField TOTAL_OVER_FIELD_COUNT_FIELD = new ParseField("total_over_field_count"); public static final ParseField TOTAL_PARTITION_FIELD_COUNT_FIELD = new ParseField("total_partition_field_count"); @@ -56,6 +59,8 @@ private static ConstructingObjectParser createParser(boolean igno parser.declareString(ConstructingObjectParser.constructorArg(), Job.ID); parser.declareString((modelSizeStat, s) -> {}, Result.RESULT_TYPE); parser.declareLong(Builder::setModelBytes, MODEL_BYTES_FIELD); + parser.declareLong(Builder::setModelBytesExceeded, MODEL_BYTES_EXCEEDED_FIELD); + parser.declareLong(Builder::setModelBytesMemoryLimit, MODEL_BYTES_MEMORY_LIMIT_FIELD); parser.declareLong(Builder::setBucketAllocationFailuresCount, BUCKET_ALLOCATION_FAILURES_COUNT_FIELD); parser.declareLong(Builder::setTotalByFieldCount, TOTAL_BY_FIELD_COUNT_FIELD); parser.declareLong(Builder::setTotalOverFieldCount, TOTAL_OVER_FIELD_COUNT_FIELD); @@ -100,6 +105,8 @@ public String toString() { private final String jobId; private final long modelBytes; + private final Long modelBytesExceeded; + private final Long modelBytesMemoryLimit; private final long totalByFieldCount; private final long totalOverFieldCount; private final long totalPartitionFieldCount; @@ -108,11 +115,14 @@ public String toString() { private final Date timestamp; private final Date logTime; - private ModelSizeStats(String jobId, long modelBytes, long totalByFieldCount, long totalOverFieldCount, - long totalPartitionFieldCount, long bucketAllocationFailuresCount, MemoryStatus memoryStatus, + private ModelSizeStats(String jobId, long modelBytes, Long modelBytesExceeded, Long modelBytesMemoryLimit, long totalByFieldCount, + long totalOverFieldCount, long totalPartitionFieldCount, long bucketAllocationFailuresCount, + MemoryStatus memoryStatus, Date timestamp, Date logTime) { this.jobId = jobId; this.modelBytes = modelBytes; + this.modelBytesExceeded = modelBytesExceeded; + this.modelBytesMemoryLimit = modelBytesMemoryLimit; this.totalByFieldCount = totalByFieldCount; this.totalOverFieldCount = totalOverFieldCount; this.totalPartitionFieldCount = totalPartitionFieldCount; @@ -125,6 +135,16 @@ private ModelSizeStats(String jobId, long modelBytes, long totalByFieldCount, lo public ModelSizeStats(StreamInput in) throws IOException { jobId = in.readString(); modelBytes = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_7_2_0)) { + modelBytesExceeded = in.readOptionalLong(); + } else { + modelBytesExceeded = null; + } + if (in.getVersion().onOrAfter(Version.V_7_2_0)) { + modelBytesMemoryLimit = in.readOptionalLong(); + } else { + modelBytesMemoryLimit = null; + } totalByFieldCount = in.readVLong(); totalOverFieldCount = in.readVLong(); totalPartitionFieldCount = in.readVLong(); @@ -146,6 +166,12 @@ public static String documentIdPrefix(String jobId) { public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); out.writeVLong(modelBytes); + if (out.getVersion().onOrAfter(Version.V_7_2_0)) { + out.writeOptionalLong(modelBytesExceeded); + } + if (out.getVersion().onOrAfter(Version.V_7_2_0)) { + out.writeOptionalLong(modelBytesMemoryLimit); + } out.writeVLong(totalByFieldCount); out.writeVLong(totalOverFieldCount); out.writeVLong(totalPartitionFieldCount); @@ -171,6 +197,12 @@ public XContentBuilder doXContentBody(XContentBuilder builder) throws IOExceptio builder.field(Job.ID.getPreferredName(), jobId); builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); 
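
The new model_bytes_exceeded and model_bytes_memory_limit stats above are serialized with the usual version-gated optional-long pattern, so pre-7.2 peers neither send nor expect the extra fields. A sketch of just that pattern, isolated from the rest of ModelSizeStats; the helper class is hypothetical, while the stream calls and Version constant are the ones used in the diff:

import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

final class VersionGatedLongSketch {

    static void write(StreamOutput out, Long modelBytesExceeded) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
            out.writeOptionalLong(modelBytesExceeded); // 7.2+ peers understand the new field
        }                                              // older peers simply never receive it
    }

    static Long read(StreamInput in) throws IOException {
        return in.getVersion().onOrAfter(Version.V_7_2_0)
                ? in.readOptionalLong()                // may still be null even on 7.2+
                : null;                                // pre-7.2 stream: field is absent
    }
}
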
builder.field(MODEL_BYTES_FIELD.getPreferredName(), modelBytes); + if (modelBytesExceeded != null) { + builder.field(MODEL_BYTES_EXCEEDED_FIELD.getPreferredName(), modelBytesExceeded); + } + if (modelBytesMemoryLimit != null) { + builder.field(MODEL_BYTES_MEMORY_LIMIT_FIELD.getPreferredName(), modelBytesMemoryLimit); + } builder.field(TOTAL_BY_FIELD_COUNT_FIELD.getPreferredName(), totalByFieldCount); builder.field(TOTAL_OVER_FIELD_COUNT_FIELD.getPreferredName(), totalOverFieldCount); builder.field(TOTAL_PARTITION_FIELD_COUNT_FIELD.getPreferredName(), totalPartitionFieldCount); @@ -192,6 +224,14 @@ public long getModelBytes() { return modelBytes; } + public Long getModelBytesExceeded() { + return modelBytesExceeded; + } + + public Long getModelBytesMemoryLimit() { + return modelBytesMemoryLimit; + } + public long getTotalByFieldCount() { return totalByFieldCount; } @@ -231,8 +271,8 @@ public Date getLogTime() { @Override public int hashCode() { // this.id excluded here as it is generated by the datastore - return Objects.hash(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount, - this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); + return Objects.hash(jobId, modelBytes, modelBytesExceeded, modelBytesMemoryLimit, totalByFieldCount, totalOverFieldCount, + totalPartitionFieldCount, this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); } /** @@ -250,7 +290,9 @@ public boolean equals(Object other) { ModelSizeStats that = (ModelSizeStats) other; - return this.modelBytes == that.modelBytes && this.totalByFieldCount == that.totalByFieldCount + return this.modelBytes == that.modelBytes && Objects.equals(this.modelBytesExceeded, that.modelBytesExceeded) + && Objects.equals(this.modelBytesMemoryLimit, that.modelBytesMemoryLimit) + && this.totalByFieldCount == that.totalByFieldCount && this.totalOverFieldCount == that.totalOverFieldCount && this.totalPartitionFieldCount == that.totalPartitionFieldCount && this.bucketAllocationFailuresCount == that.bucketAllocationFailuresCount && Objects.equals(this.memoryStatus, that.memoryStatus) && Objects.equals(this.timestamp, that.timestamp) @@ -262,6 +304,8 @@ public static class Builder { private final String jobId; private long modelBytes; + private Long modelBytesExceeded; + private Long modelBytesMemoryLimit; private long totalByFieldCount; private long totalOverFieldCount; private long totalPartitionFieldCount; @@ -279,6 +323,8 @@ public Builder(String jobId) { public Builder(ModelSizeStats modelSizeStats) { this.jobId = modelSizeStats.jobId; this.modelBytes = modelSizeStats.modelBytes; + this.modelBytesExceeded = modelSizeStats.modelBytesExceeded; + this.modelBytesMemoryLimit = modelSizeStats.modelBytesMemoryLimit; this.totalByFieldCount = modelSizeStats.totalByFieldCount; this.totalOverFieldCount = modelSizeStats.totalOverFieldCount; this.totalPartitionFieldCount = modelSizeStats.totalPartitionFieldCount; @@ -293,6 +339,16 @@ public Builder setModelBytes(long modelBytes) { return this; } + public Builder setModelBytesExceeded(long modelBytesExceeded) { + this.modelBytesExceeded = modelBytesExceeded; + return this; + } + + public Builder setModelBytesMemoryLimit(long modelBytesMemoryLimit) { + this.modelBytesMemoryLimit = modelBytesMemoryLimit; + return this; + } + public Builder setTotalByFieldCount(long totalByFieldCount) { this.totalByFieldCount = totalByFieldCount; return this; @@ -330,8 +386,8 @@ public Builder setLogTime(Date logTime) { } public ModelSizeStats build() { - return new 
ModelSizeStats(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount, - bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); + return new ModelSizeStats(jobId, modelBytes, modelBytesExceeded, modelBytesMemoryLimit, totalByFieldCount, totalOverFieldCount, + totalPartitionFieldCount, bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java index d335ba39e0026..8e04e001ed6cd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java @@ -138,12 +138,8 @@ public Bucket(StreamInput in) throws IOException { if (in.getVersion().before(Version.V_6_5_0)) { in.readList(Bucket::readOldPerPartitionNormalization); } - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - scheduledEvents = in.readStringList(); - if (scheduledEvents.isEmpty()) { - scheduledEvents = Collections.emptyList(); - } - } else { + scheduledEvents = in.readStringList(); + if (scheduledEvents.isEmpty()) { scheduledEvents = Collections.emptyList(); } } @@ -164,9 +160,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().before(Version.V_6_5_0)) { out.writeList(Collections.emptyList()); } - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeStringCollection(scheduledEvents); - } + out.writeStringCollection(scheduledEvents); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java index ed31f0cc020c6..3fdfaab060542 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java @@ -192,32 +192,18 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(grantType); - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeOptionalString(username); - if (password == null) { - out.writeOptionalBytesReference(null); - } else { - final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); - try { - out.writeOptionalBytesReference(new BytesArray(passwordBytes)); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } - } - out.writeOptionalString(refreshToken); + out.writeOptionalString(username); + if (password == null) { + out.writeOptionalBytesReference(null); } else { - if ("refresh_token".equals(grantType)) { - throw new IllegalArgumentException("a refresh request cannot be sent to an older version"); - } else { - out.writeString(username); - final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); - try { - out.writeByteArray(passwordBytes); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } + final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); + try { + out.writeOptionalBytesReference(new BytesArray(passwordBytes)); + } finally { + Arrays.fill(passwordBytes, (byte) 0); } } + out.writeOptionalString(refreshToken); out.writeOptionalString(scope); } @@ -225,29 +211,19 @@ public void writeTo(StreamOutput out) throws IOException { public void readFrom(StreamInput in) throws 
IOException { super.readFrom(in); grantType = in.readString(); - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - username = in.readOptionalString(); - BytesReference bytesRef = in.readOptionalBytesReference(); - if (bytesRef != null) { - byte[] bytes = BytesReference.toBytes(bytesRef); - try { - password = new SecureString(CharArrays.utf8BytesToChars(bytes)); - } finally { - Arrays.fill(bytes, (byte) 0); - } - } else { - password = null; - } - refreshToken = in.readOptionalString(); - } else { - username = in.readString(); - final byte[] passwordBytes = in.readByteArray(); + username = in.readOptionalString(); + BytesReference bytesRef = in.readOptionalBytesReference(); + if (bytesRef != null) { + byte[] bytes = BytesReference.toBytes(bytesRef); try { - password = new SecureString(CharArrays.utf8BytesToChars(passwordBytes)); + password = new SecureString(CharArrays.utf8BytesToChars(bytes)); } finally { - Arrays.fill(passwordBytes, (byte) 0); + Arrays.fill(bytes, (byte) 0); } + } else { + password = null; } + refreshToken = in.readOptionalString(); scope = in.readOptionalString(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java index 30111a92431dc..93ddc56459677 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java @@ -61,12 +61,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(scope); if (out.getVersion().onOrAfter(Version.V_6_5_0)) { out.writeOptionalString(refreshToken); - } else if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - if (refreshToken == null) { - out.writeString(""); - } else { - out.writeString(refreshToken); - } } } @@ -78,8 +72,6 @@ public void readFrom(StreamInput in) throws IOException { scope = in.readOptionalString(); if (in.getVersion().onOrAfter(Version.V_6_5_0)) { refreshToken = in.readOptionalString(); - } else if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - refreshToken = in.readString(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java index 492622b2c519c..28f263748135f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java @@ -351,6 +351,24 @@ public boolean verify(SecureString text, char[] hash) { return CharArrays.constantTimeEquals(computedHash, new String(saltAndHash, 12, saltAndHash.length - 12)); } }, + /* + * Unsalted SHA-256 , not suited for password storage. 
+ */ + SHA256() { + @Override + public char[] hash(SecureString text) { + MessageDigest md = MessageDigests.sha256(); + md.update(CharArrays.toUtf8Bytes(text.getChars())); + return Base64.getEncoder().encodeToString(md.digest()).toCharArray(); + } + + @Override + public boolean verify(SecureString text, char[] hash) { + MessageDigest md = MessageDigests.sha256(); + md.update(CharArrays.toUtf8Bytes(text.getChars())); + return CharArrays.constantTimeEquals(Base64.getEncoder().encodeToString(md.digest()).toCharArray(), hash); + } + }, NOOP() { @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java index 08d754b4e5357..bde94b116c982 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java @@ -139,11 +139,11 @@ private static void buildRoleQuery(User user, ScriptService scriptService, Shard NestedHelper nestedHelper = new NestedHelper(queryShardContext.getMapperService()); if (nestedHelper.mightMatchNestedDocs(roleQuery)) { roleQuery = new BooleanQuery.Builder().add(roleQuery, FILTER) - .add(Queries.newNonNestedFilter(queryShardContext.indexVersionCreated()), FILTER).build(); + .add(Queries.newNonNestedFilter(), FILTER).build(); } // If access is allowed on root doc then also access is allowed on all nested docs of that root document: BitSetProducer rootDocs = queryShardContext - .bitsetFilter(Queries.newNonNestedFilter(queryShardContext.indexVersionCreated())); + .bitsetFilter(Queries.newNonNestedFilter()); ToChildBlockJoinQuery includeNestedDocs = new ToChildBlockJoinQuery(roleQuery, rootDocs); filter.add(includeNestedDocs, SHOULD); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java index 6f6592bbdfca2..a042aeb4a2359 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java @@ -9,6 +9,7 @@ import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.core.XPackSettings; /** @@ -19,10 +20,11 @@ public final class TLSLicenseBootstrapCheck implements BootstrapCheck { public BootstrapCheckResult check(BootstrapContext context) { if (XPackSettings.TRANSPORT_SSL_ENABLED.get(context.settings()) == false) { License license = LicenseService.getLicense(context.metaData()); - if (license != null && license.isProductionLicense()) { - return BootstrapCheckResult.failure("Transport SSL must be enabled for setups with production licenses. Please set " + - "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting [xpack.security.enabled] " + - "to [false]"); + if (XPackLicenseState.isTransportTlsRequired(license, context.settings())) { + return BootstrapCheckResult.failure("Transport SSL must be enabled if security is enabled on a [" + + license.operationMode().description() + "] license. 
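
For the new unsalted SHA256 hasher above, a standalone sketch of the same hash/verify round trip using plain JDK classes. It assumes MessageDigests.sha256() behaves like MessageDigest.getInstance("SHA-256") and substitutes MessageDigest.isEqual for the constant-time comparison in the enum, so treat it as an approximation rather than the plugin's implementation:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

final class UnsaltedSha256Sketch {

    static String hash(char[] text) throws NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        md.update(new String(text).getBytes(StandardCharsets.UTF_8));
        return Base64.getEncoder().encodeToString(md.digest());   // unsalted, deterministic digest
    }

    static boolean verify(char[] text, String storedHash) throws NoSuchAlgorithmException {
        String computed = hash(text);
        // constant-time comparison, standing in for CharArrays.constantTimeEquals in the diff
        return MessageDigest.isEqual(computed.getBytes(StandardCharsets.UTF_8),
                storedHash.getBytes(StandardCharsets.UTF_8));
    }
}
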
" + + "Please set [xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + + "[xpack.security.enabled] to [false]"); } } return BootstrapCheckResult.success(); diff --git a/x-pack/plugin/core/src/main/resources/security-index-template-7.json b/x-pack/plugin/core/src/main/resources/security-index-template-7.json index ebf6d073cd8a6..dae6462b7a6f0 100644 --- a/x-pack/plugin/core/src/main/resources/security-index-template-7.json +++ b/x-pack/plugin/core/src/main/resources/security-index-template-7.json @@ -213,8 +213,19 @@ "type": "date", "format": "epoch_millis" }, - "superseded_by": { - "type": "keyword" + "superseding": { + "type": "object", + "properties": { + "encrypted_tokens": { + "type": "binary" + }, + "encryption_iv": { + "type": "binary" + }, + "encryption_salt": { + "type": "binary" + } + } }, "invalidated" : { "type" : "boolean" diff --git a/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json b/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json index e7450d0be9c28..312d9ff9e3f58 100644 --- a/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json +++ b/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json @@ -35,8 +35,19 @@ "type": "date", "format": "epoch_millis" }, - "superseded_by": { - "type": "keyword" + "superseding": { + "type": "object", + "properties": { + "encrypted_tokens": { + "type": "binary" + }, + "encryption_iv": { + "type": "binary" + }, + "encryption_salt": { + "type": "binary" + } + } }, "invalidated" : { "type" : "boolean" diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/TestXPackTransportClient.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/TestXPackTransportClient.java deleted file mode 100644 index 30c370c14c27d..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/TestXPackTransportClient.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core; - -import io.netty.util.ThreadDeathWatcher; -import io.netty.util.concurrent.GlobalEventExecutor; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.xpack.core.security.SecurityField; - -import java.util.Arrays; -import java.util.Collection; -import java.util.concurrent.TimeUnit; - -import static org.elasticsearch.test.ESTestCase.getTestTransportPlugin; - -/** - * TransportClient.Builder that installs the XPackPlugin by default. - */ -@SuppressWarnings({"unchecked","varargs"}) -public class TestXPackTransportClient extends TransportClient { - - @SafeVarargs - public TestXPackTransportClient(Settings settings, Class... 
plugins) { - this(settings, Arrays.asList(plugins)); - } - - public TestXPackTransportClient(Settings settings, Collection> plugins) { - super(settings, Settings.EMPTY, addPlugins(plugins, getTestTransportPlugin()), null); - } - - @Override - public void close() { - super.close(); - if (NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings) == false - || NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(SecurityField.NAME4)) { - try { - GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - try { - ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java index 924e55be51cb4..9f430819225a4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import javax.crypto.SecretKeyFactory; @@ -14,6 +15,7 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.not; public class XPackSettingsTests extends ESTestCase { @@ -22,6 +24,20 @@ public void testDefaultSSLCiphers() { assertThat(XPackSettings.DEFAULT_CIPHERS, hasItem("TLS_RSA_WITH_AES_256_CBC_SHA")); } + public void testChaCha20InCiphersOnJdk12Plus() { + assumeTrue("Test is only valid on JDK 12+ JVM", JavaVersion.current().compareTo(JavaVersion.parse("12")) > -1); + assertThat(XPackSettings.DEFAULT_CIPHERS, hasItem("TLS_CHACHA20_POLY1305_SHA256")); + assertThat(XPackSettings.DEFAULT_CIPHERS, hasItem("TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256")); + assertThat(XPackSettings.DEFAULT_CIPHERS, hasItem("TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256")); + } + + public void testChaCha20NotInCiphersOnPreJdk12() { + assumeTrue("Test is only valid on pre JDK 12 JVM", JavaVersion.current().compareTo(JavaVersion.parse("12")) < 0); + assertThat(XPackSettings.DEFAULT_CIPHERS, not(hasItem("TLS_CHACHA20_POLY1305_SHA256"))); + assertThat(XPackSettings.DEFAULT_CIPHERS, not(hasItem("TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"))); + assertThat(XPackSettings.DEFAULT_CIPHERS, not(hasItem("TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"))); + } + public void testPasswordHashingAlgorithmSettingValidation() { final boolean isPBKDF2Available = isSecretkeyFactoryAlgoAvailable("PBKDF2WithHMACSHA512"); final String pbkdf2Algo = randomFrom("PBKDF2_10000", "PBKDF2"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java index dc87cf744cb98..d544584376f47 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java @@ -5,13 +5,8 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; -import org.elasticsearch.ElasticsearchException; import 
org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.DeprecationHandler; @@ -29,9 +24,7 @@ import java.util.Collections; import java.util.Map; -import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; public class AggProviderTests extends AbstractSerializingTestCase { @@ -96,68 +89,6 @@ public void testEmptyAggMap() throws IOException { assertThat(e.getMessage(), equalTo("Datafeed aggregations are not parsable")); } - public void testSerializationBetweenBugVersion() throws IOException { - AggProvider tempAggProvider = createRandomValidAggProvider(); - AggProvider aggProviderWithEx = new AggProvider(tempAggProvider.getAggs(), tempAggProvider.getParsedAggs(), new IOException("ex")); - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(Version.V_6_6_2); - aggProviderWithEx.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) { - in.setVersion(Version.V_6_6_2); - AggProvider streamedAggProvider = AggProvider.fromStream(in); - assertThat(streamedAggProvider.getAggs(), equalTo(aggProviderWithEx.getAggs())); - assertThat(streamedAggProvider.getParsingException(), is(nullValue())); - - AggregatorFactories.Builder streamedParsedAggs = XContentObjectTransformer.aggregatorTransformer(xContentRegistry()) - .fromMap(streamedAggProvider.getAggs()); - assertThat(streamedParsedAggs, equalTo(aggProviderWithEx.getParsedAggs())); - assertThat(streamedAggProvider.getParsedAggs(), is(nullValue())); - } - } - } - - public void testSerializationBetweenEagerVersion() throws IOException { - AggProvider validAggProvider = createRandomValidAggProvider(); - - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(Version.V_6_0_0); - validAggProvider.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) { - in.setVersion(Version.V_6_0_0); - AggProvider streamedAggProvider = AggProvider.fromStream(in); - assertThat(streamedAggProvider.getAggs(), equalTo(validAggProvider.getAggs())); - assertThat(streamedAggProvider.getParsingException(), is(nullValue())); - assertThat(streamedAggProvider.getParsedAggs(), equalTo(validAggProvider.getParsedAggs())); - } - } - - try (BytesStreamOutput output = new BytesStreamOutput()) { - AggProvider aggProviderWithEx = new AggProvider(validAggProvider.getAggs(), - validAggProvider.getParsedAggs(), - new IOException("bad parsing")); - output.setVersion(Version.V_6_0_0); - IOException ex = expectThrows(IOException.class, () -> aggProviderWithEx.writeTo(output)); - assertThat(ex.getMessage(), equalTo("bad parsing")); - } - - try (BytesStreamOutput output = new BytesStreamOutput()) { - AggProvider aggProviderWithEx = new AggProvider(validAggProvider.getAggs(), - validAggProvider.getParsedAggs(), - new ElasticsearchException("bad parsing")); - output.setVersion(Version.V_6_0_0); - ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> aggProviderWithEx.writeTo(output)); - 
assertNotNull(ex.getCause()); - assertThat(ex.getCause().getMessage(), equalTo("bad parsing")); - } - - try (BytesStreamOutput output = new BytesStreamOutput()) { - AggProvider aggProviderWithOutParsed = new AggProvider(validAggProvider.getAggs(), null, null); - output.setVersion(Version.V_6_0_0); - ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> aggProviderWithOutParsed.writeTo(output)); - assertThat(ex.getMessage(), equalTo("Unsupported operation: parsed aggregations are null")); - } - } - @Override protected AggProvider mutateInstance(AggProvider instance) throws IOException { Exception parsingException = instance.getParsingException(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index 6b664777a2d86..062504cbfdc3c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -762,10 +762,10 @@ public void testSerializationOfComplexAggsBetweenVersions() throws IOException { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(Version.V_6_0_0); + output.setVersion(Version.CURRENT); datafeedConfig.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - in.setVersion(Version.V_6_0_0); + in.setVersion(Version.CURRENT); DatafeedConfig streamedDatafeedConfig = new DatafeedConfig(in); assertEquals(datafeedConfig, streamedDatafeedConfig); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java index 571c9e81a9068..6aa7487147ca1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java @@ -321,10 +321,10 @@ public void testSerializationOfComplexAggsBetweenVersions() throws IOException { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(Version.V_6_0_0); + output.setVersion(Version.CURRENT); datafeedUpdate.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - in.setVersion(Version.V_6_0_0); + in.setVersion(Version.CURRENT); DatafeedUpdate streamedDatafeedUpdate = new DatafeedUpdate(in); assertEquals(datafeedUpdate, streamedDatafeedUpdate); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProviderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProviderTests.java index fb6c2e280d975..8d113aba33579 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProviderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProviderTests.java @@ -5,13 +5,8 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; -import org.elasticsearch.ElasticsearchException; 
import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.DeprecationHandler; @@ -32,9 +27,7 @@ import java.util.Collections; import java.util.Map; -import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; public class QueryProviderTests extends AbstractSerializingTestCase { @@ -96,74 +89,6 @@ public void testEmptyQueryMap() throws IOException { assertThat(e.getMessage(), equalTo("Datafeed query is not parsable")); } - public void testSerializationBetweenBugVersion() throws IOException { - QueryProvider tempQueryProvider = createRandomValidQueryProvider(); - QueryProvider queryProviderWithEx = new QueryProvider(tempQueryProvider.getQuery(), - tempQueryProvider.getParsedQuery(), - new IOException("ex")); - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(Version.V_6_6_2); - queryProviderWithEx.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) { - in.setVersion(Version.V_6_6_2); - QueryProvider streamedQueryProvider = QueryProvider.fromStream(in); - assertThat(streamedQueryProvider.getQuery(), equalTo(queryProviderWithEx.getQuery())); - assertThat(streamedQueryProvider.getParsingException(), is(nullValue())); - - QueryBuilder streamedParsedQuery = XContentObjectTransformer.queryBuilderTransformer(xContentRegistry()) - .fromMap(streamedQueryProvider.getQuery()); - assertThat(streamedParsedQuery, equalTo(queryProviderWithEx.getParsedQuery())); - assertThat(streamedQueryProvider.getParsedQuery(), is(nullValue())); - } - } - } - - public void testSerializationBetweenEagerVersion() throws IOException { - QueryProvider validQueryProvider = createRandomValidQueryProvider(); - - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(Version.V_6_0_0); - validQueryProvider.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) { - in.setVersion(Version.V_6_0_0); - - QueryProvider streamedQueryProvider = QueryProvider.fromStream(in); - XContentObjectTransformer transformer = XContentObjectTransformer.queryBuilderTransformer(xContentRegistry()); - Map sourceQueryMapWithDefaults = transformer.toMap(transformer.fromMap(validQueryProvider.getQuery())); - - assertThat(streamedQueryProvider.getQuery(), equalTo(sourceQueryMapWithDefaults)); - assertThat(streamedQueryProvider.getParsingException(), is(nullValue())); - assertThat(streamedQueryProvider.getParsedQuery(), equalTo(validQueryProvider.getParsedQuery())); - } - } - - try (BytesStreamOutput output = new BytesStreamOutput()) { - QueryProvider queryProviderWithEx = new QueryProvider(validQueryProvider.getQuery(), - validQueryProvider.getParsedQuery(), - new IOException("bad parsing")); - output.setVersion(Version.V_6_0_0); - IOException ex = expectThrows(IOException.class, () -> queryProviderWithEx.writeTo(output)); - assertThat(ex.getMessage(), equalTo("bad parsing")); - } - - try (BytesStreamOutput output = new BytesStreamOutput()) { - 
QueryProvider queryProviderWithEx = new QueryProvider(validQueryProvider.getQuery(), - validQueryProvider.getParsedQuery(), - new ElasticsearchException("bad parsing")); - output.setVersion(Version.V_6_0_0); - ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> queryProviderWithEx.writeTo(output)); - assertNotNull(ex.getCause()); - assertThat(ex.getCause().getMessage(), equalTo("bad parsing")); - } - - try (BytesStreamOutput output = new BytesStreamOutput()) { - QueryProvider queryProviderWithOutParsed = new QueryProvider(validQueryProvider.getQuery(), null, null); - output.setVersion(Version.V_6_0_0); - ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> queryProviderWithOutParsed.writeTo(output)); - assertThat(ex.getMessage(), equalTo("Unsupported operation: parsed query is null")); - } - } - @Override protected QueryProvider mutateInstance(QueryProvider instance) throws IOException { Exception parsingException = instance.getParsingException(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java index e249b22a4a896..eb4f2c0bbc2da 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.VersionUtils; import java.util.ArrayList; import java.util.Arrays; @@ -91,7 +92,7 @@ public JobUpdate createRandom(String jobId, @Nullable Job job) { update.setModelSnapshotMinVersion(Version.CURRENT); } if (useInternalParser && randomBoolean()) { - update.setJobVersion(randomFrom(Version.CURRENT, Version.V_6_2_0, Version.V_6_1_0)); + update.setJobVersion(VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); } if (useInternalParser) { update.setClearFinishTime(randomBoolean()); @@ -213,7 +214,7 @@ public void testMergeWithJob() { updateBuilder.setCategorizationFilters(categorizationFilters); updateBuilder.setCustomSettings(customSettings); updateBuilder.setModelSnapshotId(randomAlphaOfLength(10)); - updateBuilder.setJobVersion(Version.V_6_1_0); + updateBuilder.setJobVersion(VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); JobUpdate update = updateBuilder.build(); Job.Builder jobBuilder = new Job.Builder("foo"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java index e66fea90f049b..90e4bacc3f8b1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java @@ -22,6 +22,8 @@ public class ModelSizeStatsTests extends AbstractSerializingTestCase productionModes = EnumSet.of(License.OperationMode.GOLD, License.OperationMode.PLATINUM, - License.OperationMode.STANDARD); - MetaData.Builder builder = MetaData.builder(); - TestUtils.putLicense(builder, license); - MetaData build = builder.build(); - if 
(productionModes.contains(license.operationMode()) == false) { - assertTrue(new TLSLicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.transport.ssl.enabled", true).build(), build)).isSuccess()); - } else { - assertTrue(new TLSLicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.transport.ssl.enabled", false).build(), build)).isFailure()); - assertEquals("Transport SSL must be enabled for setups with production licenses. Please set " + - "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + - "[xpack.security.enabled] to [false]", - new TLSLicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.transport.ssl.enabled", false).build(), build)).getMessage()); - } + , randomBoolean()).build(), MetaData.EMPTY_META_DATA)).isSuccess()); + } + + public void testBootstrapCheckFailureOnPremiumLicense() throws Exception { + final OperationMode mode = randomFrom(OperationMode.PLATINUM, OperationMode.GOLD, OperationMode.STANDARD); + final Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + // randomise between default-false & explicit-false + settings.put("xpack.security.transport.ssl.enabled", false); + } + if (randomBoolean()) { + // randomise between default-true & explicit-true + settings.put("xpack.security.enabled", true); + } + + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(mode, settings); + assertTrue("Expected bootstrap failure", result.isFailure()); + assertEquals("Transport SSL must be enabled if security is enabled on a [" + mode.description() + "] license. Please set " + + "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + + "[xpack.security.enabled] to [false]", + result.getMessage()); + } + + public void testBootstrapCheckSucceedsWithTlsEnabledOnPremiumLicense() throws Exception { + final OperationMode mode = randomFrom(OperationMode.PLATINUM, OperationMode.GOLD, OperationMode.STANDARD); + final Settings.Builder settings = Settings.builder().put("xpack.security.transport.ssl.enabled", true); + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(mode, settings); + assertSuccess(result); + } + + public void testBootstrapCheckFailureOnBasicLicense() throws Exception { + final Settings.Builder settings = Settings.builder().put("xpack.security.enabled", true); + if (randomBoolean()) { + // randomise between default-false & explicit-false + settings.put("xpack.security.transport.ssl.enabled", false); + } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.BASIC, settings); + assertTrue("Expected bootstrap failure", result.isFailure()); + assertEquals("Transport SSL must be enabled if security is enabled on a [basic] license. Please set " + + "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + + "[xpack.security.enabled] to [false]", + result.getMessage()); + } + + public void testBootstrapSucceedsIfSecurityIsNotEnabledOnBasicLicense() throws Exception { + final Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + // randomise between default-false & explicit-false + settings.put("xpack.security.enabled", false); + } + if (randomBoolean()) { + // it does not matter whether or not this is set, as security is not enabled. 
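
Taken together, the rewritten TLSLicenseBootstrapCheck and the surrounding tests reduce to a single rule. The following hypothetical condensation mirrors only what the tests assert (trial never fails; anything else fails exactly when security is enabled and transport TLS is off), not the production code path through XPackLicenseState.isTransportTlsRequired:

// Hypothetical condensation of the behaviour asserted by the surrounding tests.
final class TlsBootstrapRuleSketch {

    static boolean bootstrapCheckFails(String licenseType, boolean securityEnabled, boolean transportSslEnabled) {
        if (transportSslEnabled || "trial".equals(licenseType)) {
            return false;          // transport TLS on, or a trial license: the check always passes
        }
        return securityEnabled;    // otherwise it fails exactly when security is enabled without transport TLS
    }
}
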
+ settings.put("xpack.security.transport.ssl.enabled", randomBoolean()); } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.BASIC, settings); + assertSuccess(result); } + + public void testBootstrapSucceedsIfTlsIsEnabledOnBasicLicense() throws Exception { + final Settings.Builder settings = Settings.builder().put("xpack.security.transport.ssl.enabled", true); + if (randomBoolean()) { + // it does not matter whether or not this is set, as TLS is enabled. + settings.put("xpack.security.enabled", randomBoolean()); + } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.BASIC, settings); + assertSuccess(result); + } + + public void testBootstrapCheckAlwaysSucceedsOnTrialLicense() throws Exception { + final Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + // it does not matter whether this is set, or to which value. + settings.put("xpack.security.enabled", randomBoolean()); + } + if (randomBoolean()) { + // it does not matter whether this is set, or to which value. + settings.put("xpack.security.transport.ssl.enabled", randomBoolean()); + } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.TRIAL, settings); + assertSuccess(result); + } + + public BootstrapCheck.BootstrapCheckResult runBootstrapCheck(OperationMode mode, Settings.Builder settings) throws Exception { + final License license = TestUtils.generateSignedLicense(mode.description(), TimeValue.timeValueHours(24)); + MetaData.Builder builder = MetaData.builder(); + TestUtils.putLicense(builder, license); + MetaData metaData = builder.build(); + final BootstrapContext context = createTestContext(settings.build(), metaData); + return new TLSLicenseBootstrapCheck().check(context); + } + + public void assertSuccess(BootstrapCheck.BootstrapCheckResult result) { + if (result.isFailure()) { + fail("Bootstrap check failed unexpectedly: " + result.getMessage()); + } + } + } diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index db07e8513cc2d..7ffa5391b7a4a 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -292,7 +292,7 @@ public static void removeIndices() throws Exception { wipeIndices(); } - public void wipeDataFrameTransforms() throws IOException, InterruptedException { + public void wipeDataFrameTransforms() throws IOException { List> transformConfigs = getDataFrameTransforms(); for (Map transformConfig : transformConfigs) { String transformId = (String) transformConfig.get("id"); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java index 24ce173b37567..4f209c5a9f3f4 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java @@ 
-10,7 +10,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.junit.Before; @@ -72,7 +72,7 @@ public void testUsage() throws Exception { Request statsExistsRequest = new Request("GET", DataFrameInternalIndex.INDEX_NAME+"/_search?q=" + INDEX_DOC_TYPE.getPreferredName() + ":" + - DataFrameIndexerTransformStats.NAME); + DataFrameTransformStateAndStats.NAME); // Verify that we have our two stats documents assertBusy(() -> { Map hasStatsMap = entityAsMap(client().performRequest(statsExistsRequest)); @@ -100,7 +100,6 @@ public void testUsage() throws Exception { expectedStats.merge(statName, statistic, Integer::sum); } - usageResponse = client().performRequest(new Request("GET", "_xpack/usage")); usageAsMap = entityAsMap(usageResponse); @@ -109,7 +108,8 @@ public void testUsage() throws Exception { assertEquals(1, XContentMapValues.extractValue("data_frame.transforms.started", usageAsMap)); assertEquals(2, XContentMapValues.extractValue("data_frame.transforms.stopped", usageAsMap)); for(String statName : PROVIDED_STATS) { - assertEquals(expectedStats.get(statName), XContentMapValues.extractValue("data_frame.stats."+statName, usageAsMap)); + assertEquals("Incorrect stat " + statName, + expectedStats.get(statName), XContentMapValues.extractValue("data_frame.stats." + statName, usageAsMap)); } } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java index b7e6c235f8e6c..34343e5fe8820 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java @@ -76,10 +76,8 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.function.Supplier; import java.util.function.UnaryOperator; @@ -90,11 +88,7 @@ public class DataFrame extends Plugin implements ActionPlugin, PersistentTaskPlu public static final String NAME = "data_frame"; public static final String TASK_THREAD_POOL_NAME = "data_frame_indexing"; - // list of headers that will be stored when a transform is created - public static final Set HEADER_FILTERS = new HashSet<>( - Arrays.asList("es-security-runas-user", "_xpack_security_authentication")); - - private static final Logger logger = LogManager.getLogger(XPackPlugin.class); + private static final Logger logger = LogManager.getLogger(DataFrame.class); private final boolean enabled; private final Settings settings; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java index 029fe88766df5..82b8a6060e44e 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java @@ -35,6 +35,7 @@ import 
org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; @@ -176,6 +177,7 @@ static DataFrameIndexerTransformStats parseSearchAggs(SearchResponse searchRespo for(String statName : PROVIDED_STATS) { Aggregation agg = searchResponse.getAggregations().get(statName); + if (agg instanceof NumericMetricsAggregation.SingleValue) { statisticsList.add((long)((NumericMetricsAggregation.SingleValue)agg).value()); } else { @@ -197,14 +199,15 @@ static DataFrameIndexerTransformStats parseSearchAggs(SearchResponse searchRespo static void getStatisticSummations(Client client, ActionListener statsListener) { QueryBuilder queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() .filter(QueryBuilders.termQuery(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), - DataFrameIndexerTransformStats.NAME))); + DataFrameTransformStateAndStats.NAME))); SearchRequestBuilder requestBuilder = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME) .setSize(0) .setQuery(queryBuilder); + final String path = DataFrameField.STATS_FIELD.getPreferredName() + "."; for(String statName : PROVIDED_STATS) { - requestBuilder.addAggregation(AggregationBuilders.sum(statName).field(statName)); + requestBuilder.addAggregation(AggregationBuilders.sum(statName).field(path + statName)); } ActionListener getStatisticSummationsListener = ActionListener.wrap( @@ -213,6 +216,7 @@ static void getStatisticSummations(Client client, ActionListener { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java index bb01da4c7e50a..df2d09a875d19 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java @@ -9,51 +9,29 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; -import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import 
org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction; import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction.Request; import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction.Response; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.xpack.dataframe.checkpoint.DataFrameTransformsCheckpointService; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; -import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; @@ -69,18 +47,16 @@ public class TransportGetDataFrameTransformsStatsAction extends private static final Logger logger = LogManager.getLogger(TransportGetDataFrameTransformsStatsAction.class); - private final Client client; private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; private final DataFrameTransformsCheckpointService transformsCheckpointService; @Inject public TransportGetDataFrameTransformsStatsAction(TransportService transportService, ActionFilters actionFilters, - ClusterService clusterService, Client client, + ClusterService clusterService, DataFrameTransformsConfigManager dataFrameTransformsConfigManager, DataFrameTransformsCheckpointService transformsCheckpointService) { super(GetDataFrameTransformsStatsAction.NAME, clusterService, transportService, actionFilters, Request::new, Response::new, Response::new, ThreadPool.Names.SAME); - this.client = client; this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; this.transformsCheckpointService = transformsCheckpointService; } @@ -157,32 +133,14 @@ private void collectStatsForTransformsWithoutTasks(Request request, // Small assurance that we are at least below the max. Terms search has a hard limit of 10k, we should at least be below that. 
assert transformsWithoutTasks.size() <= Request.MAX_SIZE_RETURN; - ActionListener searchStatsListener = ActionListener.wrap( - searchResponse -> { - List nodeFailures = new ArrayList<>(response.getNodeFailures()); - if (searchResponse.getShardFailures().length > 0) { - for(ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) { - String nodeId = ""; - if (shardSearchFailure.shard() != null) { - nodeId = shardSearchFailure.shard().getNodeId(); - } - nodeFailures.add(new FailedNodeException(nodeId, shardSearchFailure.toString(), shardSearchFailure.getCause())); - } - logger.error("transform statistics document search returned shard failures: {}", - Arrays.toString(searchResponse.getShardFailures())); - } + ActionListener> searchStatsListener = ActionListener.wrap( + stats -> { List allStateAndStats = response.getTransformsStateAndStats(); - for(SearchHit hit : searchResponse.getHits().getHits()) { - BytesReference source = hit.getSourceRef(); - try { - DataFrameIndexerTransformStats stats = parseFromSource(source); - allStateAndStats.add(DataFrameTransformStateAndStats.initialStateAndStats(stats.getTransformId(), stats)); - transformsWithoutTasks.remove(stats.getTransformId()); - } catch (IOException e) { - listener.onFailure(new ElasticsearchParseException("Could not parse data frame transform stats", e)); - return; - } - } + allStateAndStats.addAll(stats); + transformsWithoutTasks.removeAll( + stats.stream().map(DataFrameTransformStateAndStats::getId).collect(Collectors.toSet())); + + // Transforms that have not been started and have no state or stats. transformsWithoutTasks.forEach(transformId -> allStateAndStats.add(DataFrameTransformStateAndStats.initialStateAndStats(transformId))); @@ -190,7 +148,7 @@ private void collectStatsForTransformsWithoutTasks(Request request, // it can easily become arbitrarily ordered based on which transforms don't have a task or stats docs allStateAndStats.sort(Comparator.comparing(DataFrameTransformStateAndStats::getId)); - listener.onResponse(new Response(allStateAndStats, response.getTaskFailures(), nodeFailures)); + listener.onResponse(new Response(allStateAndStats, response.getTaskFailures(), response.getNodeFailures())); }, e -> { if (e instanceof IndexNotFoundException) { @@ -201,26 +159,6 @@ private void collectStatsForTransformsWithoutTasks(Request request, } ); - QueryBuilder builder = QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() - .filter(QueryBuilders.termsQuery(DataFrameField.ID.getPreferredName(), transformsWithoutTasks)) - .filter(QueryBuilders.termQuery(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), DataFrameIndexerTransformStats.NAME))); - - SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME) - .addSort(DataFrameField.ID.getPreferredName(), SortOrder.ASC) - .setQuery(builder) - .request(); - - ClientHelper.executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ClientHelper.DATA_FRAME_ORIGIN, - searchRequest, - searchStatsListener, client::search); - } - - private static DataFrameIndexerTransformStats parseFromSource(BytesReference source) throws IOException { - try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { - return DataFrameIndexerTransformStats.fromXContent(parser); - } + dataFrameTransformsConfigManager.getTransformStats(transformsWithoutTasks, searchStatsListener); } } diff --git 
a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java index 9f016b58f3b5f..f8e3a3f1e852f 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java @@ -59,7 +59,7 @@ protected void doExecute(Task task, StartDataFrameTransformTaskAction.Request re protected void taskOperation(StartDataFrameTransformTaskAction.Request request, DataFrameTransformTask transformTask, ActionListener listener) { if (transformTask.getTransformId().equals(request.getId())) { - transformTask.start(listener); + transformTask.start(null, listener); } else { listener.onFailure(new RuntimeException("ID of data frame transform task [" + transformTask.getTransformId() + "] does not match request's ID [" + request.getId() + "]")); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java index 17a49d8b7e834..e28f8005448d9 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java @@ -17,6 +17,9 @@ import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; @@ -50,7 +53,7 @@ public final class DataFrameInternalIndex { public static final String RAW = "raw"; // data types - public static final String DOUBLE = "double"; + public static final String FLOAT = "float"; public static final String LONG = "long"; public static final String KEYWORD = "keyword"; @@ -129,7 +132,7 @@ private static XContentBuilder mappings() throws IOException { // add the schema for transform configurations addDataFrameTransformsConfigMappings(builder); // add the schema for transform stats - addDataFrameTransformsStatsMappings(builder); + addDataFrameTransformStateAndStatsMappings(builder); // end type builder.endObject(); // end properties @@ -140,37 +143,76 @@ private static XContentBuilder mappings() throws IOException { } - private static XContentBuilder addDataFrameTransformsStatsMappings(XContentBuilder builder) throws IOException { + private static XContentBuilder addDataFrameTransformStateAndStatsMappings(XContentBuilder builder) throws IOException { return builder - .startObject(DataFrameIndexerTransformStats.NUM_PAGES.getPreferredName()) - .field(TYPE, LONG) - .endObject() - .startObject(DataFrameIndexerTransformStats.NUM_INPUT_DOCUMENTS.getPreferredName()) - .field(TYPE, LONG) - .endObject() - 
.startObject(DataFrameIndexerTransformStats.NUM_OUTPUT_DOCUMENTS.getPreferredName()) - .field(TYPE, LONG) - .endObject() - .startObject(DataFrameIndexerTransformStats.NUM_INVOCATIONS.getPreferredName()) - .field(TYPE, LONG) - .endObject() - .startObject(DataFrameIndexerTransformStats.INDEX_TIME_IN_MS.getPreferredName()) - .field(TYPE, LONG) - .endObject() - .startObject(DataFrameIndexerTransformStats.SEARCH_TIME_IN_MS.getPreferredName()) - .field(TYPE, LONG) - .endObject() - .startObject(DataFrameIndexerTransformStats.INDEX_TOTAL.getPreferredName()) - .field(TYPE, LONG) - .endObject() - .startObject(DataFrameIndexerTransformStats.SEARCH_TOTAL.getPreferredName()) - .field(TYPE, LONG) + .startObject(DataFrameTransformStateAndStats.STATE_FIELD.getPreferredName()) + .startObject(PROPERTIES) + .startObject(DataFrameTransformState.TASK_STATE.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DataFrameTransformState.INDEXER_STATE.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DataFrameTransformState.CURRENT_POSITION.getPreferredName()) + .field(ENABLED, false) + .endObject() + .startObject(DataFrameTransformState.CHECKPOINT.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameTransformState.REASON.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DataFrameTransformState.PROGRESS.getPreferredName()) + .startObject(PROPERTIES) + .startObject(DataFrameTransformProgress.TOTAL_DOCS.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameTransformProgress.DOCS_REMAINING.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameTransformProgress.PERCENT_COMPLETE) + .field(TYPE, FLOAT) + .endObject() + .endObject() + .endObject() + .endObject() .endObject() - .startObject(DataFrameIndexerTransformStats.SEARCH_FAILURES.getPreferredName()) - .field(TYPE, LONG) + .startObject(DataFrameField.STATS_FIELD.getPreferredName()) + .startObject(PROPERTIES) + .startObject(DataFrameIndexerTransformStats.NUM_PAGES.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.NUM_INPUT_DOCUMENTS.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.NUM_OUTPUT_DOCUMENTS.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.NUM_INVOCATIONS.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.INDEX_TIME_IN_MS.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.SEARCH_TIME_IN_MS.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.INDEX_TOTAL.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.SEARCH_TOTAL.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.SEARCH_FAILURES.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.INDEX_FAILURES.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .endObject() .endObject() - .startObject(DataFrameIndexerTransformStats.INDEX_FAILURES.getPreferredName()) - .field(TYPE, LONG) + .startObject(DataFrameTransformStateAndStats.CHECKPOINTING_INFO_FIELD.getPreferredName()) + .field(ENABLED, false) .endObject(); } diff --git 
a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java index e8c1e012b7b30..ab893545a0d50 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java @@ -44,13 +44,14 @@ import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; @@ -274,13 +275,13 @@ public void deleteTransform(String transformId, ActionListener listener })); } - public void putOrUpdateTransformStats(DataFrameIndexerTransformStats stats, ActionListener listener) { + public void putOrUpdateTransformStats(DataFrameTransformStateAndStats stats, ActionListener listener) { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { XContentBuilder source = stats.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); IndexRequest indexRequest = new IndexRequest(DataFrameInternalIndex.INDEX_NAME) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .id(DataFrameIndexerTransformStats.documentId(stats.getTransformId())) + .id(DataFrameTransformStateAndStats.documentId(stats.getTransformId())) .source(source); executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap( @@ -297,8 +298,8 @@ public void putOrUpdateTransformStats(DataFrameIndexerTransformStats stats, Acti } } - public void getTransformStats(String transformId, ActionListener resultListener) { - GetRequest getRequest = new GetRequest(DataFrameInternalIndex.INDEX_NAME, DataFrameIndexerTransformStats.documentId(transformId)); + public void getTransformStats(String transformId, ActionListener resultListener) { + GetRequest getRequest = new GetRequest(DataFrameInternalIndex.INDEX_NAME, DataFrameTransformStateAndStats.documentId(transformId)); executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, GetAction.INSTANCE, getRequest, ActionListener.wrap(getResponse -> { if (getResponse.isExists() == false) { @@ -310,7 +311,7 @@ public void getTransformStats(String transformId, ActionListener transformIds, ActionListener> listener) { + + QueryBuilder builder = QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() + .filter(QueryBuilders.termsQuery(DataFrameField.ID.getPreferredName(), transformIds)) + .filter(QueryBuilders.termQuery(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), DataFrameTransformStateAndStats.NAME))); + + SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME) + .addSort(DataFrameField.ID.getPreferredName(), SortOrder.ASC) + .setQuery(builder) + .request(); + + 
executeAsyncWithOrigin(client.threadPool().getThreadContext(), DATA_FRAME_ORIGIN, searchRequest, + ActionListener.wrap( + searchResponse -> { + List stats = new ArrayList<>(); + for (SearchHit hit : searchResponse.getHits().getHits()) { + BytesReference source = hit.getSourceRef(); + try (InputStream stream = source.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + stats.add(DataFrameTransformStateAndStats.fromXContent(parser)); + } catch (IOException e) { + listener.onFailure( + new ElasticsearchParseException("failed to parse data frame stats from search hit", e)); + return; + } + } + + listener.onResponse(stats); + }, + e -> { + if (e.getClass() == IndexNotFoundException.class) { + listener.onResponse(Collections.emptyList()); + } else { + listener.onFailure(e); + } + } + ), client::search); + } + private void parseTransformLenientlyFromSource(BytesReference source, String transformId, ActionListener transformListener) { try (InputStream stream = source.streamInput(); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java index d0f15197c3cca..9ed8da61d8feb 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java @@ -26,10 +26,10 @@ import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; @@ -106,46 +106,47 @@ static List verifyIndicesPrimaryShardsAreActive(ClusterState clusterStat protected void nodeOperation(AllocatedPersistentTask task, @Nullable DataFrameTransform params, PersistentTaskState state) { final String transformId = params.getId(); final DataFrameTransformTask buildTask = (DataFrameTransformTask) task; - final SchedulerEngine.Job schedulerJob = new SchedulerEngine.Job(DataFrameTransformTask.SCHEDULE_NAME + "_" + transformId, - next()); - final DataFrameTransformState transformState = (DataFrameTransformState) state; + final DataFrameTransformState transformPTaskState = (DataFrameTransformState) state; final DataFrameTransformTask.ClientDataFrameIndexerBuilder indexerBuilder = - new DataFrameTransformTask.ClientDataFrameIndexerBuilder() + new DataFrameTransformTask.ClientDataFrameIndexerBuilder(transformId) .setAuditor(auditor) .setClient(client) - .setIndexerState(currentIndexerState(transformState)) - 
.setInitialPosition(transformState == null ? null : transformState.getPosition()) - // If the state is `null` that means this is a "first run". We can safely assume the - // task will attempt to gather the initial progress information - // if we have state, this may indicate the previous execution node crashed, so we should attempt to retrieve - // the progress from state to keep an accurate measurement of our progress - .setProgress(transformState == null ? null : transformState.getProgress()) + .setIndexerState(currentIndexerState(transformPTaskState)) + // If the transform persistent task state is `null` that means this is a "first run". + // If we have state then the task has relocated from another node in which case this + // state is preferred + .setInitialPosition(transformPTaskState == null ? null : transformPTaskState.getPosition()) + .setProgress(transformPTaskState == null ? null : transformPTaskState.getProgress()) .setTransformsCheckpointService(dataFrameTransformsCheckpointService) - .setTransformsConfigManager(transformsConfigManager) - .setTransformId(transformId); + .setTransformsConfigManager(transformsConfigManager); ActionListener startTaskListener = ActionListener.wrap( response -> logger.info("Successfully completed and scheduled task in node operation"), failure -> logger.error("Failed to start task ["+ transformId +"] in node operation", failure) ); + Long previousCheckpoint = transformPTaskState != null ? transformPTaskState.getCheckpoint() : null; + // <3> Set the previous stats (if they exist), initialize the indexer, start the task (If it is STOPPED) // Since we don't create the task until `_start` is called, if we see that the task state is stopped, attempt to start // Schedule execution regardless - ActionListener transformStatsActionListener = ActionListener.wrap( - stats -> { - indexerBuilder.setInitialStats(stats); - buildTask.initializeIndexer(indexerBuilder); - scheduleAndStartTask(buildTask, schedulerJob, startTaskListener); + ActionListener transformStatsActionListener = ActionListener.wrap( + stateAndStats -> { + indexerBuilder.setInitialStats(stateAndStats.getTransformStats()); + if (transformPTaskState == null) { // prefer the persistent task state + indexerBuilder.setInitialPosition(stateAndStats.getTransformState().getPosition()); + indexerBuilder.setProgress(stateAndStats.getTransformState().getProgress()); + } + + final Long checkpoint = previousCheckpoint != null ? 
previousCheckpoint : stateAndStats.getTransformState().getCheckpoint(); + startTask(buildTask, indexerBuilder, checkpoint, startTaskListener); }, error -> { if (error instanceof ResourceNotFoundException == false) { logger.error("Unable to load previously persisted statistics for transform [" + params.getId() + "]", error); } - indexerBuilder.setInitialStats(new DataFrameIndexerTransformStats(transformId)); - buildTask.initializeIndexer(indexerBuilder); - scheduleAndStartTask(buildTask, schedulerJob, startTaskListener); + startTask(buildTask, indexerBuilder, previousCheckpoint, startTaskListener); } ); @@ -218,30 +219,24 @@ private void markAsFailed(DataFrameTransformTask task, String reason) { } } - private void scheduleAndStartTask(DataFrameTransformTask buildTask, - SchedulerEngine.Job schedulerJob, - ActionListener listener) { - // Note that while the task is added to the scheduler here, the internal state will prevent - // it from doing any work until the task is "started" via the StartTransform api - schedulerEngine.register(buildTask); - schedulerEngine.add(schedulerJob); - logger.info("Data frame transform [{}] created.", buildTask.getTransformId()); + private void startTask(DataFrameTransformTask buildTask, + DataFrameTransformTask.ClientDataFrameIndexerBuilder indexerBuilder, + Long previousCheckpoint, + ActionListener listener) { // If we are stopped, and it is an initial run, this means we have never been started, // attempt to start the task + + buildTask.initializeIndexer(indexerBuilder); + // TODO isInitialRun is false after relocation?? if (buildTask.getState().getTaskState().equals(DataFrameTransformTaskState.STOPPED) && buildTask.isInitialRun()) { - buildTask.start(listener); + logger.info("Data frame transform [{}] created.", buildTask.getTransformId()); + buildTask.start(previousCheckpoint, listener); } else { logger.debug("No need to start task. 
Its current state is: {}", buildTask.getState().getIndexerState()); listener.onResponse(new StartDataFrameTransformTaskAction.Response(true)); } } - static SchedulerEngine.Schedule next() { - return (startTime, now) -> { - return now + 1000; // to be fixed, hardcode something - }; - } - @Override protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId, PersistentTasksCustomMetaData.PersistentTask persistentTask, Map headers) { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index bfe0e4f4d77b1..9df6b5e3ab337 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -29,9 +29,11 @@ import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction.Response; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.indexing.IndexerState; @@ -181,7 +183,13 @@ boolean isInitialRun() { return getIndexer() != null && getIndexer().initialRun(); } - public synchronized void start(ActionListener listener) { + /** + * Start the background indexer and set the task's state to started + * @param startingCheckpoint Set the current checkpoint to this value. If null the + * current checkpoint is not set + * @param listener Started listener + */ + public synchronized void start(Long startingCheckpoint, ActionListener listener) { if (getIndexer() == null) { listener.onFailure(new ElasticsearchException("Task for transform [{}] not fully initialized. 
Try again later", getTransformId())); @@ -195,6 +203,9 @@ public synchronized void start(ActionListener listener) { } stateReason.set(null); taskState.set(DataFrameTransformTaskState.STARTED); + if (startingCheckpoint != null) { + currentCheckpoint.set(startingCheckpoint); + } final DataFrameTransformState state = new DataFrameTransformState( DataFrameTransformTaskState.STARTED, @@ -208,6 +219,10 @@ public synchronized void start(ActionListener listener) { persistStateToClusterState(state, ActionListener.wrap( task -> { auditor.info(transform.getId(), "Updated state to [" + state.getTaskState() + "]"); + long now = System.currentTimeMillis(); + // kick off the indexer + triggered(new Event(schedulerJobName(), now, now)); + registerWithSchedulerJob(); listener.onResponse(new StartDataFrameTransformTaskAction.Response(true)); }, exc -> { @@ -238,7 +253,7 @@ public synchronized void triggered(Event event) { return; } // for now no rerun, so only trigger if checkpoint == 0 - if (currentCheckpoint.get() == 0 && event.getJobName().equals(SCHEDULE_NAME + "_" + transform.getId())) { + if (currentCheckpoint.get() == 0 && event.getJobName().equals(schedulerJobName())) { logger.debug("Data frame indexer [{}] schedule has triggered, state: [{}]", event.getJobName(), getIndexer().getState()); getIndexer().maybeTriggerAsyncJob(System.currentTimeMillis()); } @@ -249,13 +264,7 @@ public synchronized void triggered(Event event) { * This tries to remove the job from the scheduler and completes the persistent task */ synchronized void shutdown() { - try { - schedulerEngine.remove(SCHEDULE_NAME + "_" + transform.getId()); - schedulerEngine.unregister(this); - } catch (Exception e) { - markAsFailed(e); - return; - } + deregisterSchedulerJob(); markAsCompleted(); } @@ -311,6 +320,27 @@ public synchronized void onCancelled() { } } + private void registerWithSchedulerJob() { + schedulerEngine.register(this); + final SchedulerEngine.Job schedulerJob = new SchedulerEngine.Job(schedulerJobName(), next()); + schedulerEngine.add(schedulerJob); + } + + private void deregisterSchedulerJob() { + schedulerEngine.remove(schedulerJobName()); + schedulerEngine.unregister(this); + } + + private String schedulerJobName() { + return DataFrameTransformTask.SCHEDULE_NAME + "_" + getTransformId(); + } + + private SchedulerEngine.Schedule next() { + return (startTime, now) -> { + return now + 1000; // to be fixed, hardcode something + }; + } + synchronized void initializeIndexer(ClientDataFrameIndexerBuilder indexerBuilder) { indexer.set(indexerBuilder.build(this)); } @@ -328,6 +358,11 @@ static class ClientDataFrameIndexerBuilder { private Map initialPosition; private DataFrameTransformProgress progress; + ClientDataFrameIndexerBuilder(String transformId) { + this.transformId = transformId; + this.initialStats = new DataFrameIndexerTransformStats(transformId); + } + ClientDataFrameIndexer build(DataFrameTransformTask parentTask) { return new ClientDataFrameIndexer(this.transformId, this.transformsConfigManager, @@ -519,7 +554,9 @@ protected void doSaveState(IndexerState indexerState, Map positi task -> { // Only persist the stats if something has actually changed if (previouslyPersistedStats == null || previouslyPersistedStats.equals(getStats()) == false) { - transformsConfigManager.putOrUpdateTransformStats(getStats(), + transformsConfigManager.putOrUpdateTransformStats( + new DataFrameTransformStateAndStats(transformId, state, getStats(), + DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null 
ActionListener.wrap( r -> { previouslyPersistedStats = getStats(); @@ -580,7 +617,18 @@ protected void onFinish(ActionListener listener) { protected void onStop() { auditor.info(transformConfig.getId(), "Indexer has stopped"); logger.info("Data frame transform [{}] indexer has stopped", transformConfig.getId()); - transformTask.shutdown(); + transformsConfigManager.putOrUpdateTransformStats( + new DataFrameTransformStateAndStats(transformId, transformTask.getState(), getStats(), + DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null + ActionListener.wrap( + r -> { + transformTask.shutdown(); + }, + statsExc -> { + transformTask.shutdown(); + logger.error("Updating stats of transform [" + transformConfig.getId() + "] failed", statsExc); + } + )); } @Override diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java index 36ae4f3f162a0..9c7af3efa5333 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java @@ -14,12 +14,17 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointTests; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStatsTests; import org.elasticsearch.xpack.dataframe.DataFrameSingleNodeTestCase; import org.junit.Before; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.List; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -217,4 +222,40 @@ public void testExpandIds() throws Exception { }); } + + public void testStateAndStats() throws InterruptedException { + String transformId = "transform_test_stats_create_read_update"; + + DataFrameTransformStateAndStats stateAndStats = + DataFrameTransformStateAndStatsTests.randomDataFrameTransformStateAndStats(transformId); + + assertAsync(listener -> transformsConfigManager.putOrUpdateTransformStats(stateAndStats, listener), Boolean.TRUE, null, null); + assertAsync(listener -> transformsConfigManager.getTransformStats(transformId, listener), stateAndStats, null, null); + + DataFrameTransformStateAndStats updated = + DataFrameTransformStateAndStatsTests.randomDataFrameTransformStateAndStats(transformId); + assertAsync(listener -> transformsConfigManager.putOrUpdateTransformStats(updated, listener), Boolean.TRUE, null, null); + assertAsync(listener -> transformsConfigManager.getTransformStats(transformId, listener), updated, null, null); + } + + public void testGetStateAndStatsMultiple() throws InterruptedException { + int numStats = randomInt(5); + List expectedStats = new ArrayList<>(); + for (int i=0; i transformsConfigManager.putOrUpdateTransformStats(stat, listener), Boolean.TRUE, null, null); + } + + // remove one of the put stats so we don't retrieve all + if (expectedStats.size() > 1) { +
expectedStats.remove(expectedStats.size() -1); + } + List ids = expectedStats.stream().map(DataFrameTransformStateAndStats::getId).collect(Collectors.toList()); + + // get stats will be ordered by id + expectedStats.sort(Comparator.comparing(DataFrameTransformStateAndStats::getId)); + assertAsync(listener -> transformsConfigManager.getTransformStats(ids, listener), expectedStats, null, null); + } } diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index b0f5a556ac627..57ded2d069d30 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; import java.util.List; @@ -19,8 +18,7 @@ public class IndexDeprecationChecksTests extends ESTestCase { public void testOldIndicesCheck() { - Version createdWith = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, - VersionUtils.getPreviousVersion(Version.V_7_0_0)); + Version createdWith = Version.fromString("1.0.0"); IndexMetaData indexMetaData = IndexMetaData.builder("test") .settings(settings(createdWith)) .numberOfShards(1) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java index 03860ea9ae044..98d089e544b4e 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java @@ -124,7 +124,7 @@ public void testTooManyByFields() throws Exception { // Assert we haven't violated the limit too much GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); - assertThat(modelSizeStats.getModelBytes(), lessThan(31500000L)); + assertThat(modelSizeStats.getModelBytes(), lessThan(35000000L)); assertThat(modelSizeStats.getModelBytes(), greaterThan(25000000L)); assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } @@ -173,7 +173,7 @@ public void testTooManyByAndOverFields() throws Exception { // Assert we haven't violated the limit too much GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); - assertThat(modelSizeStats.getModelBytes(), lessThan(31500000L)); + assertThat(modelSizeStats.getModelBytes(), lessThan(33000000L)); assertThat(modelSizeStats.getModelBytes(), greaterThan(24000000L)); assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } @@ -223,7 +223,7 @@ public void testManyDistinctOverFields() throws Exception { // Assert we haven't violated the limit too much GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); ModelSizeStats 
modelSizeStats = jobStats.getModelSizeStats(); - assertThat(modelSizeStats.getModelBytes(), lessThan(116000000L)); + assertThat(modelSizeStats.getModelBytes(), lessThan(117000000L)); assertThat(modelSizeStats.getModelBytes(), greaterThan(90000000L)); assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 714b2712367a4..de945b9bc6c3d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -300,7 +300,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu public static final Setting MIN_DISK_SPACE_OFF_HEAP = Setting.byteSizeSetting("xpack.ml.min_disk_space_off_heap", new ByteSizeValue(5, ByteSizeUnit.GB), Setting.Property.NodeScope); - private static final Logger logger = LogManager.getLogger(XPackPlugin.class); + private static final Logger logger = LogManager.getLogger(MachineLearning.class); private final Settings settings; private final Environment env; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java index d1673dd3c914c..1d230d93792fc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java @@ -40,7 +40,6 @@ import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; -import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; @@ -513,18 +512,7 @@ public static Job updateJobForMigration(Job job) { String version = job.getJobVersion() != null ? job.getJobVersion().toString() : null; custom.put(MIGRATED_FROM_VERSION, version); builder.setCustomSettings(custom); - // Increase the model memory limit for 6.1 - 6.3 jobs Version jobVersion = job.getJobVersion(); - if (jobVersion != null && jobVersion.onOrAfter(Version.V_6_1_0) && jobVersion.before(Version.V_6_3_0)) { - // Increase model memory limit if < 512MB - if (job.getAnalysisLimits() != null && job.getAnalysisLimits().getModelMemoryLimit() != null && - job.getAnalysisLimits().getModelMemoryLimit() < 512L) { - long updatedModelMemoryLimit = (long) (job.getAnalysisLimits().getModelMemoryLimit() * 1.3); - AnalysisLimits limits = new AnalysisLimits(updatedModelMemoryLimit, - job.getAnalysisLimits().getCategorizationExamplesLimit()); - builder.setAnalysisLimits(limits); - } - } // Pre v5.5 (ml beta) jobs do not have a version. // These jobs cannot be opened, we rely on the missing version // to indicate this. 
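A minimal illustrative sketch (not part of the patch) of the behaviour the MlConfigMigrator hunk above leaves in place: migration now only records the version a job was migrated from in its custom settings and no longer bumps the model memory limit of 6.1-6.3 jobs. The snippet assumes an arbitrary pre-7.0 Job instance named "job" and that MlConfigMigrator.MIGRATED_FROM_VERSION is visible to the caller.

// Hedged sketch, Java; assumptions noted in the lead-in above.
import java.util.Objects;

import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.ml.MlConfigMigrator;

class UpdateJobForMigrationSketch {
    static void illustrate(Job job) {
        Job migrated = MlConfigMigrator.updateJobForMigration(job);
        // The job's original version is recorded in its custom settings
        // (it stays null for pre-5.5 jobs, which cannot be opened anyway).
        Object from = migrated.getCustomSettings().get(MlConfigMigrator.MIGRATED_FROM_VERSION);
        assert job.getJobVersion() == null || from != null;
        // The analysis limits are now carried over unchanged, even for 6.1-6.3 jobs.
        assert Objects.equals(migrated.getAnalysisLimits(), job.getAnalysisLimits());
    }
}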
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java index 14e434099f843..a81ee5f86e982 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java @@ -125,7 +125,7 @@ private void getForecastRequestStats(String jobId, String forecastId, ActionList } static void validate(Job job, ForecastJobAction.Request request) { - if (job.getJobVersion() == null || job.getJobVersion().before(Version.V_6_1_0)) { + if (job.getJobVersion() == null || job.getJobVersion().before(Version.fromString("6.1.0"))) { throw ExceptionsHelper.badRequestException( "Cannot run forecast because jobs created prior to version 6.1 are not supported"); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java index b476e3e465463..36e5e91b4326b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java @@ -76,6 +76,12 @@ static TextLogFileStructureFinder makeTextLogFileStructureFinder(List ex } // Don't add the last message, as it might be partial and mess up subsequent pattern finding + if (sampleMessages.isEmpty()) { + throw new IllegalArgumentException("Failed to create more than one message from the sample lines provided. (The " + + "last is discarded in case the sample is incomplete.) 
If your sample does contain multiple messages the " + + "problem is probably that the primary timestamp format has been incorrectly detected, so try overriding it."); + } + FileStructure.Builder structureBuilder = new FileStructure.Builder(FileStructure.Format.SEMI_STRUCTURED_TEXT) .setCharset(charsetName) .setHasByteOrderMarker(hasByteOrderMarker) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java index 29d141717ccc8..4516686b65202 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java @@ -294,8 +294,14 @@ private void notifyModelMemoryStatusChange(Context context, ModelSizeStats model if (memoryStatus == ModelSizeStats.MemoryStatus.SOFT_LIMIT) { auditor.warning(context.jobId, Messages.getMessage(Messages.JOB_AUDIT_MEMORY_STATUS_SOFT_LIMIT)); } else if (memoryStatus == ModelSizeStats.MemoryStatus.HARD_LIMIT) { - auditor.error(context.jobId, Messages.getMessage(Messages.JOB_AUDIT_MEMORY_STATUS_HARD_LIMIT, + if (modelSizeStats.getModelBytesMemoryLimit() == null || modelSizeStats.getModelBytesExceeded() == null) { + auditor.error(context.jobId, Messages.getMessage(Messages.JOB_AUDIT_MEMORY_STATUS_HARD_LIMIT_PRE_7_2, new ByteSizeValue(modelSizeStats.getModelBytes(), ByteSizeUnit.BYTES).toString())); + } else { + auditor.error(context.jobId, Messages.getMessage(Messages.JOB_AUDIT_MEMORY_STATUS_HARD_LIMIT, + new ByteSizeValue(modelSizeStats.getModelBytesMemoryLimit(), ByteSizeUnit.BYTES).toString(), + new ByteSizeValue(modelSizeStats.getModelBytesExceeded(), ByteSizeUnit.BYTES).toString())); + } } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/results/AutodetectResult.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/results/AutodetectResult.java index f3afa98b55a46..b831f1b0aee62 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/results/AutodetectResult.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/results/AutodetectResult.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.ml.job.results; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -133,19 +132,14 @@ public AutodetectResult(StreamInput in) throws IOException { this.flushAcknowledgement = null; } - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - if (in.readBoolean()) { - this.forecast = new Forecast(in); - } else { - this.forecast = null; - } - if (in.readBoolean()) { - this.forecastRequestStats = new ForecastRequestStats(in); - } else { - this.forecastRequestStats = null; - } + if (in.readBoolean()) { + this.forecast = new Forecast(in); } else { this.forecast = null; + } + if (in.readBoolean()) { + this.forecastRequestStats = new ForecastRequestStats(in); + } else { this.forecastRequestStats = null; } } @@ -161,11 +155,8 @@ public void writeTo(StreamOutput out) throws IOException { writeNullable(modelPlot, out); writeNullable(categoryDefinition, out); writeNullable(flushAcknowledgement, out); - - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - writeNullable(forecast, out); - 
writeNullable(forecastRequestStats, out); - } + writeNullable(forecast, out); + writeNullable(forecastRequestStats, out); } private static void writeNullable(Writeable writeable, StreamOutput out) throws IOException { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java index 287bd22f91f92..9e0692672349d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java @@ -8,17 +8,13 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.license.License.OperationMode; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.xpack.core.TestXPackTransportClient; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction; @@ -34,7 +30,6 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.ml.LocalStateMachineLearning; import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; import org.junit.Before; @@ -61,63 +56,50 @@ public void testMachineLearningPutJobActionRestricted() { License.OperationMode mode = randomInvalidLicenseType(); enableLicensing(mode); assertMLAllowed(false); + // test that license restricted apis do not work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture. 
newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), listener); + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), listener); listener.actionGet(); - fail("put job action should not be enabled!"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.status(), is(RestStatus.FORBIDDEN)); - assertThat(e.getMessage(), containsString("non-compliant")); - assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); - } + }); + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + assertThat(e.getMessage(), containsString("non-compliant")); + assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); // Pick a license that does allow machine learning mode = randomValidLicenseType(); enableLicensing(mode); assertMLAllowed(true); // test that license restricted apis do now work - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), listener); - PutJobAction.Response response = listener.actionGet(); - assertNotNull(response); - } + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), listener); + PutJobAction.Response response = listener.actionGet(); + assertNotNull(response); } public void testMachineLearningOpenJobActionRestricted() throws Exception { String jobId = "testmachinelearningopenjobactionrestricted"; assertMLAllowed(true); // test that license restricted apis do now work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response response = putJobListener.actionGet(); - assertNotNull(response); - } + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response response = putJobListener.actionGet(); + assertNotNull(response); // Pick a license that does not allow machine learning License.OperationMode mode = randomInvalidLicenseType(); enableLicensing(mode); assertMLAllowed(false); // test that license restricted apis do not work - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), listener); + 
new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), listener); listener.actionGet(); - fail("open job action should not be enabled!"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.status(), is(RestStatus.FORBIDDEN)); - assertThat(e.getMessage(), containsString("non-compliant")); - assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); - } + }); + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + assertThat(e.getMessage(), containsString("non-compliant")); + assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); // Pick a license that does allow machine learning mode = randomValidLicenseType(); @@ -131,13 +113,10 @@ public void testMachineLearningOpenJobActionRestricted() throws Exception { }); // test that license restricted apis do now work - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), listener); - AcknowledgedResponse response = listener.actionGet(); - assertNotNull(response); - } + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), listener); + AcknowledgedResponse response2 = listener.actionGet(); + assertNotNull(response2); } public void testMachineLearningPutDatafeedActionRestricted() throws Exception { @@ -145,46 +124,36 @@ public void testMachineLearningPutDatafeedActionRestricted() throws Exception { String datafeedId = jobId + "-datafeed"; assertMLAllowed(true); // test that license restricted apis do now work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response putJobResponse = putJobListener.actionGet(); - assertNotNull(putJobResponse); - } + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response putJobResponse = putJobListener.actionGet(); + assertNotNull(putJobResponse); // Pick a license that does not allow machine learning License.OperationMode mode = randomInvalidLicenseType(); enableLicensing(mode); assertMLAllowed(false); // test that license restricted apis do not work - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putDatafeed( - new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, Collections.singletonList(jobId))), listener); + new MachineLearningClient(client()).putDatafeed( + new 
PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, Collections.singletonList(jobId))), listener); listener.actionGet(); - fail("put datafeed action should not be enabled!"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.status(), is(RestStatus.FORBIDDEN)); - assertThat(e.getMessage(), containsString("non-compliant")); - assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); - } + }); + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + assertThat(e.getMessage(), containsString("non-compliant")); + assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); // Pick a license that does allow machine learning mode = randomValidLicenseType(); enableLicensing(mode); assertMLAllowed(true); // test that license restricted apis do now work - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putDatafeed( - new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, Collections.singletonList(jobId))), listener); - PutDatafeedAction.Response response = listener.actionGet(); - assertNotNull(response); - } + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putDatafeed( + new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, Collections.singletonList(jobId))), listener); + PutDatafeedAction.Response response = listener.actionGet(); + assertNotNull(response); } public void testAutoCloseJobWithDatafeed() throws Exception { @@ -194,31 +163,29 @@ public void testAutoCloseJobWithDatafeed() throws Exception { String datafeedIndex = jobId + "-data"; prepareCreate(datafeedIndex).addMapping("type", "{\"type\":{\"properties\":{\"time\":{\"type\":\"date\"}}}}", XContentType.JSON).get(); - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - // put job - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response putJobResponse = putJobListener.actionGet(); - assertNotNull(putJobResponse); - // put datafeed - PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putDatafeed( - new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, - Collections.singletonList(datafeedIndex))), putDatafeedListener); - PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); - assertNotNull(putDatafeedResponse); - // open job - PlainActionFuture openJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), openJobListener); - AcknowledgedResponse openJobResponse = openJobListener.actionGet(); - assertNotNull(openJobResponse); - // start datafeed - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener); - listener.actionGet(); - } + + // put job + 
PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response putJobResponse = putJobListener.actionGet(); + assertNotNull(putJobResponse); + // put datafeed + PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putDatafeed( + new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, + Collections.singletonList(datafeedIndex))), putDatafeedListener); + PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); + assertNotNull(putDatafeedResponse); + // open job + PlainActionFuture openJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), openJobListener); + AcknowledgedResponse openJobResponse = openJobListener.actionGet(); + assertNotNull(openJobResponse); + // start datafeed + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener); + listener.actionGet(); + if (randomBoolean()) { enableLicensing(randomInvalidLicenseType()); @@ -245,18 +212,15 @@ public void testAutoCloseJobWithDatafeed() throws Exception { enableLicensing(randomValidLicenseType()); assertMLAllowed(true); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - // open job - PlainActionFuture openJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), openJobListener); - AcknowledgedResponse openJobResponse = openJobListener.actionGet(); - assertNotNull(openJobResponse); - // start datafeed - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener); - listener.actionGet(); - } + // open job + PlainActionFuture openJobListener2 = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), openJobListener2); + AcknowledgedResponse openJobResponse3 = openJobListener2.actionGet(); + assertNotNull(openJobResponse3); + // start datafeed + PlainActionFuture listener2 = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener2); + listener2.actionGet(); assertBusy(() -> { JobState jobState = getJobStats(jobId).getState(); @@ -299,24 +263,20 @@ public void testMachineLearningStartDatafeedActionRestricted() throws Exception prepareCreate(datafeedIndex).addMapping("type", "{\"type\":{\"properties\":{\"time\":{\"type\":\"date\"}}}}", XContentType.JSON).get(); // test that license restricted apis do now work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response putJobResponse = putJobListener.actionGet(); - 
assertNotNull(putJobResponse); - PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putDatafeed( - new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, - Collections.singletonList(datafeedIndex))), putDatafeedListener); - PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); - assertNotNull(putDatafeedResponse); - PlainActionFuture openJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), openJobListener); - AcknowledgedResponse openJobResponse = openJobListener.actionGet(); - assertNotNull(openJobResponse); - } + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response putJobResponse = putJobListener.actionGet(); + assertNotNull(putJobResponse); + PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putDatafeed( + new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, + Collections.singletonList(datafeedIndex))), putDatafeedListener); + PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); + assertNotNull(putDatafeedResponse); + PlainActionFuture openJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), openJobListener); + AcknowledgedResponse openJobResponse = openJobListener.actionGet(); + assertNotNull(openJobResponse); // Pick a license that does not allow machine learning License.OperationMode mode = randomInvalidLicenseType(); @@ -333,36 +293,30 @@ public void testMachineLearningStartDatafeedActionRestricted() throws Exception }); // test that license restricted apis do not work - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener); + new MachineLearningClient(client()).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener); listener.actionGet(); - fail("start datafeed action should not be enabled!"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.status(), is(RestStatus.FORBIDDEN)); - assertThat(e.getMessage(), containsString("non-compliant")); - assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); - } + }); + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + assertThat(e.getMessage(), containsString("non-compliant")); + assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); // Pick a license that does allow machine learning mode = randomValidLicenseType(); enableLicensing(mode); assertMLAllowed(true); // test that license restricted apis do now work - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - // re-open job now that the license is valid again - PlainActionFuture openJobListener = 
PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), openJobListener); - AcknowledgedResponse openJobResponse = openJobListener.actionGet(); - assertNotNull(openJobResponse); - - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener); - AcknowledgedResponse response = listener.actionGet(); - assertNotNull(response); - } + // re-open job now that the license is valid again + PlainActionFuture openJobListener2 = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), openJobListener2); + AcknowledgedResponse openJobResponse3 = openJobListener2.actionGet(); + assertNotNull(openJobResponse3); + + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener); + AcknowledgedResponse response = listener.actionGet(); + assertNotNull(response); } public void testMachineLearningStopDatafeedActionNotRestricted() throws Exception { @@ -373,29 +327,25 @@ public void testMachineLearningStopDatafeedActionNotRestricted() throws Exceptio prepareCreate(datafeedIndex).addMapping("type", "{\"type\":{\"properties\":{\"time\":{\"type\":\"date\"}}}}", XContentType.JSON).get(); // test that license restricted apis do now work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response putJobResponse = putJobListener.actionGet(); - assertNotNull(putJobResponse); - PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putDatafeed( - new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, - Collections.singletonList(datafeedIndex))), putDatafeedListener); - PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); - assertNotNull(putDatafeedResponse); - PlainActionFuture openJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), openJobListener); - AcknowledgedResponse openJobResponse = openJobListener.actionGet(); - assertNotNull(openJobResponse); - PlainActionFuture startDatafeedListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).startDatafeed( - new StartDatafeedAction.Request(datafeedId, 0L), startDatafeedListener); - AcknowledgedResponse startDatafeedResponse = startDatafeedListener.actionGet(); - assertNotNull(startDatafeedResponse); - } + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response putJobResponse = putJobListener.actionGet(); + assertNotNull(putJobResponse); + PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putDatafeed( + new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, + Collections.singletonList(datafeedIndex))), putDatafeedListener); + 
PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); + assertNotNull(putDatafeedResponse); + PlainActionFuture openJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), openJobListener); + AcknowledgedResponse openJobResponse = openJobListener.actionGet(); + assertNotNull(openJobResponse); + PlainActionFuture startDatafeedListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).startDatafeed( + new StartDatafeedAction.Request(datafeedId, 0L), startDatafeedListener); + AcknowledgedResponse startDatafeedResponse = startDatafeedListener.actionGet(); + assertNotNull(startDatafeedResponse); boolean invalidLicense = randomBoolean(); if (invalidLicense) { @@ -404,30 +354,27 @@ public void testMachineLearningStopDatafeedActionNotRestricted() throws Exceptio enableLicensing(randomValidLicenseType()); } - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).stopDatafeed(new StopDatafeedAction.Request(datafeedId), listener); - if (invalidLicense) { - // the stop datafeed due to invalid license happens async, so check if the datafeed turns into stopped state: - assertBusy(() -> { - GetDatafeedsStatsAction.Response response = - new MachineLearningClient(client) - .getDatafeedsStats(new GetDatafeedsStatsAction.Request(datafeedId)).actionGet(); - assertEquals(DatafeedState.STOPPED, response.getResponse().results().get(0).getDatafeedState()); - }); - } else { - listener.actionGet(); - } - - if (invalidLicense) { - // the close due to invalid license happens async, so check if the job turns into closed state: - assertBusy(() -> { - GetJobsStatsAction.Response response = - new MachineLearningClient(client).getJobsStats(new GetJobsStatsAction.Request(jobId)).actionGet(); - assertEquals(JobState.CLOSED, response.getResponse().results().get(0).getState()); - }); - } + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).stopDatafeed(new StopDatafeedAction.Request(datafeedId), listener); + if (invalidLicense) { + // the stop datafeed due to invalid license happens async, so check if the datafeed turns into stopped state: + assertBusy(() -> { + GetDatafeedsStatsAction.Response response = + new MachineLearningClient(client()) + .getDatafeedsStats(new GetDatafeedsStatsAction.Request(datafeedId)).actionGet(); + assertEquals(DatafeedState.STOPPED, response.getResponse().results().get(0).getDatafeedState()); + }); + } else { + listener.actionGet(); + } + + if (invalidLicense) { + // the close due to invalid license happens async, so check if the job turns into closed state: + assertBusy(() -> { + GetJobsStatsAction.Response response = + new MachineLearningClient(client()).getJobsStats(new GetJobsStatsAction.Request(jobId)).actionGet(); + assertEquals(JobState.CLOSED, response.getResponse().results().get(0).getState()); + }); } } @@ -435,18 +382,14 @@ public void testMachineLearningCloseJobActionNotRestricted() throws Exception { String jobId = "testmachinelearningclosejobactionnotrestricted"; assertMLAllowed(true); // test that license restricted apis do now work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new 
TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response putJobResponse = putJobListener.actionGet(); - assertNotNull(putJobResponse); - PlainActionFuture openJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), openJobListener); - AcknowledgedResponse openJobResponse = openJobListener.actionGet(); - assertNotNull(openJobResponse); - } + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response putJobResponse = putJobListener.actionGet(); + assertNotNull(putJobResponse); + PlainActionFuture openJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), openJobListener); + AcknowledgedResponse openJobResponse = openJobListener.actionGet(); + assertNotNull(openJobResponse); boolean invalidLicense = randomBoolean(); if (invalidLicense) { @@ -455,22 +398,19 @@ public void testMachineLearningCloseJobActionNotRestricted() throws Exception { enableLicensing(randomValidLicenseType()); } - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture.newFuture(); - CloseJobAction.Request request = new CloseJobAction.Request(jobId); - request.setCloseTimeout(TimeValue.timeValueSeconds(20)); - if (invalidLicense) { - // the close due to invalid license happens async, so check if the job turns into closed state: - assertBusy(() -> { - GetJobsStatsAction.Response response = - new MachineLearningClient(client).getJobsStats(new GetJobsStatsAction.Request(jobId)).actionGet(); - assertEquals(JobState.CLOSED, response.getResponse().results().get(0).getState()); - }); - } else { - new MachineLearningClient(client).closeJob(request, listener); - listener.actionGet(); - } + PlainActionFuture listener = PlainActionFuture.newFuture(); + CloseJobAction.Request request = new CloseJobAction.Request(jobId); + request.setCloseTimeout(TimeValue.timeValueSeconds(20)); + if (invalidLicense) { + // the close due to invalid license happens async, so check if the job turns into closed state: + assertBusy(() -> { + GetJobsStatsAction.Response response = + new MachineLearningClient(client()).getJobsStats(new GetJobsStatsAction.Request(jobId)).actionGet(); + assertEquals(JobState.CLOSED, response.getResponse().results().get(0).getState()); + }); + } else { + new MachineLearningClient(client()).closeJob(request, listener); + listener.actionGet(); } } @@ -478,25 +418,18 @@ public void testMachineLearningDeleteJobActionNotRestricted() throws Exception { String jobId = "testmachinelearningclosejobactionnotrestricted"; assertMLAllowed(true); // test that license restricted apis do now work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - 
client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response putJobResponse = putJobListener.actionGet(); - assertNotNull(putJobResponse); - } + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response putJobResponse = putJobListener.actionGet(); + assertNotNull(putJobResponse); // Pick a random license License.OperationMode mode = randomLicenseType(); enableLicensing(mode); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).deleteJob(new DeleteJobAction.Request(jobId), listener); - listener.actionGet(); - } + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).deleteJob(new DeleteJobAction.Request(jobId), listener); + listener.actionGet(); } public void testMachineLearningDeleteDatafeedActionNotRestricted() throws Exception { @@ -504,31 +437,24 @@ public void testMachineLearningDeleteDatafeedActionNotRestricted() throws Except String datafeedId = jobId + "-datafeed"; assertMLAllowed(true); // test that license restricted apis do now work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response putJobResponse = putJobListener.actionGet(); - assertNotNull(putJobResponse); - PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putDatafeed( - new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, - Collections.singletonList(jobId))), putDatafeedListener); - PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); - assertNotNull(putDatafeedResponse); - } + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response putJobResponse = putJobListener.actionGet(); + assertNotNull(putJobResponse); + PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putDatafeed( + new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, + Collections.singletonList(jobId))), putDatafeedListener); + PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); + assertNotNull(putDatafeedResponse); // Pick a random license License.OperationMode mode = randomLicenseType(); enableLicensing(mode); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - 
client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).deleteDatafeed(new DeleteDatafeedAction.Request(datafeedId), listener); - listener.actionGet(); - } + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).deleteDatafeed(new DeleteDatafeedAction.Request(datafeedId), listener); + listener.actionGet(); } private static OperationMode randomInvalidLicenseType() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java index df447d7ec6c35..2de1f79c64aa6 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.xcontent.ToXContent; @@ -29,11 +28,11 @@ import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackFeatureSet.Usage; import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage; import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; -import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; @@ -295,21 +294,6 @@ public void testNodeCount() throws Exception { source = new XContentSource(builder); } assertThat(source.getValue("node_count"), equalTo(nodeCount)); - - BytesStreamOutput oldOut = new BytesStreamOutput(); - oldOut.setVersion(Version.V_6_0_0); - usage.writeTo(oldOut); - StreamInput oldInput = oldOut.bytes().streamInput(); - oldInput.setVersion(Version.V_6_0_0); - XPackFeatureSet.Usage oldSerializedUsage = new MachineLearningFeatureSetUsage(oldInput); - - XContentSource oldSource; - try (XContentBuilder builder = XContentFactory.jsonBuilder()) { - oldSerializedUsage.toXContent(builder, ToXContent.EMPTY_PARAMS); - oldSource = new XContentSource(builder); - } - - assertNull(oldSource.getValue("node_count")); } public void testUsageGivenMlMetadataNotInstalled() throws Exception { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportForecastJobActionRequestTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportForecastJobActionRequestTests.java index be115af6bcd72..e60e86cc54960 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportForecastJobActionRequestTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportForecastJobActionRequestTests.java @@ -23,7 +23,7 @@ public class 
TransportForecastJobActionRequestTests extends ESTestCase { public void testValidate_jobVersionCannonBeBefore61() { Job.Builder jobBuilder = createTestJob("forecast-it-test-job-version"); - jobBuilder.setJobVersion(Version.V_6_0_1); + jobBuilder.setJobVersion(Version.fromString("6.0.1")); ForecastJobAction.Request request = new ForecastJobAction.Request(); Exception e = expectThrows(ElasticsearchStatusException.class, () -> TransportForecastJobAction.validate(jobBuilder.build(), request)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 040ed5e1d0ed4..1065503e091d4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -370,11 +371,14 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() Map nodeAttr = new HashMap<>(); nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); + Version node1Version = VersionUtils.randomCompatibleVersion(random(), VersionUtils.getPreviousVersion(Version.CURRENT)); + Version node2Version = randomValueOtherThan(node1Version, + () -> VersionUtils.randomCompatibleVersion(random(), VersionUtils.getPreviousVersion(Version.CURRENT))); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), Version.V_6_2_0)) + nodeAttr, Collections.emptySet(), node1Version)) .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), Version.V_6_1_0)) + nodeAttr, Collections.emptySet(), node2Version)) .build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -386,7 +390,7 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() Job job = BaseMlIntegTestCase.createFareQuoteJob("job_with_incompatible_model_snapshot") .setModelSnapshotId("incompatible_snapshot") - .setModelSnapshotMinVersion(Version.V_6_3_0) + .setModelSnapshotMinVersion(Version.CURRENT) .build(new Date()); cs.nodes(nodes); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); @@ -394,7 +398,7 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_with_incompatible_model_snapshot", job, cs.build(), 10, 2, 30, memoryTracker, isMemoryTrackerRecentlyRefreshed, logger); assertThat(result.getExplanation(), containsString( - "because the job's model snapshot requires a node of version [6.3.0] or higher")); + "because the job's model snapshot requires a node of version [" + Version.CURRENT + "] or higher")); assertNull(result.getExecutorNode()); } @@ -402,11 +406,13 @@ public void testSelectLeastLoadedMlNode_jobWithRulesButNoNodeMeetsRequiredVersio Map 
nodeAttr = new HashMap<>(); nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); + Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), + VersionUtils.getPreviousVersion(Version.V_6_4_0)); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), Version.V_6_2_0)) + nodeAttr, Collections.emptySet(), version)) .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), Version.V_6_3_0)) + nodeAttr, Collections.emptySet(), version)) .build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -433,9 +439,9 @@ public void testSelectLeastLoadedMlNode_jobWithRulesAndNodeMeetsRequiredVersion( nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), Version.V_6_2_0)) + nodeAttr, Collections.emptySet(), Version.fromString("6.2.0"))) .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), Version.V_6_4_0)) + nodeAttr, Collections.emptySet(), Version.fromString("6.4.0"))) .build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java index 7ed5518c65077..6cf4d61cf176c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java @@ -232,6 +232,27 @@ public void testCreateConfigsGivenElasticsearchLogAndImpossibleGrokPatternOverri "\\[%{JAVACLASS:class} *\\] %{JAVALOGMESSAGE:message}] does not match sample messages", e.getMessage()); } + public void testErrorOnIncorrectMessageFormation() { + + // This sample causes problems because the (very weird) primary timestamp format + // is not detected but a secondary format that only occurs in one line is detected + String sample = "Day 21 Month 1 Year 2019 11:04 INFO [localhost] - starting\n" + + "Day 21 Month 1 Year 2019 11:04 INFO [localhost] - startup date [Mon Jan 21 11:04:19 CET 2019]\n" + + "Day 21 Month 1 Year 2019 11:04 DEBUG [localhost] - details\n" + + "Day 21 Month 1 Year 2019 11:04 DEBUG [localhost] - more details\n" + + "Day 21 Month 1 Year 2019 11:04 WARN [localhost] - something went wrong\n"; + + String charset = randomFrom(POSSIBLE_CHARSETS); + Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> factory.createFromSample(explanation, sample, charset, hasByteOrderMarker, FileStructureOverrides.EMPTY_OVERRIDES, + NOOP_TIMEOUT_CHECKER)); + + assertEquals("Failed to create more than one message from the sample lines provided. (The last is discarded in " + + "case the sample is incomplete.) 
If your sample does contain multiple messages the problem is probably that " + + "the primary timestamp format has been incorrectly detected, so try overriding it.", e.getMessage()); + } + public void testCreateMultiLineMessageStartRegexGivenNoPrefaces() { for (TimestampFormatFinder.CandidateTimestampFormat candidateTimestampFormat : TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS) { String simpleDateRegex = candidateTimestampFormat.simplePattern.pattern(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessorTests.java index 86f408a42d43b..40a1d3f969157 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessorTests.java @@ -301,7 +301,8 @@ public void testProcessResult_modelSizeStatsWithMemoryStatusChanges() { // Now with hard_limit modelSizeStats = new ModelSizeStats.Builder(JOB_ID) .setMemoryStatus(ModelSizeStats.MemoryStatus.HARD_LIMIT) - .setModelBytes(new ByteSizeValue(512, ByteSizeUnit.MB).getBytes()) + .setModelBytesMemoryLimit(new ByteSizeValue(512, ByteSizeUnit.MB).getBytes()) + .setModelBytesExceeded(new ByteSizeValue(1, ByteSizeUnit.KB).getBytes()) .build(); when(result.getModelSizeStats()).thenReturn(modelSizeStats); processorUnderTest.processResult(context, result); @@ -311,9 +312,9 @@ public void testProcessResult_modelSizeStatsWithMemoryStatusChanges() { when(result.getModelSizeStats()).thenReturn(modelSizeStats); processorUnderTest.processResult(context, result); - // We should have only fired to notifications: one for soft_limit and one for hard_limit + // We should have only fired two notifications: one for soft_limit and one for hard_limit verify(auditor).warning(JOB_ID, Messages.getMessage(Messages.JOB_AUDIT_MEMORY_STATUS_SOFT_LIMIT)); - verify(auditor).error(JOB_ID, Messages.getMessage(Messages.JOB_AUDIT_MEMORY_STATUS_HARD_LIMIT, "512mb")); + verify(auditor).error(JOB_ID, Messages.getMessage(Messages.JOB_AUDIT_MEMORY_STATUS_HARD_LIMIT, "512mb", "1kb")); verifyNoMoreInteractions(auditor); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java index 1a06a9a4037f9..d644a63e7bcaa 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.rest.yaml.ObjectPath; import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackFeatureSet.Usage; @@ -63,7 +64,7 @@ public void testEnabledDefault() { public void testUsage() throws Exception { // anything prior to 6.3 does not include collection_enabled (so defaults it to null) - final Version serializedVersion = randomFrom(Version.CURRENT, Version.V_6_3_0, Version.V_6_2_2); + final Version serializedVersion = 
VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); final boolean collectionEnabled = randomBoolean(); int localCount = randomIntBetween(0, 5); List exporterList = new ArrayList<>(); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java index 4eead794e9439..558242dfb7fa4 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java @@ -97,15 +97,12 @@ protected String getJobId() { @Override protected void onStart(long now, ActionListener listener) { try { - // this is needed to exclude buckets that can still receive new documents. + // this is needed to exclude buckets that can still receive new documents DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram(); - long rounded = dateHisto.createRounding().round(now); - if (dateHisto.getDelay() != null) { - // if the job has a delay we filter all documents that appear before it. - maxBoundary = rounded - TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis(); - } else { - maxBoundary = rounded; - } + // if the job has a delay we filter all documents that appear before it + long delay = dateHisto.getDelay() != null ? + TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis() : 0; + maxBoundary = dateHisto.createRounding().round(now - delay); listener.onResponse(null); } catch (Exception e) { listener.onFailure(e); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index 7346cf6f855e1..1fe01a8246267 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -326,6 +326,58 @@ public void testSimpleDateHistoWithDelay() throws Exception { }); } + public void testSimpleDateHistoWithOverlappingDelay() throws Exception { + String rollupIndex = randomAlphaOfLengthBetween(5, 10); + String field = "the_histo"; + DateHistogramGroupConfig dateHistoConfig = + new FixedInterval(field, new DateHistogramInterval("1h"), new DateHistogramInterval("15m"), null); + RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.emptyList()); + final List> dataset = new ArrayList<>(); + long now = asLong("2015-04-01T10:30:00.000Z"); + dataset.addAll( + Arrays.asList( + asMap("the_histo", now - TimeValue.timeValueMinutes(135).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(120).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(105).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(90).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(75).getMillis()), + asMap("the_histo", now - TimeValue.timeValueHours(1).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(45).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(30).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(15).getMillis()), + asMap("the_histo", now) + ) + ); + final Rounding rounding = dateHistoConfig.createRounding(); + executeTestCase(dataset, job, now, (resp) -> { + 
assertThat(resp.size(), equalTo(2)); + IndexRequest request = resp.get(0); + assertThat(request.index(), equalTo(rollupIndex)); + assertThat(request.sourceAsMap(), equalTo( + asMap( + "_rollup.version", 2, + "the_histo.date_histogram.timestamp", rounding.round(now - TimeValue.timeValueHours(2).getMillis()), + "the_histo.date_histogram.interval", "1h", + "the_histo.date_histogram._count", 3, + "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), + "_rollup.id", job.getId() + ) + )); + request = resp.get(1); + assertThat(request.index(), equalTo(rollupIndex)); + assertThat(request.sourceAsMap(), equalTo( + asMap( + "_rollup.version", 2, + "the_histo.date_histogram.timestamp", rounding.round(now - TimeValue.timeValueHours(1).getMillis()), + "the_histo.date_histogram.interval", "1h", + "the_histo.date_histogram._count", 4, + "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), + "_rollup.id", job.getId() + ) + )); + }); + } + public void testSimpleDateHistoWithTimeZone() throws Exception { final List> dataset = new ArrayList<>(); long now = asLong("2015-04-01T10:00:00.000Z"); diff --git a/x-pack/plugin/security/qa/basic-enable-security/build.gradle b/x-pack/plugin/security/qa/basic-enable-security/build.gradle new file mode 100644 index 0000000000000..a21e3c68d3fc4 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/build.gradle @@ -0,0 +1,68 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +task integTestNoSecurity(type: RestIntegTestTask) { + description = "Run tests against a cluster that doesn't have security" +} +tasks.getByName("integTestNoSecurityRunner").configure { + systemProperty 'tests.has_security', 'false' +} +check.dependsOn(integTestNoSecurity) + +task integTestSecurity(type: RestIntegTestTask) { + dependsOn integTestNoSecurity + description = "Run tests against a cluster that has security" +} +tasks.getByName("integTestSecurityRunner").configure { + systemProperty 'tests.has_security', 'true' +} +check.dependsOn(integTestSecurity) + +configure(extensions.findByName("integTestNoSecurityCluster")) { + clusterName = "enable-security-on-basic" + numNodes = 2 + + setting 'xpack.ilm.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'basic' + setting 'xpack.security.enabled', 'false' +} + +Task noSecurityTest = tasks.findByName("integTestNoSecurity") +configure(extensions.findByName("integTestSecurityCluster")) { + clusterName = "basic-license" + numNodes = 2 + dataDir = { nodeNum -> noSecurityTest.nodes[nodeNum].dataDir } + + setting 'xpack.ilm.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'basic' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.authc.anonymous.roles', 'anonymous' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.security.transport.ssl.certificate', 'transport.crt' + setting 'xpack.security.transport.ssl.key', 'transport.key' + setting 'xpack.security.transport.ssl.key_passphrase', 
'transport-password' + setting 'xpack.security.transport.ssl.certificate_authorities', 'ca.crt' + + extraConfigFile 'transport.key', project.projectDir.toPath().resolve('src/test/resources/ssl/transport.key').toFile() + extraConfigFile 'transport.crt', project.projectDir.toPath().resolve('src/test/resources/ssl/transport.crt').toFile() + extraConfigFile 'ca.crt', project.projectDir.toPath().resolve('src/test/resources/ssl/ca.crt').toFile() + + setupCommand 'setupAdminUser', + 'bin/elasticsearch-users', 'useradd', 'admin_user', '-p', 'admin-password', '-r', 'superuser' + setupCommand 'setupTestUser' , + 'bin/elasticsearch-users', 'useradd', 'security_test_user', '-p', 'security-test-password', '-r', 'security_test_role' + extraConfigFile 'roles.yml', project.projectDir.toPath().resolve('src/test/resources/roles.yml').toFile() +} + +integTest.enabled = false diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/java/org/elasticsearch/xpack/security/EnableSecurityOnBasicLicenseIT.java b/x-pack/plugin/security/qa/basic-enable-security/src/test/java/org/elasticsearch/xpack/security/EnableSecurityOnBasicLicenseIT.java new file mode 100644 index 0000000000000..fa64a89f2f633 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/java/org/elasticsearch/xpack/security/EnableSecurityOnBasicLicenseIT.java @@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.elasticsearch.xpack.security.authc.InternalRealms; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class EnableSecurityOnBasicLicenseIT extends ESRestTestCase { + + private static boolean securityEnabled; + + @BeforeClass + public static void checkTestMode() { + final String hasSecurity = System.getProperty("tests.has_security"); + securityEnabled = Booleans.parseBoolean(hasSecurity); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("security_test_user", new SecureString("security-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected boolean 
preserveClusterUponCompletion() { + // If this is the first run (security not yet enabled), then don't clean up afterwards because we want to test restart with data + return securityEnabled == false; + } + + public void testSecuritySetup() throws Exception { + logger.info("Security status: {}", securityEnabled); + logger.info("Cluster:\n{}", getClusterInfo()); + logger.info("Indices:\n{}", getIndices()); + checkBasicLicenseType(); + + checkSecurityStatus(securityEnabled); + if (securityEnabled) { + checkAuthentication(); + } + + checkAllowedWrite("index_allowed"); + // Security runs second, and should see the doc from the first (non-security) run + final int expectedIndexCount = securityEnabled ? 2 : 1; + checkIndexCount("index_allowed", expectedIndexCount); + + final String otherIndex = "index_" + randomAlphaOfLengthBetween(2, 6).toLowerCase(Locale.ROOT); + if (securityEnabled) { + checkDeniedWrite(otherIndex); + } else { + checkAllowedWrite(otherIndex); + } + } + + private String getClusterInfo() throws IOException { + Map info = getAsMap("/"); + assertThat(info, notNullValue()); + return info.toString(); + } + + private String getIndices() throws IOException { + final Request request = new Request("GET", "/_cat/indices"); + Response response = client().performRequest(request); + return EntityUtils.toString(response.getEntity()); + } + + private void checkBasicLicenseType() throws IOException { + Map license = getAsMap("/_license"); + assertThat(license, notNullValue()); + assertThat(ObjectPath.evaluate(license, "license.type"), equalTo("basic")); + } + + private void checkSecurityStatus(boolean expectEnabled) throws IOException { + Map usage = getAsMap("/_xpack/usage"); + assertThat(usage, notNullValue()); + assertThat(ObjectPath.evaluate(usage, "security.available"), equalTo(true)); + assertThat(ObjectPath.evaluate(usage, "security.enabled"), equalTo(expectEnabled)); + if (expectEnabled) { + for (String realm : Arrays.asList("file", "native")) { + assertThat(ObjectPath.evaluate(usage, "security.realms." + realm + ".available"), equalTo(true)); + assertThat(ObjectPath.evaluate(usage, "security.realms." + realm + ".enabled"), equalTo(true)); + } + for (String realm : InternalRealms.getConfigurableRealmsTypes()) { + if (realm.equals("file") == false && realm.equals("native") == false) { + assertThat(ObjectPath.evaluate(usage, "security.realms." + realm + ".available"), equalTo(false)); + assertThat(ObjectPath.evaluate(usage, "security.realms." 
+ realm + ".enabled"), equalTo(false)); + } + } + } + } + + private void checkAuthentication() throws IOException { + final Map auth = getAsMap("/_security/_authenticate"); + // From file realm, configured in build.gradle + assertThat(ObjectPath.evaluate(auth, "username"), equalTo("security_test_user")); + assertThat(ObjectPath.evaluate(auth, "roles"), contains("security_test_role")); + } + + private void checkAllowedWrite(String indexName) throws IOException { + final Request request = new Request("POST", "/" + indexName + "/_doc"); + request.setJsonEntity("{ \"key\" : \"value\" }"); + Response response = client().performRequest(request); + final Map result = entityAsMap(response); + assertThat(ObjectPath.evaluate(result, "_index"), equalTo(indexName)); + assertThat(ObjectPath.evaluate(result, "result"), equalTo("created")); + } + + private void checkDeniedWrite(String indexName) { + final Request request = new Request("POST", "/" + indexName + "/_doc"); + request.setJsonEntity("{ \"key\" : \"value\" }"); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat(e.getMessage(), containsString("unauthorized for user [security_test_user]")); + } + + private void checkIndexCount(String indexName, int expectedCount) throws IOException { + final Request request = new Request("POST", "/" + indexName + "/_refresh"); + adminClient().performRequest(request); + + final Map result = getAsMap("/" + indexName + "/_count"); + assertThat(ObjectPath.evaluate(result, "count"), equalTo(expectedCount)); + } +} diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/roles.yml b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/roles.yml new file mode 100644 index 0000000000000..eb6c3ec45786b --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/roles.yml @@ -0,0 +1,14 @@ +# A basic role that is used to test security +security_test_role: + cluster: + - monitor + - "cluster:admin/xpack/license/*" + indices: + - names: [ "index_allowed" ] + privileges: [ "read", "write", "create_index" ] + - names: [ "*" ] + privileges: [ "monitor" ] + +anonymous: + cluster: + - monitor \ No newline at end of file diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/README.asciidoc b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/README.asciidoc new file mode 100644 index 0000000000000..b3729f42d17b0 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/README.asciidoc @@ -0,0 +1,30 @@ += Keystore Details + +This document details the steps used to create the certificate and keystore files in this directory. 
+ +== Instructions on generating certificates +The certificates in this directory have been generated using elasticsearch-certutil (7.0.0 SNAPSHOT) + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil ca --pem --out=ca.zip --pass="ca-password" --days=3500 +unzip ca.zip +mv ca/ca.* ./ + +rm ca.zip +rmdir ca +----------------------------------------------------------------------------------------------------------- + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil cert --pem --name=transport --out=transport.zip --pass="transport-password" --days=3500 \ + --ca-cert=ca.crt --ca-key=ca.key --ca-pass="ca-password" \ + --dns=localhost --dns=localhost.localdomain --dns=localhost4 --dns=localhost4.localdomain4 --dns=localhost6 --dns=localhost6.localdomain6 \ + --ip=127.0.0.1 --ip=0:0:0:0:0:0:0:1 + +unzip transport.zip +mv transport/transport.* ./ + +rm transport.zip +rmdir transport +----------------------------------------------------------------------------------------------------------- diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.crt b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.crt new file mode 100644 index 0000000000000..95068217a612a --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIVAL0RCyWTbBDd2ntuWoqRwW0IE9+9MA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTE5MDQzMDAzNTQwN1oXDTI4MTEyODAzNTQwN1owNDEyMDAG +A1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5lcmF0ZWQgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDA4VwADiyl+Xl15D27gtpS +TXZfHt40MUx12FY0MEd3A3hU+Fp4PaLE2ejECx04yrq8Rfc0Yltux/Fc5zE98XM8 +dY4j0QN/e6C/f0mrBI0KaJ25nv0MWFvoqS/D3vWvDFLUP1a3OZICWWoBDG+zCHe5 +Aq0qwge+FU9IUc7G2WPJeUp4e0+EzLxFInls3rTX1xkyq8Q6PT3gi0RZKvHqIudL +DAXDVEGWNxEX9KwQ1nMtRkDZICx/W665kZiBD4XC3WuEkYlDL1ISVw3cmsbYdhb4 +IusIK5zNERi4ewTgDDxic8TbRpkQW189/M3IglrQipH5ixfF6oNSyoRVAa3KZqj5 +AgMBAAGjUzBRMB0GA1UdDgQWBBRI4mOaeunbu60GfjWTpHcvhb6/YTAfBgNVHSME +GDAWgBRI4mOaeunbu60GfjWTpHcvhb6/YTAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBCwUAA4IBAQCUOXddlGoU+Ni85D0cRjYYxyx8a5Rwngp+kztttT/5l3Ch +5JMZyl/xcaTryh37BG3+NuqKR1zHtcLpq/+xaCrwBQ8glJofF+1n9w4zBL9nrH5c +O5NgG7+u/sfB+xdqMVdoBBqfm1Roq7O1T/kBXis1+5ZtBlj+7WIKeWWTZGLTrHV+ +MW5RDOmMoLkqT5qzpR9Yf7UChPVrvKGs4Kd+fYJeb0R5W6mvZQ6/FrsLwAWLC2Q1 +rW1u4zIkO0ih5qd52dl/73u7SWqzWxPy1ynwqJefD4AA0uaJYtMlXHK2vYjutHvY +K7301gzc5fueqo1YMmPgsjjsj+ErR1t0ve7faOBy +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.key b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.key new file mode 100644 index 0000000000000..a6de1f9958d32 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,0F6B57727499DA47 + +OmK77UnFtk/zNEbNTxNJz73D2XWFDWLyHCDZPEXkX55vch/pXkkfVbWbPBFv35nA +LKni0j802Qnc1D4V3BUSmVWHk9SfjI5nlcDkSELbgCOpuZkf6Bmk8FgLfV42BFxn +lAiY+oBB4VV+rxA+HUV6CiWWrTgSjkvFyXCBZzcTEPdF2ifWerjsWKOjQZJtmvMX +J5DhYCCp1/n4R/OQpYxQiOqJdUxbKx4k0h139ySK2PggdL17w1a7AuQnHwJO3+ic +1IntPKD/ZhpAPPzq8A5R5jZyvrSj9Dgv94PXAQ5xTZWnZd2nuJtbkrYJ47pBR3Re 
+R2aZdF/N8ljG1TYHuJXdiL3A80Y3AS00TFNgSAZKSz5Ktt6zI2EAZu9xdHd8EfUm +m3qJmfce9P9cCBzo7DLGHwRMfu9hEFWN9dRD8KWNcB+ahQ1/jItzi25yZM6vD6+S +ZVUzegybeYlMwPks3YObX9IdUSwAd9F76SVwHCsziKQW4RfETaShG/oRNqq04nqA +E//KUl5bfTuv8jumyMlg6iiqIDQAUvzI74mWe2lIy6rglm2rR39SN4NxSrnTwoz4 +KAf+kHWJVyxFqEYs+dqboRWpRfQac3+iYoIlZFob/nRhNyKnccTkHtjh7+1C8CXI +sYXhuJZLCoiXh990M9t1ct0hqfWLNALlEsJesfRG8/fvi+LZd9i3fyCjrM+z96/G +/2zQzdga4bOs3ZEBluYFYkhHRJw1rAF3LTcWYvjP0gjZYVQki7AsLb0me1selS6O +P1bXaLaSUvMsAVO0wOtHMXAoBgEybP4+OonLiMScjdQZ2KRQ8L8OwzuGt0yguPRy +7wQv4NrH8LQu+X7tlQox28kascZUNHxORbh9M/wWx/2htw88uXWb5vxbDe30Rras +mTg0Gxky/88ZWvYxr7PlhBRrrfkJQ9sF/RyygUFhpQaXTwspkpF+MZv+1X6ROHqR +OueSa606FrptZ5n4RRPjq0hVZQgWKMAlIxNSum+gFn/Z7Q9I6gKrGFxjkD65L1kK +BbvbHAomiTyphrMtBRP52VqsFr4NxCWzxr/ZSlwaxTEid2vYg3zm7ls4dHYjUiNR +cs/JZJTkXn2aVaILSQkr9/I0eOOH9t/APSXHY8urQuYsDdmOOL7J2tlh3w1ivP8A +vVeomdUr2jgn53pBzbaLlTfsZ9+UneuLcztLfqN+BydQq1bKWvn2j3GvUkmhE//M ++fpo+uGlslMLh8rjtRH1y9rtCKhLgIxLO4U/ZJksFcJAqF3mR+Xxkrf82LUrAg8x +Oj++3QhOJE7f+vKog8b0gGrySSwzII2Ar7KiJDVJaZpmbbXToBlcC7xoksN3Ra0E +15WxKBSRqb7gi2+ml02rwtFMzq93H05Uoa9mG8uf1QH8t/+o6fniFx5N5kKWmPMy +shXjaYg7NzEBAkxI4VO41faMxEj/CUV0klQDPbnAsTCrcYu7CS2lml3e0zVf6RB8 +plXee99DiWpHZTRoGzpInK3DpnGRP1Frgl1KyhT+HayFZeYSMHfVSFPk3CKKmtEp +r+J/SrpGnEx0NKK3f+MxflZfnMIvgjGxkHdgSaDpz9iTjveq176Bq1GmNLALotOq +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.crt b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.crt new file mode 100644 index 0000000000000..8ffb02e3d5794 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIVAOSHUsKiRx+ekWEEmfI2Q2q3B5hoMA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTE5MDQzMDAzNTU0NloXDTI4MTEyODAzNTU0NlowFDESMBAG +A1UEAxMJdHJhbnNwb3J0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +wBaoGJ9vv9yFxCOg24CsVfwSThOPnea8oujexGZYDgKkCdtcVn03tlyomjOra/dL +PJ0zOvUyktTxv022VQNhkJ/PO+w/NKpHBHaAVZE0o2zvUf8xQqXoHw0S6rAhurs5 +50r8QRkh1Z3ky3uOcFs0pXYCR/2ZVmQNSBhqmhUSK5y0VURot1MtPMw1SeqyabZQ +upDTJ6um/zk2LalfChKJ3vGQGEW7AGfv10eIWSmqQx6rLWAGO4MDelbZhUUr5iFc +D4fW0/MNUXJHTBO5Dyq6n63Wsm0jTYK72bSVw8LZS+uabQCtcHtKUZh38uUEUCjp +MDVY7YmDv0i8qx/MvWasbwIDAQABo4HgMIHdMB0GA1UdDgQWBBQwoESvk9jbbTax +/+c5MCAFEvWW5TAfBgNVHSMEGDAWgBRI4mOaeunbu60GfjWTpHcvhb6/YTCBjwYD +VR0RBIGHMIGEgglsb2NhbGhvc3SCF2xvY2FsaG9zdDYubG9jYWxkb21haW42hwR/ +AAABhxAAAAAAAAAAAAAAAAAAAAABggpsb2NhbGhvc3Q0ggpsb2NhbGhvc3Q2ghVs +b2NhbGhvc3QubG9jYWxkb21haW6CF2xvY2FsaG9zdDQubG9jYWxkb21haW40MAkG +A1UdEwQCMAAwDQYJKoZIhvcNAQELBQADggEBAIQ8/PLfsZ1eKOWW74a4h/Uh5eh8 +u9Led1v+U9tszmULN8JoYSEgyql6zy2pJOuIVLwI9cUvrcypUSDL53NmWhTGAjEL +jbww/G1cngBh5cBzAPq3lRL2lwc8j3ZZ16I1eNyWastvBDdtANlDArCUamZoboBm +HE/jrssC9DOQhxAraiitH3YqjquqztEp1zIuqRI0qYTDFNPzyfyXIyCFIT+3eVI5 +22MqjFL+9IDuoET+VU1i22LhF32TEPotz2hfZTFddql0V1IOJQuVkDkQGFvaJMFy +Xw7d4orV3sxzQYd7muCoaao7g/F675KqpZiiVHqKxTOLafF/MPcfLhH6xZk= +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.key b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.key new file mode 100644 index 0000000000000..f540e17202492 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,0B9EFA0829A750FB + 
+NCrPD7gkQ4Jr5/xIiohWILW3nO/WmNjApqOIc5g/wX/xJpk/554f8zCZ8dUD0D2E +ZW+z7Yj8GWKB0E6+hQZ+3ZUHLYASYSpSDVjg8UaaCxQyoVcUhshahFprqlzgU/An +Er8TbrGvhH0VmNlcQhaImqCOk41Hf8gjrxrtoLKbk3DfTk/Uuv4Jlsz4X+oSBVZN +fezIN70IZvGLKu7O3T9DeVLV1bLL6hNGIXnYe+FzLomMck2UoFv6uGS1VyFIGNf0 +ly80NGgdWTGxzLmiiGCgm5gbqbIehMsei1CC3jZIcfgfGyp4NVvF4HxFxZLTR3kY +YqzBWta/PoY6XXOlLFZupYt/YMt9hU6It9xdudPyNxwSuFXU66Fc08Ljj151iyhv +Ggf88jo9xSVvKOlqqHN6dY/xo9CfzTyuldG4jsKVHgGosSGghksjZ+PpHc7Mo5aP +S/UofhQgApJgU30TQPiQuJ+my/h9CiJyIgP7HnZtltwxg1k3dj+LxlpRKvjTOfuc +epOFmPeIdPkrQDir0j9+h+yoMgeqoT2unUYXw/qx5SVQxB5ckajLmJkUJPej9U3O +wASqNcWCTBEkGt102RU8o6lywdzBvfTB7gegR6oDvRfaxHOiUrRT/IwgszRfIdoC +fZa7Pb9pUuR3oY4uduDYgIKnxJhhQF2ERVXsfQeyxdiHEXvRnBFoAhoDjO8rWv07 +xiFPVMCAqXPImmdI34QezuzV2MUIVlKyeovbf+Kjv/Uat3zTj5FbmyVHcmPXpTY7 +t5iTQG+nQwz6UGcM5lF40EWrRdCzHEXNszwEY3Oz8D5rgBa6kxHYjcG9rzbTGlk2 +gsKdKA0am0hnCCJdTxbK5AkDcCWn/eclw0RPpbhFv5anvHTJ5WAWE7ZaACRuSfvy +UbNRGiWo4cNcR7+PGgV5184zjwJOql1mz+I79tlpxtK/FazP61WAYKOeEx1paKXX +syq+WDWgoZu/RzKDyTu10NUgq9J/IXDBn8/JjOVPCmPhMMLxNdoUhMfO4Ij9+3Jv +mH6ZaU6E+NZuc5N4Ivws42PwNY9FoyuLLgMBbezjhepQrDveHUK5v0weWqEapZ7Z +4KkFAeK7pjuItn5Of+233cp9Y68G8NrwMLQzI23kebNJwwzUMf3DnUJCXiy3PvrF +WpA0Q6/FspJgG3x2AXKo2QsHxydW+4w4pkawS9TCl0E03D7V6Gf17/HOxPDSH972 ++Yzzv8IkaOw5g+paeX9+tHjDFaxuvKiFyn/J7xYZAAQUoa2uQu440RakE73qLO34 +wtWdRzvIYitwLNJSfSojQDNoXuv8eyI/hP573cs6pmbheKXG1XKsWfpfj8sI7OkH +AdjRyeToSKbZ8yCn2vp0jyaRocOucu5oo7c0v+IocWOgdw+913EToJ6G3ck1heVR +b/U04VqKkXowO1YK7xDBAalMxyWq40spIKCC8HBBlng3vfUKqF46q9bMpesXnwPr +/00JfDVhFbqkJbqB8UYpjs9MN+vV5A7lsYbObom4pV25FSnwNSyxK0bhWGfZgutI +pjeQDkvHNG606AsqLz6SmIJP/GBBSMwvT3PGMPOO5XcayKeK3cbOQYJ0Yh7Muoqe +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate deleted file mode 100755 index 183722d9c9364..0000000000000 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -# or more contributor license agreements. Licensed under the Elastic License; -# you may not use this file except in compliance with the Elastic License. - -ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool \ - ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ - "`dirname "$0"`"/elasticsearch-cli \ - "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat deleted file mode 100644 index a50bc1a384ed0..0000000000000 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat +++ /dev/null @@ -1,19 +0,0 @@ -@echo off - -rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -rem or more contributor license agreements. Licensed under the Elastic License; -rem you may not use this file except in compliance with the Elastic License. 
- -setlocal enabledelayedexpansion -setlocal enableextensions - -set ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool -set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env -call "%~dp0elasticsearch-cli.bat" ^ - %%* ^ - || goto exit - -endlocal -endlocal -:exit -exit /b %ERRORLEVEL% diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index a36a004c7f413..a6218522fb7e5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -258,8 +258,8 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; import static org.elasticsearch.xpack.core.XPackSettings.API_KEY_SERVICE_ENABLED_SETTING; import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; -import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT; import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_MAIN_TEMPLATE_7; public class Security extends Plugin implements ActionPlugin, IngestPlugin, NetworkPlugin, ClusterPlugin, @@ -1002,7 +1002,7 @@ public Function> getFieldFilter() { public BiConsumer getJoinValidator() { if (enabled) { return new ValidateTLSOnJoin(XPackSettings.TRANSPORT_SSL_ENABLED.get(settings), - DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings)) + DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings), settings) .andThen(new ValidateUpgradedSecurityIndex()) .andThen(new ValidateLicenseForFIPS(XPackSettings.FIPS_MODE_ENABLED.get(settings))); } @@ -1012,18 +1012,21 @@ public BiConsumer getJoinValidator() { static final class ValidateTLSOnJoin implements BiConsumer { private final boolean isTLSEnabled; private final String discoveryType; + private final Settings settings; - ValidateTLSOnJoin(boolean isTLSEnabled, String discoveryType) { + ValidateTLSOnJoin(boolean isTLSEnabled, String discoveryType, Settings settings) { this.isTLSEnabled = isTLSEnabled; this.discoveryType = discoveryType; + this.settings = settings; } @Override public void accept(DiscoveryNode node, ClusterState state) { License license = LicenseService.getLicense(state.metaData()); - if (license != null && license.isProductionLicense() && - isTLSEnabled == false && "single-node".equals(discoveryType) == false) { - throw new IllegalStateException("TLS setup is required for license type [" + license.operationMode().name() + "]"); + if (isTLSEnabled == false && "single-node".equals(discoveryType) == false + && XPackLicenseState.isTransportTlsRequired(license, settings)) { + throw new IllegalStateException("Transport TLS ([" + XPackSettings.TRANSPORT_SSL_ENABLED.getKey() + + "]) is required for license type [" + license.operationMode().description() + "] when security is enabled"); } } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java index 1b4aff064a0c3..4bab16cf92115 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java @@ -7,6 +7,8 @@ import com.nimbusds.oauth2.sdk.id.State; import com.nimbusds.openid.connect.sdk.Nonce; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -36,6 +38,7 @@ public class TransportOpenIdConnectAuthenticateAction private final ThreadPool threadPool; private final AuthenticationService authenticationService; private final TokenService tokenService; + private static final Logger logger = LogManager.getLogger(TransportOpenIdConnectAuthenticateAction.class); @Inject public TransportOpenIdConnectAuthenticateAction(ThreadPool threadPool, TransportService transportService, @@ -67,9 +70,8 @@ protected void doExecute(Task task, OpenIdConnectAuthenticateRequest request, .get(OpenIdConnectRealm.CONTEXT_TOKEN_DATA); tokenService.createOAuth2Tokens(authentication, originatingAuthentication, tokenMetadata, true, ActionListener.wrap(tuple -> { - final String tokenString = tokenService.getAccessTokenAsString(tuple.v1()); final TimeValue expiresIn = tokenService.getExpirationDelay(); - listener.onResponse(new OpenIdConnectAuthenticateResponse(authentication.getUser().principal(), tokenString, + listener.onResponse(new OpenIdConnectAuthenticateResponse(authentication.getUser().principal(), tuple.v1(), tuple.v2(), expiresIn)); }, listener::onFailure)); }, e -> { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java index 6b61742eed262..96eec7e8fd6c7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java @@ -63,10 +63,9 @@ protected void doExecute(Task task, SamlAuthenticateRequest request, ActionListe final Map tokenMeta = (Map) result.getMetadata().get(SamlRealm.CONTEXT_TOKEN_DATA); tokenService.createOAuth2Tokens(authentication, originatingAuthentication, tokenMeta, true, ActionListener.wrap(tuple -> { - final String tokenString = tokenService.getAccessTokenAsString(tuple.v1()); final TimeValue expiresIn = tokenService.getExpirationDelay(); listener.onResponse( - new SamlAuthenticateResponse(authentication.getUser().principal(), tokenString, tuple.v2(), expiresIn)); + new SamlAuthenticateResponse(authentication.getUser().principal(), tuple.v1(), tuple.v2(), expiresIn)); }, listener::onFailure)); }, e -> { logger.debug(() -> new ParameterizedMessage("SamlToken [{}] could not be authenticated", saml), e); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java index 4b648d5ed4bc0..65456ccd2af51 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java @@ -88,9 +88,8 @@ private void createToken(CreateTokenRequest request, Authentication authenticati boolean includeRefreshToken, ActionListener listener) { tokenService.createOAuth2Tokens(authentication, originatingAuth, Collections.emptyMap(), includeRefreshToken, ActionListener.wrap(tuple -> { - final String tokenStr = tokenService.getAccessTokenAsString(tuple.v1()); final String scope = getResponseScopeValue(request.getScope()); - final CreateTokenResponse response = new CreateTokenResponse(tokenStr, tokenService.getExpirationDelay(), scope, + final CreateTokenResponse response = new CreateTokenResponse(tuple.v1(), tokenService.getExpirationDelay(), scope, tuple.v2()); listener.onResponse(response); }, listener::onFailure)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java index 71aeb64bc4276..5c161d889cfb1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java @@ -31,11 +31,9 @@ public TransportRefreshTokenAction(TransportService transportService, ActionFilt @Override protected void doExecute(Task task, CreateTokenRequest request, ActionListener listener) { tokenService.refreshToken(request.getRefreshToken(), ActionListener.wrap(tuple -> { - final String tokenStr = tokenService.getAccessTokenAsString(tuple.v1()); final String scope = getResponseScopeValue(request.getScope()); - final CreateTokenResponse response = - new CreateTokenResponse(tokenStr, tokenService.getExpirationDelay(), scope, tuple.v2()); + new CreateTokenResponse(tuple.v1(), tokenService.getExpirationDelay(), scope, tuple.v2()); listener.onResponse(response); }, listener::onFailure)); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 6f96c9bf7dd88..a8f68870556e6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -86,6 +86,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication.AuthenticationType; import org.elasticsearch.xpack.core.security.authc.KeyAndTimestamp; import org.elasticsearch.xpack.core.security.authc.TokenMetaData; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.TokensInvalidationResult; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -157,11 +158,12 @@ public final class TokenService { * Cheat Sheet and the * NIST Digital Identity Guidelines */ - private static final int ITERATIONS = 100000; + static final int TOKEN_SERVICE_KEY_ITERATIONS = 100000; + static final int TOKENS_ENCRYPTION_KEY_ITERATIONS = 1024; private static final String KDF_ALGORITHM = "PBKDF2withHMACSHA512"; - private static final int SALT_BYTES = 32; + static final int SALT_BYTES = 32; private static final int KEY_BYTES = 64; - private static final int IV_BYTES = 12; + static final int IV_BYTES = 12; private 
static final int VERSION_BYTES = 4; private static final String ENCRYPTION_CIPHER = "AES/GCM/NoPadding"; private static final String EXPIRED_TOKEN_WWW_AUTH_VALUE = "Bearer realm=\"" + XPackField.SECURITY + @@ -179,14 +181,18 @@ public final class TokenService { TimeValue.MINUS_ONE, Property.NodeScope); static final String TOKEN_DOC_TYPE = "token"; + private static final int HASHED_TOKEN_LENGTH = 44; + // UUIDs are 16 bytes encoded base64 without padding, therefore the length is (16 / 3) * 4 + ((16 % 3) * 8 + 5) / 6 chars + private static final int TOKEN_LENGTH = 22; private static final String TOKEN_DOC_ID_PREFIX = TOKEN_DOC_TYPE + "_"; - static final int MINIMUM_BYTES = VERSION_BYTES + SALT_BYTES + IV_BYTES + 1; + static final int LEGACY_MINIMUM_BYTES = VERSION_BYTES + SALT_BYTES + IV_BYTES + 1; + static final int MINIMUM_BYTES = VERSION_BYTES + TOKEN_LENGTH + 1; + static final int LEGACY_MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * LEGACY_MINIMUM_BYTES) / 3)).intValue(); static final int MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * MINIMUM_BYTES) / 3)).intValue(); + static final Version VERSION_HASHED_TOKENS = Version.V_7_2_0; static final Version VERSION_TOKENS_INDEX_INTRODUCED = Version.V_7_2_0; static final Version VERSION_ACCESS_TOKENS_AS_UUIDS = Version.V_7_2_0; static final Version VERSION_MULTIPLE_CONCURRENT_REFRESHES = Version.V_7_2_0; - // UUIDs are 16 bytes encoded base64 without padding, therefore the length is (16 / 3) * 4 + ((16 % 3) * 8 + 5) / 6 chars - private static final int TOKEN_ID_LENGTH = 22; private static final Logger logger = LogManager.getLogger(TokenService.class); private final SecureRandom secureRandom = new SecureRandom(); @@ -235,31 +241,71 @@ public TokenService(Settings settings, Clock clock, Client client, XPackLicenseS } /** - * Creates an access token and optionally a refresh token as well, based on the provided authentication and metadata with an - * auto-generated token document id. The created tokens are stored in the security index. + * Creates an access token and optionally a refresh token as well, based on the provided authentication and metadata with + * auto-generated values. The created tokens are stored in the security index for versions up to + * {@link #VERSION_TOKENS_INDEX_INTRODUCED} and to a specific security tokens index for later versions. */ - public void createOAuth2Tokens(Authentication authentication, Authentication originatingClientAuth, - Map metadata, boolean includeRefreshToken, - ActionListener> listener) { + public void createOAuth2Tokens(Authentication authentication, Authentication originatingClientAuth, Map metadata, + boolean includeRefreshToken, ActionListener> listener) { // the created token is compatible with the oldest node version in the cluster final Version tokenVersion = getTokenVersionCompatibility(); // tokens moved to a separate index in newer versions final SecurityIndexManager tokensIndex = getTokensIndexForVersion(tokenVersion); // the id of the created tokens ought be unguessable - final String userTokenId = UUIDs.randomBase64UUID(); - createOAuth2Tokens(userTokenId, tokenVersion, tokensIndex, authentication, originatingClientAuth, metadata, includeRefreshToken, - listener); + final String accessToken = UUIDs.randomBase64UUID(); + final String refreshToken = includeRefreshToken ? 
UUIDs.randomBase64UUID() : null; + createOAuth2Tokens(accessToken, refreshToken, tokenVersion, tokensIndex, authentication, originatingClientAuth, metadata, listener); } /** - * Create an access token and optionally a refresh token as well, based on the provided authentication and metadata, with the given - * token document id. The created tokens are be stored in the security index. + * Creates an access token and optionally a refresh token as well from predefined values, based on the provided authentication and + * metadata. The created tokens are stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED} and to a + * specific security tokens index for later versions. + */ + //public for testing + public void createOAuth2Tokens(String accessToken, String refreshToken, Authentication authentication, + Authentication originatingClientAuth, + Map metadata, ActionListener> listener) { + // the created token is compatible with the oldest node version in the cluster + final Version tokenVersion = getTokenVersionCompatibility(); + // tokens moved to a separate index in newer versions + final SecurityIndexManager tokensIndex = getTokensIndexForVersion(tokenVersion); + createOAuth2Tokens(accessToken, refreshToken, tokenVersion, tokensIndex, authentication, originatingClientAuth, metadata, listener); + } + + /** + * Create an access token and optionally a refresh token as well from predefined values, based on the provided authentication and + * metadata. + * + * @param accessToken The predefined seed value for the access token. This will then be + *
+ *                    <ul>
+ *                      <li>Encrypted before stored for versions before {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+ *                      <li>Hashed before stored for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+ *                      <li>Stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+ *                      <li>Stored in a specific security tokens index for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+ *                      <li>Prepended with a version ID and encoded with Base64 before returned to the caller of the APIs</li>
+ *                    </ul>
+ * @param refreshToken The predefined seed value for the refresh token. This will then be
+ *                    <ul>
+ *                      <li>Hashed before stored for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+ *                      <li>Stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+ *                      <li>Stored in a specific security tokens index for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+ *                      <li>Prepended with a version ID and encoded with Base64 before returned to the caller of the APIs for
+ *                          versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+ *                    </ul>
+ * @param tokenVersion The version of the nodes with which these tokens will be compatible. + * @param tokensIndex The security tokens index + * @param authentication The authentication object representing the user for which the tokens are created + * @param originatingClientAuth The authentication object representing the client that called the related API + * @param metadata A map with metadata to be stored in the token document + * @param listener The listener to call upon completion with a {@link Tuple} containing the + * serialized access token and serialized refresh token as these will be returned to the client */ - private void createOAuth2Tokens(String userTokenId, Version tokenVersion, SecurityIndexManager tokensIndex, + private void createOAuth2Tokens(String accessToken, String refreshToken, Version tokenVersion, SecurityIndexManager tokensIndex, Authentication authentication, Authentication originatingClientAuth, Map metadata, - boolean includeRefreshToken, ActionListener> listener) { - assert userTokenId.length() == TOKEN_ID_LENGTH : "We assume token ids have a fixed length for nodes of a certain version." - + " When changing the token length, be careful that the inferences about its length still hold."; + ActionListener> listener) { + assert accessToken.length() == TOKEN_LENGTH : "We assume token ids have a fixed length for nodes of a certain version." + + " When changing the token length, be careful that the inferences about its length still hold."; ensureEnabled(); if (authentication == null) { listener.onFailure(traceLog("create token", new IllegalArgumentException("authentication must be provided"))); @@ -269,10 +315,19 @@ private void createOAuth2Tokens(String userTokenId, Version tokenVersion, Securi } else { final Authentication tokenAuth = new Authentication(authentication.getUser(), authentication.getAuthenticatedBy(), authentication.getLookedUpBy(), tokenVersion, AuthenticationType.TOKEN, authentication.getMetadata()); - final UserToken userToken = new UserToken(userTokenId, tokenVersion, tokenAuth, getExpirationTime(), metadata); - final String plainRefreshToken = includeRefreshToken ? UUIDs.randomBase64UUID() : null; - final BytesReference tokenDocument = createTokenDocument(userToken, plainRefreshToken, originatingClientAuth); - final String documentId = getTokenDocumentId(userToken); + final String storedAccessToken; + final String storedRefreshToken; + if (tokenVersion.onOrAfter(VERSION_HASHED_TOKENS)) { + storedAccessToken = hashTokenString(accessToken); + storedRefreshToken = (null == refreshToken) ? 
null : hashTokenString(refreshToken); + } else { + storedAccessToken = accessToken; + storedRefreshToken = refreshToken; + } + final UserToken userToken = new UserToken(storedAccessToken, tokenVersion, tokenAuth, getExpirationTime(), metadata); + final BytesReference tokenDocument = createTokenDocument(userToken, storedRefreshToken, originatingClientAuth); + final String documentId = getTokenDocumentId(storedAccessToken); + final IndexRequest indexTokenRequest = client.prepareIndex(tokensIndex.aliasName(), SINGLE_MAPPING_NAME, documentId) .setOpType(OpType.CREATE) .setSource(tokenDocument, XContentType.JSON) @@ -283,15 +338,17 @@ private void createOAuth2Tokens(String userTokenId, Version tokenVersion, Securi () -> executeAsyncWithOrigin(client, SECURITY_ORIGIN, IndexAction.INSTANCE, indexTokenRequest, ActionListener.wrap(indexResponse -> { if (indexResponse.getResult() == Result.CREATED) { + final String versionedAccessToken = prependVersionAndEncodeAccessToken(tokenVersion, accessToken); if (tokenVersion.onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { - final String versionedRefreshToken = plainRefreshToken != null - ? prependVersionAndEncode(tokenVersion, plainRefreshToken) - : null; - listener.onResponse(new Tuple<>(userToken, versionedRefreshToken)); + final String versionedRefreshToken = refreshToken != null + ? prependVersionAndEncodeRefreshToken(tokenVersion, refreshToken) + : null; + listener.onResponse(new Tuple<>(versionedAccessToken, versionedRefreshToken)); } else { - // prior versions are not version-prepended, as nodes on those versions don't expect it. + // prior versions of the refresh token are not version-prepended, as nodes on those + // versions don't expect it. // Such nodes might exist in a mixed cluster during a rolling upgrade. - listener.onResponse(new Tuple<>(userToken, plainRefreshToken)); + listener.onResponse(new Tuple<>(versionedAccessToken, refreshToken)); } } else { listener.onFailure(traceLog("create token", @@ -301,6 +358,15 @@ private void createOAuth2Tokens(String userTokenId, Version tokenVersion, Securi } } + /** + * Hashes an access or refresh token String so that it can safely be persisted in the index. We don't salt + * the values as these are v4 UUIDs that have enough entropy by themselves. 
+ */ + // public for testing + public static String hashTokenString(String accessTokenString) { + return new String(Hasher.SHA256.hash(new SecureString(accessTokenString.toCharArray()))); + } + /** * Looks in the context to see if the request provided a header with a user token and if so the * token is validated, which might include authenticated decryption and verification that the token @@ -406,13 +472,24 @@ void decodeToken(String token, ActionListener listener) { final Version version = Version.readVersion(in); in.setVersion(version); if (version.onOrAfter(VERSION_ACCESS_TOKENS_AS_UUIDS)) { - // The token was created in a > VERSION_ACCESS_TOKENS_UUIDS cluster so it contains the tokenId as a String - String usedTokenId = in.readString(); - getUserTokenFromId(usedTokenId, version, listener); + // The token was created in a > VERSION_ACCESS_TOKENS_UUIDS cluster + if (in.available() < MINIMUM_BYTES) { + logger.debug("invalid token, smaller than [{}] bytes", MINIMUM_BYTES); + listener.onResponse(null); + return; + } + final String accessToken = in.readString(); + // TODO Remove this conditional after backporting to 7.x + if (version.onOrAfter(VERSION_HASHED_TOKENS)) { + final String userTokenId = hashTokenString(accessToken); + getUserTokenFromId(userTokenId, version, listener); + } else { + getUserTokenFromId(accessToken, version, listener); + } } else { // The token was created in a < VERSION_ACCESS_TOKENS_UUIDS cluster so we need to decrypt it to get the tokenId - if (in.available() < MINIMUM_BASE64_BYTES) { - logger.debug("invalid token, smaller than [{}] bytes", MINIMUM_BASE64_BYTES); + if (in.available() < LEGACY_MINIMUM_BYTES) { + logger.debug("invalid token, smaller than [{}] bytes", LEGACY_MINIMUM_BYTES); listener.onResponse(null); return; } @@ -709,8 +786,12 @@ private void indexInvalidation(Collection tokenIds, SecurityIndexManager /** * Called by the transport action in order to start the process of refreshing a token. + * + * @param refreshToken The refresh token as provided by the client + * @param listener The listener to call upon completion with a {@link Tuple} containing the + * serialized access token and serialized refresh token as these will be returned to the client */ - public void refreshToken(String refreshToken, ActionListener> listener) { + public void refreshToken(String refreshToken, ActionListener> listener) { ensureEnabled(); final Instant refreshRequested = clock.instant(); final Iterator backoff = DEFAULT_BACKOFF.iterator(); @@ -718,36 +799,49 @@ public void refreshToken(String refreshToken, ActionListener { final Authentication clientAuth = Authentication.readFromContext(client.threadPool().getThreadContext()); - innerRefresh(tokenDocHit.getId(), tokenDocHit.getSourceAsMap(), tokenDocHit.getSeqNo(), tokenDocHit.getPrimaryTerm(), - clientAuth, backoff, refreshRequested, listener); + innerRefresh(refreshToken, tokenDocHit.getId(), tokenDocHit.getSourceAsMap(), tokenDocHit.getSeqNo(), + tokenDocHit.getPrimaryTerm(), + clientAuth, backoff, refreshRequested, listener); }, listener::onFailure)); } /** - * Inferes the format and version of the passed in {@code refreshToken}. Delegates the actual search of the token document to + * Infers the format and version of the passed in {@code refreshToken}. Delegates the actual search of the token document to * {@code #findTokenFromRefreshToken(String, SecurityIndexManager, Iterator, ActionListener)} . 
*/ private void findTokenFromRefreshToken(String refreshToken, Iterator backoff, ActionListener listener) { - if (refreshToken.length() == TOKEN_ID_LENGTH) { + if (refreshToken.length() == TOKEN_LENGTH) { // first check if token has the old format before the new version-prepended one logger.debug("Assuming an unversioned refresh token [{}], generated for node versions" - + " prior to the introduction of the version-header format.", refreshToken); + + " prior to the introduction of the version-header format.", refreshToken); findTokenFromRefreshToken(refreshToken, securityMainIndex, backoff, listener); } else { - try { - final Tuple versionAndRefreshTokenTuple = unpackVersionAndPayload(refreshToken); - final Version refreshTokenVersion = versionAndRefreshTokenTuple.v1(); - final String unencodedRefreshToken = versionAndRefreshTokenTuple.v2(); - if (false == refreshTokenVersion.onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED) - || unencodedRefreshToken.length() != TOKEN_ID_LENGTH) { - logger.debug("Decoded refresh token [{}] with version [{}] is invalid.", unencodedRefreshToken, refreshTokenVersion); + if (refreshToken.length() == HASHED_TOKEN_LENGTH) { + logger.debug("Assuming a hashed refresh token [{}] retrieved from the tokens index", refreshToken); + findTokenFromRefreshToken(refreshToken, securityTokensIndex, backoff, listener); + } else { + logger.debug("Assuming a refresh token [{}] provided from a client", refreshToken); + try { + final Tuple versionAndRefreshTokenTuple = unpackVersionAndPayload(refreshToken); + final Version refreshTokenVersion = versionAndRefreshTokenTuple.v1(); + final String unencodedRefreshToken = versionAndRefreshTokenTuple.v2(); + if (refreshTokenVersion.before(VERSION_TOKENS_INDEX_INTRODUCED) || unencodedRefreshToken.length() != TOKEN_LENGTH) { + logger.debug("Decoded refresh token [{}] with version [{}] is invalid.", unencodedRefreshToken, + refreshTokenVersion); + listener.onFailure(malformedTokenException()); + } else { + // TODO Remove this conditional after backporting to 7.x + if (refreshTokenVersion.onOrAfter(VERSION_HASHED_TOKENS)) { + final String hashedRefreshToken = hashTokenString(unencodedRefreshToken); + findTokenFromRefreshToken(hashedRefreshToken, securityTokensIndex, backoff, listener); + } else { + findTokenFromRefreshToken(unencodedRefreshToken, securityTokensIndex, backoff, listener); + } + } + } catch (IOException e) { + logger.debug(() -> new ParameterizedMessage("Could not decode refresh token [{}].", refreshToken), e); listener.onFailure(malformedTokenException()); - } else { - findTokenFromRefreshToken(unencodedRefreshToken, securityTokensIndex, backoff, listener); } - } catch (IOException e) { - logger.debug("Could not decode refresh token [" + refreshToken + "].", e); - listener.onFailure(malformedTokenException()); } } } @@ -763,7 +857,7 @@ private void findTokenFromRefreshToken(String refreshToken, SecurityIndexManager final Consumer maybeRetryOnFailure = ex -> { if (backoff.hasNext()) { final TimeValue backofTimeValue = backoff.next(); - logger.debug("retrying after [" + backofTimeValue + "] back off"); + logger.debug("retrying after [{}] back off", backofTimeValue); final Runnable retryWithContextRunnable = client.threadPool().getThreadContext() .preserveContext(() -> findTokenFromRefreshToken(refreshToken, tokensIndexManager, backoff, listener)); client.threadPool().schedule(retryWithContextRunnable, backofTimeValue, GENERIC); @@ -821,13 +915,14 @@ private void findTokenFromRefreshToken(String refreshToken, SecurityIndexManager * 
supersedes this one. The new document that contains the new access token and refresh token is created and finally the new access * token and refresh token are returned to the listener. */ - private void innerRefresh(String tokenDocId, Map source, long seqNo, long primaryTerm, Authentication clientAuth, - Iterator backoff, Instant refreshRequested, ActionListener> listener) { + private void innerRefresh(String refreshToken, String tokenDocId, Map source, long seqNo, long primaryTerm, + Authentication clientAuth, Iterator backoff, Instant refreshRequested, + ActionListener> listener) { logger.debug("Attempting to refresh token stored in token document [{}]", tokenDocId); final Consumer onFailure = ex -> listener.onFailure(traceLog("refresh token", tokenDocId, ex)); final Tuple> checkRefreshResult; try { - checkRefreshResult = checkTokenDocumentForRefresh(clock.instant(), clientAuth, source); + checkRefreshResult = checkTokenDocumentForRefresh(refreshRequested, clientAuth, source); } catch (DateTimeException | IllegalStateException e) { onFailure.accept(new ElasticsearchSecurityException("invalid token document", e)); return; @@ -838,23 +933,29 @@ private void innerRefresh(String tokenDocId, Map source, long se } final RefreshTokenStatus refreshTokenStatus = checkRefreshResult.v1(); if (refreshTokenStatus.isRefreshed()) { - logger.debug("Token document [{}] was recently refreshed, when a new token document [{}] was generated. Reusing that result.", - tokenDocId, refreshTokenStatus.getSupersededBy()); - getSupersedingTokenDocAsyncWithRetry(refreshTokenStatus, backoff, listener); + logger.debug("Token document [{}] was recently refreshed, when a new token document was generated. Reusing that result.", + tokenDocId); + decryptAndReturnSupersedingTokens(refreshToken, refreshTokenStatus, listener); } else { - final String newUserTokenId = UUIDs.randomBase64UUID(); + final String newAccessTokenString = UUIDs.randomBase64UUID(); + final String newRefreshTokenString = UUIDs.randomBase64UUID(); final Version newTokenVersion = getTokenVersionCompatibility(); final Map updateMap = new HashMap<>(); updateMap.put("refreshed", true); - updateMap.put("refresh_time", clock.instant().toEpochMilli()); - if (newTokenVersion.onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { - // the superseding token document reference is formated as "|"; - // for now, only the ".security-tokens|" is a valid reference format - updateMap.put("superseded_by", securityTokensIndex.aliasName() + "|" + getTokenDocumentId(newUserTokenId)); - } else { - // preservers the format of the reference (without the alias prefix) - // so that old nodes in a mixed cluster can still understand it - updateMap.put("superseded_by", getTokenDocumentId(newUserTokenId)); + if (newTokenVersion.onOrAfter(VERSION_MULTIPLE_CONCURRENT_REFRESHES)) { + updateMap.put("refresh_time", clock.instant().toEpochMilli()); + try { + final byte[] iv = getRandomBytes(IV_BYTES); + final byte[] salt = getRandomBytes(SALT_BYTES); + String encryptedAccessAndRefreshToken = encryptSupersedingTokens(newAccessTokenString, + newRefreshTokenString, refreshToken, iv, salt); + updateMap.put("superseding.encrypted_tokens", encryptedAccessAndRefreshToken); + updateMap.put("superseding.encryption_iv", Base64.getEncoder().encodeToString(iv)); + updateMap.put("superseding.encryption_salt", Base64.getEncoder().encodeToString(salt)); + } catch (GeneralSecurityException e) { + logger.warn("could not encrypt access token and refresh token string", e); + 
onFailure.accept(invalidGrantException("could not refresh the requested token")); + } } assert seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO : "expected an assigned sequence number"; assert primaryTerm != SequenceNumbers.UNASSIGNED_PRIMARY_TERM : "expected an assigned primary term"; @@ -875,14 +976,15 @@ private void innerRefresh(String tokenDocId, Map source, long se updateResponse.getGetResult().sourceAsMap())); final Tuple parsedTokens = parseTokensFromDocument(source, null); final UserToken toRefreshUserToken = parsedTokens.v1(); - createOAuth2Tokens(newUserTokenId, newTokenVersion, getTokensIndexForVersion(newTokenVersion), - toRefreshUserToken.getAuthentication(), clientAuth, toRefreshUserToken.getMetadata(), true, listener); + createOAuth2Tokens(newAccessTokenString, newRefreshTokenString, newTokenVersion, + getTokensIndexForVersion(newTokenVersion), toRefreshUserToken.getAuthentication(), clientAuth, + toRefreshUserToken.getMetadata(), listener); } else if (backoff.hasNext()) { logger.info("failed to update the original token document [{}], the update result was [{}]. Retrying", tokenDocId, updateResponse.getResult()); final Runnable retryWithContextRunnable = client.threadPool().getThreadContext() - .preserveContext(() -> innerRefresh(tokenDocId, source, seqNo, primaryTerm, clientAuth, backoff, - refreshRequested, listener)); + .preserveContext(() -> innerRefresh(refreshToken, tokenDocId, source, seqNo, primaryTerm, clientAuth, + backoff, refreshRequested, listener)); client.threadPool().schedule(retryWithContextRunnable, backoff.next(), GENERIC); } else { logger.info("failed to update the original token document [{}] after all retries, the update result was [{}]. ", @@ -898,8 +1000,8 @@ private void innerRefresh(String tokenDocId, Map source, long se @Override public void onResponse(GetResponse response) { if (response.isExists()) { - innerRefresh(tokenDocId, response.getSource(), response.getSeqNo(), response.getPrimaryTerm(), - clientAuth, backoff, refreshRequested, listener); + innerRefresh(refreshToken, tokenDocId, response.getSource(), response.getSeqNo(), + response.getPrimaryTerm(), clientAuth, backoff, refreshRequested, listener); } else { logger.warn("could not find token document [{}] for refresh", tokenDocId); onFailure.accept(invalidGrantException("could not refresh the requested token")); @@ -927,8 +1029,8 @@ public void onFailure(Exception e) { if (backoff.hasNext()) { logger.debug("failed to update the original token document [{}], retrying", tokenDocId); final Runnable retryWithContextRunnable = client.threadPool().getThreadContext() - .preserveContext(() -> innerRefresh(tokenDocId, source, seqNo, primaryTerm, clientAuth, backoff, - refreshRequested, listener)); + .preserveContext(() -> innerRefresh(refreshToken, tokenDocId, source, seqNo, primaryTerm, + clientAuth, backoff, refreshRequested, listener)); client.threadPool().schedule(retryWithContextRunnable, backoff.next(), GENERIC); } else { logger.warn("failed to update the original token document [{}], after all retries", tokenDocId); @@ -941,72 +1043,47 @@ public void onFailure(Exception e) { } } - private void getSupersedingTokenDocAsyncWithRetry(RefreshTokenStatus refreshTokenStatus, Iterator backoff, - ActionListener> listener) { - final Consumer onFailure = ex -> listener - .onFailure(traceLog("get superseding token", refreshTokenStatus.getSupersededBy(), ex)); - getSupersedingTokenDocAsync(refreshTokenStatus, new ActionListener() { - private final Consumer maybeRetryOnFailure = ex -> { - if 
(backoff.hasNext()) { - final TimeValue backofTimeValue = backoff.next(); - logger.debug("retrying after [" + backofTimeValue + "] back off"); - final Runnable retryWithContextRunnable = client.threadPool().getThreadContext() - .preserveContext(() -> getSupersedingTokenDocAsync(refreshTokenStatus, this)); - client.threadPool().schedule(retryWithContextRunnable, backofTimeValue, GENERIC); - } else { - logger.warn("back off retries exhausted"); - onFailure.accept(ex); - } - }; - - @Override - public void onResponse(GetResponse response) { - if (response.isExists()) { - logger.debug("found superseding token document [{}] in index [{}] by following the [{}] reference", response.getId(), - response.getIndex(), refreshTokenStatus.getSupersededBy()); - final Tuple parsedTokens; - try { - parsedTokens = parseTokensFromDocument(response.getSource(), null); - } catch (IllegalStateException | DateTimeException e) { - logger.error("unable to decode existing user token", e); - listener.onFailure(new ElasticsearchSecurityException("could not refresh the requested token", e)); - return; - } - listener.onResponse(parsedTokens); - } else { - // We retry this since the creation of the superseding token document might already be in flight but not - // yet completed, triggered by a refresh request that came a few milliseconds ago - logger.info("could not find superseding token document from [{}] reference, retrying", - refreshTokenStatus.getSupersededBy()); - maybeRetryOnFailure.accept(invalidGrantException("could not refresh the requested token")); - } - } - - @Override - public void onFailure(Exception e) { - if (isShardNotAvailableException(e)) { - logger.info("could not find superseding token document from reference [{}], retrying", - refreshTokenStatus.getSupersededBy()); - maybeRetryOnFailure.accept(invalidGrantException("could not refresh the requested token")); - } else { - logger.warn("could not find superseding token document from reference [{}]", refreshTokenStatus.getSupersededBy()); - onFailure.accept(invalidGrantException("could not refresh the requested token")); - } + /** + * Decrypts the values of the superseding access token and the refresh token, using a key derived from the superseded refresh token. It + * encodes the version and serializes the tokens before calling the listener, in the same manner as {@link #createOAuth2Tokens } does. 
+ * + * @param refreshToken The refresh token that the user sent in the request, used to derive the decryption key + * @param refreshTokenStatus The {@link RefreshTokenStatus} containing information about the superseding tokens as retrieved from the + * index + * @param listener The listener to call upon completion with a {@link Tuple} containing the + * serialized access token and serialized refresh token as these will be returned to the client + */ + void decryptAndReturnSupersedingTokens(String refreshToken, RefreshTokenStatus refreshTokenStatus, + ActionListener> listener) { + final byte[] iv = Base64.getDecoder().decode(refreshTokenStatus.getIv()); + final byte[] salt = Base64.getDecoder().decode(refreshTokenStatus.getSalt()); + final byte[] encryptedSupersedingTokens = Base64.getDecoder().decode(refreshTokenStatus.getSupersedingTokens()); + try { + Cipher cipher = getDecryptionCipher(iv, refreshToken, salt); + final String supersedingTokens = new String(cipher.doFinal(encryptedSupersedingTokens), StandardCharsets.UTF_8); + final String[] decryptedTokens = supersedingTokens.split("\\|"); + if (decryptedTokens.length != 2) { + logger.warn("Decrypted tokens string is not correctly formatted"); + listener.onFailure(invalidGrantException("could not refresh the requested token")); } - }); + listener.onResponse(new Tuple<>(prependVersionAndEncodeAccessToken(refreshTokenStatus.getVersion(), decryptedTokens[0]), + prependVersionAndEncodeRefreshToken(refreshTokenStatus.getVersion(), decryptedTokens[1]))); + } catch (GeneralSecurityException | IOException e) { + logger.warn("Could not get stored superseding token values", e); + listener.onFailure(invalidGrantException("could not refresh the requested token")); + } } - private void getSupersedingTokenDocAsync(RefreshTokenStatus refreshTokenStatus, ActionListener listener) { - final String supersedingDocReference = refreshTokenStatus.getSupersededBy(); - if (supersedingDocReference.startsWith(securityTokensIndex.aliasName() + "|")) { - // superseding token doc is stored on the new tokens index, irrespective of where the superseded token doc resides - final String supersedingDocId = supersedingDocReference.substring(securityTokensIndex.aliasName().length() + 1); - getTokenDocAsync(supersedingDocId, securityTokensIndex, listener); - } else { - assert false == supersedingDocReference - .contains("|") : "The superseding doc reference appears to contain an alias name but should not"; - getTokenDocAsync(supersedingDocReference, securityMainIndex, listener); - } + /* + * Encrypts the values of the superseding access token and the refresh token, using a key derived from the superseded refresh token. 
+ * The tokens are concatenated to a string separated with `|` before encryption so that we only perform one encryption operation + * and that we only need to store one field + */ + String encryptSupersedingTokens(String supersedingAccessToken, String supersedingRefreshToken, + String refreshToken, byte[] iv, byte[] salt) throws GeneralSecurityException { + Cipher cipher = getEncryptionCipher(iv, refreshToken, salt); + final String supersedingTokens = supersedingAccessToken + "|" + supersedingRefreshToken; + return Base64.getEncoder().encodeToString(cipher.doFinal(supersedingTokens.getBytes(StandardCharsets.UTF_8))); } private void getTokenDocAsync(String tokenDocId, SecurityIndexManager tokensIndex, ActionListener listener) { @@ -1016,7 +1093,7 @@ private void getTokenDocAsync(String tokenDocId, SecurityIndexManager tokensInde () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, getRequest, listener, client::get)); } - private Version getTokenVersionCompatibility() { + Version getTokenVersionCompatibility() { // newly minted tokens are compatible with the min node version in the cluster return clusterService.state().nodes().getMinNodeVersion(); } @@ -1029,13 +1106,13 @@ public static Boolean isTokenServiceEnabled(Settings settings) { * A refresh token has a fixed maximum lifetime of {@code ExpiredTokenRemover#MAXIMUM_TOKEN_LIFETIME_HOURS} hours. This checks if the * token document represents a valid token wrt this time interval. */ - private static Optional checkTokenDocumentExpired(Instant now, Map source) { - final Long creationEpochMilli = (Long) source.get("creation_time"); + private static Optional checkTokenDocumentExpired(Instant refreshRequested, Map src) { + final Long creationEpochMilli = (Long) src.get("creation_time"); if (creationEpochMilli == null) { throw new IllegalStateException("token document is missing creation time value"); } else { final Instant creationTime = Instant.ofEpochMilli(creationEpochMilli); - if (now.isAfter(creationTime.plus(ExpiredTokenRemover.MAXIMUM_TOKEN_LIFETIME_HOURS, ChronoUnit.HOURS))) { + if (refreshRequested.isAfter(creationTime.plus(ExpiredTokenRemover.MAXIMUM_TOKEN_LIFETIME_HOURS, ChronoUnit.HOURS))) { return Optional.of(invalidGrantException("token document has expired")); } else { return Optional.empty(); @@ -1048,17 +1125,17 @@ private static Optional checkTokenDocumentExpire * parsed {@code RefreshTokenStatus} together with an {@code Optional} validation exception that encapsulates the various logic about * when and by who a token can be refreshed. 
*/ - private static Tuple> checkTokenDocumentForRefresh(Instant now, - Authentication clientAuth, Map source) throws IllegalStateException, DateTimeException { + private static Tuple> checkTokenDocumentForRefresh( + Instant refreshRequested, Authentication clientAuth, Map source) throws IllegalStateException, DateTimeException { final RefreshTokenStatus refreshTokenStatus = RefreshTokenStatus.fromSourceMap(getRefreshTokenSourceMap(source)); final UserToken userToken = UserToken.fromSourceMap(getUserTokenSourceMap(source)); refreshTokenStatus.setVersion(userToken.getVersion()); - final ElasticsearchSecurityException validationException = checkTokenDocumentExpired(now, source).orElseGet(() -> { + final ElasticsearchSecurityException validationException = checkTokenDocumentExpired(refreshRequested, source).orElseGet(() -> { if (refreshTokenStatus.isInvalidated()) { return invalidGrantException("token has been invalidated"); } else { return checkClientCanRefresh(refreshTokenStatus, clientAuth) - .orElse(checkMultipleRefreshes(now, refreshTokenStatus).orElse(null)); + .orElse(checkMultipleRefreshes(refreshRequested, refreshTokenStatus).orElse(null)); } }); return new Tuple<>(refreshTokenStatus, Optional.ofNullable(validationException)); @@ -1111,13 +1188,14 @@ private static Map getUserTokenSourceMap(Map sou * @return An {@code Optional} containing the exception in case this refresh token cannot be reused, or an empty Optional if * refreshing is allowed. */ - private static Optional checkMultipleRefreshes(Instant now, RefreshTokenStatus refreshTokenStatus) { + private static Optional checkMultipleRefreshes(Instant refreshRequested, + RefreshTokenStatus refreshTokenStatus) { if (refreshTokenStatus.isRefreshed()) { if (refreshTokenStatus.getVersion().onOrAfter(VERSION_MULTIPLE_CONCURRENT_REFRESHES)) { - if (now.isAfter(refreshTokenStatus.getRefreshInstant().plus(30L, ChronoUnit.SECONDS))) { + if (refreshRequested.isAfter(refreshTokenStatus.getRefreshInstant().plus(30L, ChronoUnit.SECONDS))) { return Optional.of(invalidGrantException("token has already been refreshed more than 30 seconds in the past")); } - if (now.isBefore(refreshTokenStatus.getRefreshInstant().minus(30L, ChronoUnit.SECONDS))) { + if (refreshRequested.isBefore(refreshTokenStatus.getRefreshInstant().minus(30L, ChronoUnit.SECONDS))) { return Optional .of(invalidGrantException("token has been refreshed more than 30 seconds in the future, clock skew too great")); } @@ -1269,7 +1347,7 @@ private void sourceIndicesWithTokensAndRun(ActionListener> listener private BytesReference createTokenDocument(UserToken userToken, @Nullable String refreshToken, @Nullable Authentication originatingClientAuth) { assert refreshToken == null || originatingClientAuth != null : "non-null refresh token " + refreshToken - + " requires non-null client authn " + originatingClientAuth; + + " requires non-null client authn " + originatingClientAuth; try (XContentBuilder builder = XContentFactory.jsonBuilder()) { builder.startObject(); builder.field("doc_type", TOKEN_DOC_TYPE); @@ -1332,21 +1410,14 @@ private Tuple filterAndParseHit(SearchHit hit, @Nullable Pred */ private Tuple parseTokensFromDocument(Map source, @Nullable Predicate> filter) throws IllegalStateException, DateTimeException { - final String plainRefreshToken = (String) ((Map) source.get("refresh_token")).get("token"); + final String hashedRefreshToken = (String) ((Map) source.get("refresh_token")).get("token"); final Map userTokenSource = (Map) ((Map) 
source.get("access_token")).get("user_token"); if (null != filter && filter.test(userTokenSource) == false) { return null; } final UserToken userToken = UserToken.fromSourceMap(userTokenSource); - if (userToken.getVersion().onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { - final String versionedRefreshToken = plainRefreshToken != null ? - prependVersionAndEncode(userToken.getVersion(), plainRefreshToken) : null; - return new Tuple<>(userToken, versionedRefreshToken); - } else { - // do not prepend version to refresh token as the audience node version cannot deal with it - return new Tuple<>(userToken, plainRefreshToken); - } + return new Tuple<>(userToken, hashedRefreshToken); } private static String getTokenDocumentId(UserToken userToken) { @@ -1450,7 +1521,7 @@ public TimeValue getExpirationDelay() { return expirationDelay; } - private Instant getExpirationTime() { + Instant getExpirationTime() { return clock.instant().plusSeconds(expirationDelay.getSeconds()); } @@ -1478,38 +1549,34 @@ private String getFromHeader(ThreadContext threadContext) { return null; } - /** - * Serializes a token to a String containing the minimum compatible node version for decoding it back and either an encrypted - * representation of the token id for versions earlier to {@code #VERSION_ACCESS_TOKENS_UUIDS} or the token itself for versions after - * {@code #VERSION_ACCESS_TOKENS_UUIDS} - */ - public String getAccessTokenAsString(UserToken userToken) throws IOException, GeneralSecurityException { - if (userToken.getVersion().onOrAfter(VERSION_ACCESS_TOKENS_AS_UUIDS)) { + String prependVersionAndEncodeAccessToken(Version version, String accessToken) throws IOException, GeneralSecurityException { + if (version.onOrAfter(VERSION_ACCESS_TOKENS_AS_UUIDS)) { try (ByteArrayOutputStream os = new ByteArrayOutputStream(MINIMUM_BASE64_BYTES); OutputStream base64 = Base64.getEncoder().wrap(os); StreamOutput out = new OutputStreamStreamOutput(base64)) { - out.setVersion(userToken.getVersion()); - Version.writeVersion(userToken.getVersion(), out); - out.writeString(userToken.getId()); + out.setVersion(version); + Version.writeVersion(version, out); + out.writeString(accessToken); return new String(os.toByteArray(), StandardCharsets.UTF_8); } } else { // we know that the minimum length is larger than the default of the ByteArrayOutputStream so set the size to this explicitly - try (ByteArrayOutputStream os = new ByteArrayOutputStream(MINIMUM_BASE64_BYTES); + try (ByteArrayOutputStream os = new ByteArrayOutputStream(LEGACY_MINIMUM_BASE64_BYTES); OutputStream base64 = Base64.getEncoder().wrap(os); StreamOutput out = new OutputStreamStreamOutput(base64)) { - out.setVersion(userToken.getVersion()); + out.setVersion(version); KeyAndCache keyAndCache = keyCache.activeKeyCache; - Version.writeVersion(userToken.getVersion(), out); + Version.writeVersion(version, out); out.writeByteArray(keyAndCache.getSalt().bytes); out.writeByteArray(keyAndCache.getKeyHash().bytes); - final byte[] initializationVector = getNewInitializationVector(); + final byte[] initializationVector = getRandomBytes(IV_BYTES); out.writeByteArray(initializationVector); try (CipherOutputStream encryptedOutput = - new CipherOutputStream(out, getEncryptionCipher(initializationVector, keyAndCache, userToken.getVersion())); + new CipherOutputStream(out, getEncryptionCipher(initializationVector, keyAndCache, version)); StreamOutput encryptedStreamOutput = new OutputStreamStreamOutput(encryptedOutput)) { - encryptedStreamOutput.setVersion(userToken.getVersion()); - 
encryptedStreamOutput.writeString(userToken.getId()); + encryptedStreamOutput.setVersion(version); + encryptedStreamOutput.writeString(accessToken); + // StreamOutput needs to be closed explicitly because it wraps CipherOutputStream encryptedStreamOutput.close(); return new String(os.toByteArray(), StandardCharsets.UTF_8); } @@ -1517,7 +1584,7 @@ public String getAccessTokenAsString(UserToken userToken) throws IOException, Ge } } - private static String prependVersionAndEncode(Version version, String payload) { + static String prependVersionAndEncodeRefreshToken(Version version, String payload) { try (ByteArrayOutputStream os = new ByteArrayOutputStream(); OutputStream base64 = Base64.getEncoder().wrap(os); StreamOutput out = new OutputStreamStreamOutput(base64)) { @@ -1563,6 +1630,17 @@ Cipher getEncryptionCipher(byte[] iv, KeyAndCache keyAndCache, Version version) return cipher; } + /** + * Initialize the encryption cipher using the provided password to derive the encryption key. + */ + Cipher getEncryptionCipher(byte[] iv, String password, byte[] salt) throws GeneralSecurityException { + SecretKey key = computeSecretKey(password.toCharArray(), salt, TOKENS_ENCRYPTION_KEY_ITERATIONS); + Cipher cipher = Cipher.getInstance(ENCRYPTION_CIPHER); + cipher.init(Cipher.ENCRYPT_MODE, key, new GCMParameterSpec(128, iv), secureRandom); + cipher.updateAAD(salt); + return cipher; + } + private void getKeyAsync(BytesKey decodedSalt, KeyAndCache keyAndCache, ActionListener listener) { final SecretKey decodeKey = keyAndCache.getKey(decodedSalt); if (decodeKey != null) { @@ -1595,21 +1673,31 @@ private Cipher getDecryptionCipher(byte[] iv, SecretKey key, Version version, By return cipher; } - // Package private for testing - byte[] getNewInitializationVector() { - final byte[] initializationVector = new byte[IV_BYTES]; - secureRandom.nextBytes(initializationVector); - return initializationVector; + /** + * Initialize the decryption cipher using the provided password to derive the decryption key. + */ + private Cipher getDecryptionCipher(byte[] iv, String password, byte[] salt) throws GeneralSecurityException { + SecretKey key = computeSecretKey(password.toCharArray(), salt, TOKENS_ENCRYPTION_KEY_ITERATIONS); + Cipher cipher = Cipher.getInstance(ENCRYPTION_CIPHER); + cipher.init(Cipher.DECRYPT_MODE, key, new GCMParameterSpec(128, iv), secureRandom); + cipher.updateAAD(salt); + return cipher; + } + + byte[] getRandomBytes(int length) { + final byte[] bytes = new byte[length]; + secureRandom.nextBytes(bytes); + return bytes; } /** * Generates a secret key based off of the provided password and salt. - * This method is computationally expensive. + * This method can be computationally expensive. 
*/ - static SecretKey computeSecretKey(char[] rawPassword, byte[] salt) + static SecretKey computeSecretKey(char[] rawPassword, byte[] salt, int iterations) throws NoSuchAlgorithmException, InvalidKeySpecException { SecretKeyFactory secretKeyFactory = SecretKeyFactory.getInstance(KDF_ALGORITHM); - PBEKeySpec keySpec = new PBEKeySpec(rawPassword, salt, ITERATIONS, 128); + PBEKeySpec keySpec = new PBEKeySpec(rawPassword, salt, iterations, 128); SecretKey tmp = secretKeyFactory.generateSecret(keySpec); return new SecretKeySpec(tmp.getEncoded(), "AES"); } @@ -2003,7 +2091,7 @@ private KeyAndCache(KeyAndTimestamp keyAndTimestamp, BytesKey salt) { .setMaximumWeight(500L) .build(); try { - SecretKey secretKey = computeSecretKey(keyAndTimestamp.getKey().getChars(), salt.bytes); + SecretKey secretKey = computeSecretKey(keyAndTimestamp.getKey().getChars(), salt.bytes, TOKEN_SERVICE_KEY_ITERATIONS); keyCache.put(salt, secretKey); } catch (Exception e) { throw new IllegalStateException(e); @@ -2019,7 +2107,7 @@ private SecretKey getKey(BytesKey salt) { public SecretKey getOrComputeKey(BytesKey decodedSalt) throws ExecutionException { return keyCache.computeIfAbsent(decodedSalt, (salt) -> { try (SecureString closeableChars = keyAndTimestamp.getKey().clone()) { - return computeSecretKey(closeableChars.getChars(), salt.bytes); + return computeSecretKey(closeableChars.getChars(), salt.bytes, TOKEN_SERVICE_KEY_ITERATIONS); } }); } @@ -2074,24 +2162,32 @@ KeyAndCache get(BytesKey passphraseHash) { /** * Contains metadata associated with the refresh token that is used for validity checks, but does not contain the proper token string. */ - private static final class RefreshTokenStatus { + static final class RefreshTokenStatus { private final boolean invalidated; private final String associatedUser; private final String associatedRealm; private final boolean refreshed; @Nullable private final Instant refreshInstant; - @Nullable private final String supersededBy; + @Nullable + private final String supersedingTokens; + @Nullable + private final String iv; + @Nullable + private final String salt; private Version version; - private RefreshTokenStatus(boolean invalidated, String associatedUser, String associatedRealm, boolean refreshed, - Instant refreshInstant, String supersededBy) { + // pkg-private for testing + RefreshTokenStatus(boolean invalidated, String associatedUser, String associatedRealm, boolean refreshed, Instant refreshInstant, + String supersedingTokens, String iv, String salt) { this.invalidated = invalidated; this.associatedUser = associatedUser; this.associatedRealm = associatedRealm; this.refreshed = refreshed; this.refreshInstant = refreshInstant; - this.supersededBy = supersededBy; + this.supersedingTokens = supersedingTokens; + this.iv = iv; + this.salt = salt; } boolean isInvalidated() { @@ -2114,8 +2210,19 @@ boolean isRefreshed() { return refreshInstant; } - @Nullable String getSupersededBy() { - return supersededBy; + @Nullable + String getSupersedingTokens() { + return supersedingTokens; + } + + @Nullable + String getIv() { + return iv; + } + + @Nullable + String getSalt() { + return salt; } Version getVersion() { @@ -2149,8 +2256,11 @@ static RefreshTokenStatus fromSourceMap(Map refreshTokenSource) } final Long refreshEpochMilli = (Long) refreshTokenSource.get("refresh_time"); final Instant refreshInstant = refreshEpochMilli == null ? 
null : Instant.ofEpochMilli(refreshEpochMilli); - final String supersededBy = (String) refreshTokenSource.get("superseded_by"); - return new RefreshTokenStatus(invalidated, associatedUser, associatedRealm, refreshed, refreshInstant, supersededBy); + final String supersedingTokens = (String) refreshTokenSource.get("superseding.encrypted_tokens"); + final String iv = (String) refreshTokenSource.get("superseding.encryption_iv"); + final String salt = (String) refreshTokenSource.get("superseding.encryption_salt"); + return new RefreshTokenStatus(invalidated, associatedUser, associatedRealm, refreshed, refreshInstant, supersedingTokens, + iv, salt); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java index 2bcf0849084bc..f46aa42a24450 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java @@ -50,7 +50,7 @@ public final class UserToken implements Writeable, ToXContentObject { /** * Create a new token with an autogenerated id */ - UserToken(Version version, Authentication authentication, Instant expirationTime, Map metadata) { + private UserToken(Version version, Authentication authentication, Instant expirationTime, Map metadata) { this(UUIDs.randomBase64UUID(), version, authentication, expirationTime, metadata); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java deleted file mode 100644 index 6368f4a7510c9..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java +++ /dev/null @@ -1,398 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
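The refresh-token document now carries superseding.encrypted_tokens alongside superseding.encryption_iv and superseding.encryption_salt, and TokenService gains password-based cipher helpers plus a computeSecretKey variant with a configurable iteration count. A minimal, self-contained sketch of how such a payload could be decrypted follows; the KDF and cipher algorithm names, the iteration count, and the Base64 encoding of the stored fields are assumptions here, since the real constants (KDF_ALGORITHM, ENCRYPTION_CIPHER, TOKENS_ENCRYPTION_KEY_ITERATIONS) are defined outside this hunk.

import javax.crypto.Cipher;
import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.GCMParameterSpec;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.util.Base64;

public final class SupersedingTokensDecryptSketch {

    // Hypothetical stand-ins for TokenService constants that are not visible in this hunk.
    private static final String KDF_ALGORITHM = "PBKDF2WithHmacSHA512";   // assumed
    private static final String ENCRYPTION_CIPHER = "AES/GCM/NoPadding";  // assumed
    private static final int TOKENS_ENCRYPTION_KEY_ITERATIONS = 1024;     // assumed

    /** Derives an AES key from a password and salt, mirroring computeSecretKey(password, salt, iterations). */
    static SecretKey deriveKey(char[] password, byte[] salt, int iterations) throws GeneralSecurityException {
        SecretKeyFactory factory = SecretKeyFactory.getInstance(KDF_ALGORITHM);
        PBEKeySpec spec = new PBEKeySpec(password, salt, iterations, 128);
        return new SecretKeySpec(factory.generateSecret(spec).getEncoded(), "AES");
    }

    /** Decrypts a superseding-tokens payload using the stored IV and salt; the salt also serves as AAD. */
    static String decryptSupersedingTokens(String presentedRefreshToken, String encryptedTokensB64,
                                           String ivB64, String saltB64) throws GeneralSecurityException {
        byte[] iv = Base64.getDecoder().decode(ivB64);
        byte[] salt = Base64.getDecoder().decode(saltB64);
        SecretKey key = deriveKey(presentedRefreshToken.toCharArray(), salt, TOKENS_ENCRYPTION_KEY_ITERATIONS);
        Cipher cipher = Cipher.getInstance(ENCRYPTION_CIPHER);
        cipher.init(Cipher.DECRYPT_MODE, key, new GCMParameterSpec(128, iv));
        cipher.updateAAD(salt);
        byte[] plain = cipher.doFinal(Base64.getDecoder().decode(encryptedTokensB64));
        return new String(plain, StandardCharsets.UTF_8);
    }
}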
- */ -package org.elasticsearch.xpack.security.authc.esnative; - -import joptsimple.OptionParser; -import joptsimple.OptionSet; -import joptsimple.OptionSpec; -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.core.Appender; -import org.apache.logging.log4j.core.LogEvent; -import org.apache.logging.log4j.core.LoggerContext; -import org.apache.logging.log4j.core.appender.AbstractAppender; -import org.apache.logging.log4j.core.config.Configuration; -import org.apache.logging.log4j.core.config.LoggerConfig; -import org.apache.logging.log4j.core.layout.PatternLayout; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cli.EnvironmentAwareCommand; -import org.elasticsearch.cli.LoggingAwareMultiCommand; -import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cli.Terminal.Verbosity; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.env.Environment; -import org.elasticsearch.xpack.core.common.socket.SocketAccess; -import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; -import org.elasticsearch.xpack.core.ssl.SSLConfiguration; -import org.elasticsearch.xpack.security.authz.store.FileRolesStore; -import org.elasticsearch.xpack.core.ssl.SSLService; -import org.elasticsearch.xpack.security.authc.file.FileUserPasswdStore; -import org.elasticsearch.xpack.security.authc.file.FileUserRolesStore; - -import javax.net.ssl.HttpsURLConnection; - -import java.io.BufferedReader; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.net.HttpURLConnection; -import java.net.URI; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.AccessController; -import java.security.PrivilegedAction; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; - -/** - * This is the command-line tool used for migrating users and roles from the file-based realm into the new native realm using the API for - * import. It reads from the files and tries its best to add the users, showing an error if it was incapable of importing them. Any existing - * users or roles are skipped. 
- */ -public class ESNativeRealmMigrateTool extends LoggingAwareMultiCommand { - - public static void main(String[] args) throws Exception { - exit(new ESNativeRealmMigrateTool().main(args, Terminal.DEFAULT)); - } - - public ESNativeRealmMigrateTool() { - super("Imports file-based users and roles to the native security realm"); - subcommands.put("native", newMigrateUserOrRoles()); - } - - protected MigrateUserOrRoles newMigrateUserOrRoles() { - return new MigrateUserOrRoles(); - } - - /** - * Command to migrate users and roles to the native realm - */ - public static class MigrateUserOrRoles extends EnvironmentAwareCommand { - - private final OptionSpec username; - private final OptionSpec password; - private final OptionSpec url; - private final OptionSpec usersToMigrateCsv; - private final OptionSpec rolesToMigrateCsv; - - public MigrateUserOrRoles() { - super("Migrates users or roles from file to native realm"); - this.username = parser.acceptsAll(Arrays.asList("u", "username"), - "User used to authenticate with Elasticsearch") - .withRequiredArg().required(); - this.password = parser.acceptsAll(Arrays.asList("p", "password"), - "Password used to authenticate with Elasticsearch") - .withRequiredArg().required(); - this.url = parser.acceptsAll(Arrays.asList("U", "url"), - "URL of Elasticsearch host") - .withRequiredArg(); - this.usersToMigrateCsv = parser.acceptsAll(Arrays.asList("n", "users"), - "Users to migrate from file to native realm") - .withRequiredArg(); - this.rolesToMigrateCsv = parser.acceptsAll(Arrays.asList("r", "roles"), - "Roles to migrate from file to native realm") - .withRequiredArg(); - } - - // Visible for testing - public OptionParser getParser() { - return this.parser; - } - - @Override - protected void printAdditionalHelp(Terminal terminal) { - terminal.println("This tool migrates file based users[1] and roles[2] to the native realm in"); - terminal.println("elasticsearch, saving the administrator from needing to manually transition"); - terminal.println("them from the file."); - } - - // Visible for testing - @Override - public void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { - terminal.println("starting migration of users and roles..."); - importUsers(terminal, env, options); - importRoles(terminal, env, options); - terminal.println("users and roles imported."); - } - - @SuppressForbidden(reason = "We call connect in doPrivileged and provide SocketPermission") - private String postURL(Settings settings, Environment env, String method, String urlString, - OptionSet options, @Nullable String bodyString) throws Exception { - URI uri = new URI(urlString); - URL url = uri.toURL(); - HttpURLConnection conn; - // If using SSL, need a custom service because it's likely a self-signed certificate - if ("https".equalsIgnoreCase(uri.getScheme())) { - final SSLService sslService = new SSLService(settings, env); - final SSLConfiguration sslConfiguration = sslService.getSSLConfiguration("xpack.security.http.ssl"); - final HttpsURLConnection httpsConn = (HttpsURLConnection) url.openConnection(); - AccessController.doPrivileged((PrivilegedAction) () -> { - // Requires permission java.lang.RuntimePermission "setFactory"; - httpsConn.setSSLSocketFactory(sslService.sslSocketFactory(sslConfiguration)); - return null; - }); - conn = httpsConn; - } else { - conn = (HttpURLConnection) url.openConnection(); - } - conn.setRequestMethod(method); - conn.setReadTimeout(30 * 1000); // 30 second timeout - // Add basic-auth header - 
conn.setRequestProperty("Authorization", - UsernamePasswordToken.basicAuthHeaderValue(username.value(options), - new SecureString(password.value(options).toCharArray()))); - conn.setRequestProperty("Content-Type", XContentType.JSON.mediaType()); - conn.setDoOutput(true); // we'll be sending a body - SocketAccess.doPrivileged(conn::connect); - if (bodyString != null) { - try (OutputStream out = conn.getOutputStream()) { - out.write(bodyString.getBytes(StandardCharsets.UTF_8)); - } catch (Exception e) { - try { - conn.disconnect(); - } catch (Exception e2) { - // Ignore exceptions if we weren't able to close the connection after an error - } - throw e; - } - } - try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) { - StringBuilder sb = new StringBuilder(); - String line = null; - while ((line = reader.readLine()) != null) { - sb.append(line); - } - return sb.toString(); - } catch (IOException e) { - try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getErrorStream(), StandardCharsets.UTF_8))) { - StringBuilder sb = new StringBuilder(); - String line = null; - while ((line = reader.readLine()) != null) { - sb.append(line); - } - throw new IOException(sb.toString(), e); - } - } finally { - conn.disconnect(); - } - } - - Set getUsersThatExist(Terminal terminal, Settings settings, Environment env, OptionSet options) throws Exception { - Set existingUsers = new HashSet<>(); - String allUsersJson = postURL(settings, env, "GET", this.url.value(options) + "/_security/user/", options, null); - // EMPTY is safe here because we never use namedObject - try (XContentParser parser = JsonXContent.jsonXContent - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, allUsersJson)) { - XContentParser.Token token = parser.nextToken(); - String userName; - if (token == XContentParser.Token.START_OBJECT) { - while ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) { - userName = parser.currentName(); - existingUsers.add(userName); - parser.nextToken(); - parser.skipChildren(); - } - } else { - throw new ElasticsearchException("failed to retrieve users, expecting an object but got: " + token); - } - } - terminal.println("found existing users: " + existingUsers); - return existingUsers; - } - - static String createUserJson(String[] roles, char[] password) throws IOException { - XContentBuilder builder = jsonBuilder(); - builder.startObject(); - { - builder.field("password_hash", new String(password)); - builder.startArray("roles"); - for (String role : roles) { - builder.value(role); - } - builder.endArray(); - } - builder.endObject(); - return Strings.toString(builder); - } - - void importUsers(Terminal terminal, Environment env, OptionSet options) throws FileNotFoundException { - String usersCsv = usersToMigrateCsv.value(options); - String[] usersToMigrate = (usersCsv != null) ? 
usersCsv.split(",") : Strings.EMPTY_ARRAY; - Path usersFile = FileUserPasswdStore.resolveFile(env); - Path usersRolesFile = FileUserRolesStore.resolveFile(env); - if (Files.exists(usersFile) == false) { - throw new FileNotFoundException("users file [" + usersFile + "] does not exist"); - } else if (Files.exists(usersRolesFile) == false) { - throw new FileNotFoundException("users_roles file [" + usersRolesFile + "] does not exist"); - } - - terminal.println("importing users from [" + usersFile + "]..."); - final Logger logger = getTerminalLogger(terminal); - Map userToHashedPW = FileUserPasswdStore.parseFile(usersFile, logger, env.settings()); - Map userToRoles = FileUserRolesStore.parseFile(usersRolesFile, logger); - Set existingUsers; - try { - existingUsers = getUsersThatExist(terminal, env.settings(), env, options); - } catch (Exception e) { - throw new ElasticsearchException("failed to get users that already exist, skipping user import", e); - } - if (usersToMigrate.length == 0) { - usersToMigrate = userToHashedPW.keySet().toArray(new String[userToHashedPW.size()]); - } - for (String user : usersToMigrate) { - if (userToHashedPW.containsKey(user) == false) { - terminal.println("user [" + user + "] was not found in files, skipping"); - continue; - } else if (existingUsers.contains(user)) { - terminal.println("user [" + user + "] already exists, skipping"); - continue; - } - terminal.println("migrating user [" + user + "]"); - String reqBody = "n/a"; - try { - reqBody = createUserJson(userToRoles.get(user), userToHashedPW.get(user)); - String resp = postURL(env.settings(), env, "POST", - this.url.value(options) + "/_security/user/" + user, options, reqBody); - terminal.println(resp); - } catch (Exception e) { - throw new ElasticsearchException("failed to migrate user [" + user + "] with body: " + reqBody, e); - } - } - } - - Set getRolesThatExist(Terminal terminal, Settings settings, Environment env, OptionSet options) throws Exception { - Set existingRoles = new HashSet<>(); - String allRolesJson = postURL(settings, env, "GET", this.url.value(options) + "/_security/role/", options, null); - // EMPTY is safe here because we never use namedObject - try (XContentParser parser = JsonXContent.jsonXContent - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, allRolesJson)) { - XContentParser.Token token = parser.nextToken(); - String roleName; - if (token == XContentParser.Token.START_OBJECT) { - while ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) { - roleName = parser.currentName(); - existingRoles.add(roleName); - parser.nextToken(); - parser.skipChildren(); - } - } else { - throw new ElasticsearchException("failed to retrieve roles, expecting an object but got: " + token); - } - } - terminal.println("found existing roles: " + existingRoles); - return existingRoles; - } - - static String createRoleJson(RoleDescriptor rd) throws IOException { - XContentBuilder builder = jsonBuilder(); - rd.toXContent(builder, ToXContent.EMPTY_PARAMS, true); - return Strings.toString(builder); - } - - void importRoles(Terminal terminal, Environment env, OptionSet options) throws FileNotFoundException { - String rolesCsv = rolesToMigrateCsv.value(options); - String[] rolesToMigrate = (rolesCsv != null) ? 
rolesCsv.split(",") : Strings.EMPTY_ARRAY; - Path rolesFile = FileRolesStore.resolveFile(env).toAbsolutePath(); - if (Files.exists(rolesFile) == false) { - throw new FileNotFoundException("roles.yml file [" + rolesFile + "] does not exist"); - } - terminal.println("importing roles from [" + rolesFile + "]..."); - Logger logger = getTerminalLogger(terminal); - Map roles = FileRolesStore.parseRoleDescriptors(rolesFile, logger, true, Settings.EMPTY); - Set existingRoles; - try { - existingRoles = getRolesThatExist(terminal, env.settings(), env, options); - } catch (Exception e) { - throw new ElasticsearchException("failed to get roles that already exist, skipping role import", e); - } - if (rolesToMigrate.length == 0) { - rolesToMigrate = roles.keySet().toArray(new String[roles.size()]); - } - for (String roleName : rolesToMigrate) { - if (roles.containsKey(roleName) == false) { - terminal.println("no role [" + roleName + "] found, skipping"); - continue; - } else if (existingRoles.contains(roleName)) { - terminal.println("role [" + roleName + "] already exists, skipping"); - continue; - } - terminal.println("migrating role [" + roleName + "]"); - String reqBody = "n/a"; - try { - reqBody = createRoleJson(roles.get(roleName)); - String resp = postURL(env.settings(), env, "POST", - this.url.value(options) + "/_security/role/" + roleName, options, reqBody); - terminal.println(resp); - } catch (Exception e) { - throw new ElasticsearchException("failed to migrate role [" + roleName + "] with body: " + reqBody, e); - } - } - } - } - - /** - * Creates a new Logger that is detached from the ROOT logger and only has an appender that will output log messages to the terminal - */ - static Logger getTerminalLogger(final Terminal terminal) { - final Logger logger = LogManager.getLogger(ESNativeRealmMigrateTool.class); - Loggers.setLevel(logger, Level.ALL); - - final LoggerContext ctx = (LoggerContext) LogManager.getContext(false); - final Configuration config = ctx.getConfiguration(); - - // create appender - final Appender appender = new AbstractAppender(ESNativeRealmMigrateTool.class.getName(), null, - PatternLayout.newBuilder() - // Specify the configuration so log4j doesn't re-initialize - .withConfiguration(config) - .withPattern("%m") - .build()) { - @Override - public void append(LogEvent event) { - switch (event.getLevel().getStandardLevel()) { - case FATAL: - case ERROR: - terminal.println(Verbosity.NORMAL, event.getMessage().getFormattedMessage()); - break; - case OFF: - break; - default: - terminal.println(Verbosity.VERBOSE, event.getMessage().getFormattedMessage()); - break; - } - } - }; - appender.start(); - - // get the config, detach from parent, remove appenders, add custom appender - final LoggerConfig loggerConfig = config.getLoggerConfig(ESNativeRealmMigrateTool.class.getName()); - loggerConfig.setParent(null); - loggerConfig.getAppenders().forEach((s, a) -> Loggers.removeAppender(logger, a)); - Loggers.addAppender(logger, appender); - return logger; - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java index 9fc75d0e3385c..7e498efa4df2e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java @@ -241,7 +241,7 @@ private Version 
getDefinedVersion(String username) { case RemoteMonitoringUser.NAME: return RemoteMonitoringUser.DEFINED_SINCE; default: - return Version.V_6_0_0; + return Version.CURRENT.minimumIndexCompatibilityVersion(); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index 1b6da7f68ca4e..bb98dddbe1ddf 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -329,7 +330,12 @@ public void onSecurityIndexStateChange(SecurityIndexManager.State previousState, } private void refreshRealms(ActionListener listener, Result result) { - String[] realmNames = this.realmsToRefresh.toArray(new String[realmsToRefresh.size()]); + if (realmsToRefresh.isEmpty()) { + listener.onResponse(result); + return; + } + + final String[] realmNames = this.realmsToRefresh.toArray(Strings.EMPTY_ARRAY); final SecurityClient securityClient = new SecurityClient(client); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, securityClient.prepareClearRealmCache().realms(realmNames).request(), @@ -340,7 +346,7 @@ private void refreshRealms(ActionListener listener, Result resu listener.onResponse(result); }, ex -> { - logger.warn("Failed to clear cache for realms [{}]", Arrays.toString(realmNames)); + logger.warn(new ParameterizedMessage("Failed to clear cache for realms [{}]", Arrays.toString(realmNames)), ex); listener.onFailure(ex); }), securityClient::clearRealmCache); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java index 2ceb14a172fe4..384401edaf510 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java @@ -6,12 +6,14 @@ package org.elasticsearch.integration; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.client.Request; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.hamcrest.Matchers; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -139,7 +141,6 @@ public void testThatClusterPrivilegesWorkAsExpectedViaHttp() throws Exception { assertAccessIsDenied("user_d", "PUT", "/_cluster/settings", "{ \"transient\" : { \"search.default_search_timeout\": \"1m\" } }"); } - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/38030") 
public void testThatSnapshotAndRestore() throws Exception { String repoJson = Strings.toString(jsonBuilder().startObject().field("type", "fs").startObject("settings").field("location", repositoryLocation.toString()).endObject().endObject()); @@ -203,6 +204,11 @@ private void waitForSnapshotToFinish(String repo, String snapshot) throws Except assertBusy(() -> { SnapshotsStatusResponse response = client().admin().cluster().prepareSnapshotStatus(repo).setSnapshots(snapshot).get(); assertThat(response.getSnapshots().get(0).getState(), is(SnapshotsInProgress.State.SUCCESS)); + // The status of the snapshot in the repository can become SUCCESS before it is fully finalized in the cluster state so wait for + // it to disappear from the cluster state as well + SnapshotsInProgress snapshotsInProgress = + client().admin().cluster().state(new ClusterStateRequest()).get().getState().custom(SnapshotsInProgress.TYPE); + assertThat(snapshotsInProgress.entries(), Matchers.empty()); }); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java index 02b9cf61e4a39..c115aac11d732 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java @@ -19,11 +19,8 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.license.License.OperationMode; import org.elasticsearch.node.MockNode; @@ -36,14 +33,8 @@ import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.Netty4Plugin; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.xpack.core.TestXPackTransportClient; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.security.SecurityField; -import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; -import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.LocalStateSecurity; import org.junit.After; import org.junit.Before; @@ -60,7 +51,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; @@ -156,7 +146,7 @@ public void testEnableDisableBehaviour() throws Exception { assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); refresh(); - final Client client = internalCluster().transportClient(); + final Client client = 
internalCluster().client(); disableLicensing(); @@ -216,57 +206,6 @@ public void testRestAuthenticationByLicenseType() throws Exception { assertThat(authorizedAuthenticateResponse.getStatusLine().getStatusCode(), is(200)); } - public void testSecurityActionsByLicenseType() throws Exception { - // security actions should not work! - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateSecurity.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - new SecurityClient(client).preparePutUser("john", "password".toCharArray(), Hasher.BCRYPT).get(); - fail("security actions should not be enabled!"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.status(), is(RestStatus.FORBIDDEN)); - assertThat(e.getMessage(), containsString("non-compliant")); - } - - // enable a license that enables security - License.OperationMode mode = randomFrom(License.OperationMode.GOLD, License.OperationMode.TRIAL, - License.OperationMode.PLATINUM, License.OperationMode.STANDARD, OperationMode.BASIC); - enableLicensing(mode); - // security actions should work! - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateSecurity.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PutUserResponse response = new SecurityClient(client).preparePutUser("john", "password".toCharArray(), Hasher.BCRYPT).get(); - assertNotNull(response); - } - } - - public void testTransportClientAuthenticationByLicenseType() throws Exception { - Settings.Builder builder = Settings.builder() - .put(internalCluster().transportClient().settings()); - // remove user info - builder.remove(SecurityField.USER_SETTING.getKey()); - builder.remove(ThreadContext.PREFIX + "." 
+ UsernamePasswordToken.BASIC_AUTH_HEADER); - - // basic has no auth - try (TransportClient client = new TestXPackTransportClient(builder.build(), LocalStateSecurity.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - assertGreenClusterState(client); - } - - // enable a license that enables security - License.OperationMode mode = randomFrom(License.OperationMode.GOLD, License.OperationMode.TRIAL, - License.OperationMode.PLATINUM, License.OperationMode.STANDARD); - enableLicensing(mode); - - try (TransportClient client = new TestXPackTransportClient(builder.build(), LocalStateSecurity.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - client.admin().cluster().prepareHealth().get(); - fail("should not have been able to connect to a node!"); - } catch (NoNodeAvailableException e) { - // expected - } - } - public void testNodeJoinWithoutSecurityExplicitlyEnabled() throws Exception { License.OperationMode mode = randomFrom(License.OperationMode.GOLD, License.OperationMode.PLATINUM, License.OperationMode.STANDARD); enableLicensing(mode); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 02db3a1e11a46..8e6e00f32a90e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -54,7 +54,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -67,8 +66,8 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; import static org.elasticsearch.discovery.DiscoveryModule.ZEN2_DISCOVERY_TYPE; -import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT; import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -253,17 +252,45 @@ public void testTLSJoinValidator() throws Exception { int numIters = randomIntBetween(1, 10); for (int i = 0; i < numIters; i++) { boolean tlsOn = randomBoolean(); + boolean securityExplicitlyEnabled = randomBoolean(); String discoveryType = randomFrom("single-node", ZEN2_DISCOVERY_TYPE, randomAlphaOfLength(4)); - Security.ValidateTLSOnJoin validator = new Security.ValidateTLSOnJoin(tlsOn, discoveryType); + + final Settings settings; + if (securityExplicitlyEnabled) { + settings = Settings.builder().put("xpack.security.enabled", true).build(); + } else { + settings = Settings.EMPTY; + } + Security.ValidateTLSOnJoin validator = new Security.ValidateTLSOnJoin(tlsOn, discoveryType, settings); MetaData.Builder builder = MetaData.builder(); - License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); + License.OperationMode licenseMode = randomFrom(License.OperationMode.values()); + License license = TestUtils.generateSignedLicense(licenseMode.description(), TimeValue.timeValueHours(24)); TestUtils.putLicense(builder, license); 
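The expectations that follow encode the rule the reworked ValidateTLSOnJoin is meant to enforce. Restated compactly, and reconstructed purely from these test expectations rather than from the production validator, the rule might look like:

// Reconstructed from the expectations in testTLSJoinValidator; not the production implementation.
static boolean tlsRequired(License.OperationMode mode, boolean securityExplicitlyEnabled) {
    switch (mode) {
        case PLATINUM:
        case GOLD:
        case STANDARD:
            return true;                       // security defaults to enabled for these license types
        case BASIC:
            return securityExplicitlyEnabled;  // only when xpack.security.enabled is set to true
        case TRIAL:
        case MISSING:
        default:
            return false;
    }
}
// A join is then rejected only when tlsRequired(...) is true, transport TLS is off,
// and the discovery type is not "single-node".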
ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(builder.build()).build(); - EnumSet productionModes = EnumSet.of(License.OperationMode.GOLD, License.OperationMode.PLATINUM, - License.OperationMode.STANDARD); - if (productionModes.contains(license.operationMode()) && tlsOn == false && "single-node".equals(discoveryType) == false) { + + final boolean expectFailure; + switch (licenseMode) { + case PLATINUM: + case GOLD: + case STANDARD: + expectFailure = tlsOn == false && "single-node".equals(discoveryType) == false; + break; + case BASIC: + expectFailure = tlsOn == false && "single-node".equals(discoveryType) == false && securityExplicitlyEnabled; + break; + case MISSING: + case TRIAL: + expectFailure = false; + break; + default: + throw new AssertionError("unknown operation mode [" + license.operationMode() + "]"); + } + logger.info("Test TLS join; Lic:{} TLS:{} Disco:{} Settings:{} ; Expect Failure: {}", + licenseMode, tlsOn, discoveryType, settings.toDelimitedString(','), expectFailure); + if (expectFailure) { IllegalStateException ise = expectThrows(IllegalStateException.class, () -> validator.accept(node, state)); - assertEquals("TLS setup is required for license type [" + license.operationMode().name() + "]", ise.getMessage()); + assertEquals("Transport TLS ([xpack.security.transport.ssl.enabled]) is required for license type [" + + license.operationMode().description() + "] when security is enabled", ise.getMessage()); } else { validator.accept(node, state); } @@ -295,12 +322,15 @@ public void testIndexJoinValidator_Old_And_Rolling() throws Exception { createComponents(Settings.EMPTY); BiConsumer joinValidator = security.getJoinValidator(); assertNotNull(joinValidator); + Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), + VersionUtils.getPreviousVersion(Version.V_7_0_0)); DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_MAIN_ALIAS) - .settings(settings(Version.V_6_1_0).put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_MAIN_INDEX_FORMAT - 1)) + .settings(settings(version) + .put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_MAIN_INDEX_FORMAT - 1)) .numberOfShards(1).numberOfReplicas(0) .build(); - DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.V_6_1_0); + DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), version); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) .nodes(discoveryNodes) @@ -318,7 +348,7 @@ public void testIndexJoinValidator_FullyCurrentCluster() throws Exception { DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); int indexFormat = randomBoolean() ? 
INTERNAL_MAIN_INDEX_FORMAT : INTERNAL_MAIN_INDEX_FORMAT - 1; IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_MAIN_ALIAS) - .settings(settings(Version.V_6_1_0).put(INDEX_FORMAT_SETTING.getKey(), indexFormat)) + .settings(settings(VersionUtils.randomIndexCompatibleVersion(random())).put(INDEX_FORMAT_SETTING.getKey(), indexFormat)) .numberOfShards(1).numberOfReplicas(0) .build(); DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.CURRENT); @@ -333,7 +363,7 @@ public void testIndexUpgradeValidatorWithUpToDateIndex() throws Exception { createComponents(Settings.EMPTY); BiConsumer joinValidator = security.getJoinValidator(); assertNotNull(joinValidator); - Version version = randomBoolean() ? Version.CURRENT : Version.V_6_1_0; + Version version = VersionUtils.randomIndexCompatibleVersion(random()); DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_MAIN_ALIAS) .settings(settings(version).put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_MAIN_INDEX_FORMAT)) @@ -352,7 +382,8 @@ public void testIndexUpgradeValidatorWithMissingIndex() throws Exception { BiConsumer joinValidator = security.getJoinValidator(); assertNotNull(joinValidator); DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); - DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.V_6_1_0); + DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), + VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) .nodes(discoveryNodes).build(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java index 78d6e22ac3645..80d7b4b4d00ad 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java @@ -77,7 +77,7 @@ public void init() throws Exception { ClusterState state = mock(ClusterState.class); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("id1", buildNewFakeTransportAddress(), Version.CURRENT)) - .add(new DiscoveryNode("id2", buildNewFakeTransportAddress(), Version.V_6_0_0)) + .add(new DiscoveryNode("id2", buildNewFakeTransportAddress(), Version.CURRENT.minimumCompatibilityVersion())) .build(); when(state.nodes()).thenReturn(nodes); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java index 69cedf6389f7f..0ab3c96167c2c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.update.UpdateRequestBuilder; import 
org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -47,7 +48,6 @@ import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; -import org.elasticsearch.xpack.security.authc.UserToken; import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectRealm; import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectTestCase; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; @@ -195,20 +195,21 @@ public void testLogoutInvalidatesTokens() throws Exception { final JWT signedIdToken = generateIdToken(subject, randomAlphaOfLength(8), randomAlphaOfLength(8)); final User user = new User("oidc-user", new String[]{"superuser"}, null, null, null, true); final Authentication.RealmRef realmRef = new Authentication.RealmRef(oidcRealm.name(), OpenIdConnectRealmSettings.TYPE, "node01"); - final Authentication authentication = new Authentication(user, realmRef, null); - final Map tokenMetadata = new HashMap<>(); tokenMetadata.put("id_token_hint", signedIdToken.serialize()); tokenMetadata.put("oidc_realm", REALM_NAME); + final Authentication authentication = new Authentication(user, realmRef, null, null, Authentication.AuthenticationType.REALM, + tokenMetadata); - final PlainActionFuture> future = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, tokenMetadata, true, future); - final UserToken userToken = future.actionGet().v1(); - mockGetTokenFromId(userToken, false, client); - final String tokenString = tokenService.getAccessTokenAsString(userToken); + final PlainActionFuture> future = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, tokenMetadata, future); + final String accessToken = future.actionGet().v1(); + mockGetTokenFromId(tokenService, userTokenId, authentication, false, client); final OpenIdConnectLogoutRequest request = new OpenIdConnectLogoutRequest(); - request.setToken(tokenString); + request.setToken(accessToken); final PlainActionFuture listener = new PlainActionFuture<>(); action.doExecute(mock(Task.class), request, listener); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index 3f4ac8942089c..6a9c487bf2013 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; @@ -66,7 +67,6 @@ import 
org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; -import org.elasticsearch.xpack.security.authc.UserToken; import org.elasticsearch.xpack.security.authc.saml.SamlLogoutRequestHandler; import org.elasticsearch.xpack.security.authc.saml.SamlNameId; import org.elasticsearch.xpack.security.authc.saml.SamlRealm; @@ -252,9 +252,14 @@ public void cleanup() { } public void testInvalidateCorrectTokensFromLogoutRequest() throws Exception { + final String userTokenId1 = UUIDs.randomBase64UUID(); + final String refreshToken1 = UUIDs.randomBase64UUID(); + final String userTokenId2 = UUIDs.randomBase64UUID(); + final String refreshToken2 = UUIDs.randomBase64UUID(); storeToken(logoutRequest.getNameId(), randomAlphaOfLength(10)); - final Tuple tokenToInvalidate1 = storeToken(logoutRequest.getNameId(), logoutRequest.getSession()); - final Tuple tokenToInvalidate2 = storeToken(logoutRequest.getNameId(), logoutRequest.getSession()); + final Tuple tokenToInvalidate1 = storeToken(userTokenId1, refreshToken1, logoutRequest.getNameId(), + logoutRequest.getSession()); + storeToken(userTokenId2, refreshToken2, logoutRequest.getNameId(), logoutRequest.getSession()); storeToken(new SamlNameId(NameID.PERSISTENT, randomAlphaOfLength(16), null, null, null), logoutRequest.getSession()); assertThat(indexRequests.size(), equalTo(4)); @@ -316,27 +321,27 @@ public void testInvalidateCorrectTokensFromLogoutRequest() throws Exception { assertThat(filter1.get(1), instanceOf(TermQueryBuilder.class)); assertThat(((TermQueryBuilder) filter1.get(1)).fieldName(), equalTo("refresh_token.token")); assertThat(((TermQueryBuilder) filter1.get(1)).value(), - equalTo(TokenService.unpackVersionAndPayload(tokenToInvalidate1.v2()).v2())); + equalTo(TokenService.hashTokenString(TokenService.unpackVersionAndPayload(tokenToInvalidate1.v2()).v2()))); assertThat(bulkRequests.size(), equalTo(4)); // 4 updates (refresh-token + access-token) // Invalidate refresh token 1 assertThat(bulkRequests.get(0).requests().get(0), instanceOf(UpdateRequest.class)); - assertThat(bulkRequests.get(0).requests().get(0).id(), equalTo("token_" + tokenToInvalidate1.v1().getId())); + assertThat(bulkRequests.get(0).requests().get(0).id(), equalTo("token_" + TokenService.hashTokenString(userTokenId1))); UpdateRequest updateRequest1 = (UpdateRequest) bulkRequests.get(0).requests().get(0); assertThat(updateRequest1.toString().contains("refresh_token"), equalTo(true)); // Invalidate access token 1 assertThat(bulkRequests.get(1).requests().get(0), instanceOf(UpdateRequest.class)); - assertThat(bulkRequests.get(1).requests().get(0).id(), equalTo("token_" + tokenToInvalidate1.v1().getId())); + assertThat(bulkRequests.get(1).requests().get(0).id(), equalTo("token_" + TokenService.hashTokenString(userTokenId1))); UpdateRequest updateRequest2 = (UpdateRequest) bulkRequests.get(1).requests().get(0); assertThat(updateRequest2.toString().contains("access_token"), equalTo(true)); // Invalidate refresh token 2 assertThat(bulkRequests.get(2).requests().get(0), instanceOf(UpdateRequest.class)); - assertThat(bulkRequests.get(2).requests().get(0).id(), equalTo("token_" + tokenToInvalidate2.v1().getId())); + assertThat(bulkRequests.get(2).requests().get(0).id(), equalTo("token_" + TokenService.hashTokenString(userTokenId2))); UpdateRequest updateRequest3 = (UpdateRequest) bulkRequests.get(2).requests().get(0); assertThat(updateRequest3.toString().contains("refresh_token"), 
equalTo(true)); // Invalidate access token 2 assertThat(bulkRequests.get(3).requests().get(0), instanceOf(UpdateRequest.class)); - assertThat(bulkRequests.get(3).requests().get(0).id(), equalTo("token_" + tokenToInvalidate2.v1().getId())); + assertThat(bulkRequests.get(3).requests().get(0).id(), equalTo("token_" + TokenService.hashTokenString(userTokenId2))); UpdateRequest updateRequest4 = (UpdateRequest) bulkRequests.get(3).requests().get(0); assertThat(updateRequest4.toString().contains("access_token"), equalTo(true)); } @@ -359,13 +364,19 @@ private Function findTokenByRefreshToken(SearchHit[] }; } - private Tuple storeToken(SamlNameId nameId, String session) throws IOException { + private Tuple storeToken(String userTokenId, String refreshToken, SamlNameId nameId, String session) { Authentication authentication = new Authentication(new User("bob"), new RealmRef("native", NativeRealmSettings.TYPE, "node01"), null); final Map metadata = samlRealm.createTokenMetadata(nameId, session); - final PlainActionFuture> future = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, metadata, true, future); + final PlainActionFuture> future = new PlainActionFuture<>(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, metadata, future); return future.actionGet(); } + private Tuple storeToken(SamlNameId nameId, String session) { + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + return storeToken(userTokenId, refreshToken, nameId, session); + } + } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java index 1652122bf6e80..9b9dc79a29cd4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; @@ -55,7 +56,6 @@ import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; -import org.elasticsearch.xpack.security.authc.UserToken; import org.elasticsearch.xpack.security.authc.saml.SamlNameId; import org.elasticsearch.xpack.security.authc.saml.SamlRealm; import org.elasticsearch.xpack.security.authc.saml.SamlRealmTests; @@ -236,19 +236,21 @@ public void testLogoutInvalidatesToken() throws Exception { .map(); final User user = new User("punisher", new String[]{"superuser"}, null, null, userMetaData, true); final Authentication.RealmRef realmRef = new Authentication.RealmRef(samlRealm.name(), SamlRealmSettings.TYPE, "node01"); - final Authentication authentication = new Authentication(user, realmRef, null); - final Map tokenMetaData = samlRealm.createTokenMetadata( - new SamlNameId(NameID.TRANSIENT, nameId, null, null, null), session); + new SamlNameId(NameID.TRANSIENT, nameId, null, null, null), session); + final 
Authentication authentication = new Authentication(user, realmRef, null, null, Authentication.AuthenticationType.REALM, + tokenMetaData); + - final PlainActionFuture> future = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, tokenMetaData, true, future); - final UserToken userToken = future.actionGet().v1(); - mockGetTokenFromId(userToken, false, client); - final String tokenString = tokenService.getAccessTokenAsString(userToken); + final PlainActionFuture> future = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, tokenMetaData, future); + final String accessToken = future.actionGet().v1(); + mockGetTokenFromId(tokenService, userTokenId, authentication, false, client); final SamlLogoutRequest request = new SamlLogoutRequest(); - request.setToken(tokenString); + request.setToken(accessToken); final PlainActionFuture listener = new PlainActionFuture<>(); action.doExecute(mock(Task.class), request, listener); final SamlLogoutResponse response = listener.get(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index c7994888a2631..67ce5ce2b27af 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -1108,14 +1108,16 @@ public void testAuthenticateWithToken() throws Exception { User user = new User("_username", "r1"); final AtomicBoolean completed = new AtomicBoolean(false); final Authentication expected = new Authentication(user, new RealmRef("realm", "custom", "node"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { Authentication originatingAuth = new Authentication(new User("creator"), new RealmRef("test", "test", "test"), null); - tokenService.createOAuth2Tokens(expected, originatingAuth, Collections.emptyMap(), true, tokenFuture); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, expected, originatingAuth, Collections.emptyMap(), tokenFuture); } - String token = tokenService.getAccessTokenAsString(tokenFuture.get().v1()); + String token = tokenFuture.get().v1(); when(client.prepareMultiGet()).thenReturn(new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE)); - mockGetTokenFromId(tokenFuture.get().v1(), false, client); + mockGetTokenFromId(tokenService, userTokenId, expected, false, client); when(securityIndex.isAvailable()).thenReturn(true); when(securityIndex.indexExists()).thenReturn(true); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { @@ -1191,13 +1193,15 @@ public void testExpiredToken() throws Exception { when(securityIndex.indexExists()).thenReturn(true); User user = new User("_username", "r1"); final Authentication expected = new Authentication(user, new RealmRef("realm", "custom", "node"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + PlainActionFuture> tokenFuture = new 
PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { Authentication originatingAuth = new Authentication(new User("creator"), new RealmRef("test", "test", "test"), null); - tokenService.createOAuth2Tokens(expected, originatingAuth, Collections.emptyMap(), true, tokenFuture); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, expected, originatingAuth, Collections.emptyMap(), tokenFuture); } - String token = tokenService.getAccessTokenAsString(tokenFuture.get().v1()); - mockGetTokenFromId(tokenFuture.get().v1(), true, client); + String token = tokenFuture.get().v1(); + mockGetTokenFromId(tokenService, userTokenId, expected, true, client); doAnswer(invocationOnMock -> { ((Runnable) invocationOnMock.getArguments()[1]).run(); return null; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java index 6d5c6770bf2f5..2ce089f385896 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java @@ -5,35 +5,16 @@ */ package org.elasticsearch.xpack.security.authc; -import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; -import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.xpack.core.TestXPackTransportClient; import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; -import org.elasticsearch.xpack.security.LocalStateSecurity; -import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.junit.BeforeClass; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; public class RunAsIntegTests extends SecurityIntegTestCase { @@ -86,43 +67,6 @@ protected boolean transportSSLEnabled() { return false; } - public void testUserImpersonation() throws Exception { - try (TransportClient client = getTransportClient(Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TRANSPORT_CLIENT_USER + ":" + - SecuritySettingsSourceField.TEST_PASSWORD).build())) { - //ensure the client can connect - assertBusy(() -> assertThat(client.connectedNodes().size(), greaterThan(0))); - - // make sure the client can't get health - try { - 
client.admin().cluster().prepareHealth().get(); - fail("the client user should not have privileges to get the health"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.getMessage(), containsString("unauthorized")); - } - - // let's run as without authorization - try { - Map headers = Collections.singletonMap(AuthenticationServiceField.RUN_AS_USER_HEADER, - SecuritySettingsSource.TEST_USER_NAME); - client.filterWithHeader(headers) - .admin().cluster().prepareHealth().get(); - fail("run as should be unauthorized for the transport client user"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.getMessage(), containsString("unauthorized")); - assertThat(e.getMessage(), containsString("run as")); - } - - Map headers = new HashMap<>(); - headers.put("Authorization", UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, - new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))); - headers.put(AuthenticationServiceField.RUN_AS_USER_HEADER, SecuritySettingsSource.TEST_USER_NAME); - // lets set the user - ClusterHealthResponse response = client.filterWithHeader(headers).admin().cluster().prepareHealth().get(); - assertThat(response.isTimedOut(), is(false)); - } - } - public void testUserImpersonationUsingHttp() throws Exception { // use the transport client user and try to run as try { @@ -156,29 +100,6 @@ public void testUserImpersonationUsingHttp() throws Exception { getRestClient().performRequest(requestForUserRunAsUser(SecuritySettingsSource.TEST_USER_NAME)); } - public void testEmptyUserImpersonationHeader() throws Exception { - try (TransportClient client = getTransportClient(Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TRANSPORT_CLIENT_USER + ":" - + SecuritySettingsSourceField.TEST_PASSWORD).build())) { - //ensure the client can connect - awaitBusy(() -> { - return client.connectedNodes().size() > 0; - }); - - try { - Map headers = new HashMap<>(); - headers.put("Authorization", UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, - new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))); - headers.put(AuthenticationServiceField.RUN_AS_USER_HEADER, ""); - - client.filterWithHeader(headers).admin().cluster().prepareHealth().get(); - fail("run as header should not be allowed to be empty"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.getMessage(), containsString("unable to authenticate")); - } - } - } - public void testEmptyHeaderUsingHttp() throws Exception { try { getRestClient().performRequest(requestForUserRunAsUser("")); @@ -188,29 +109,6 @@ public void testEmptyHeaderUsingHttp() throws Exception { } } - public void testNonExistentRunAsUser() throws Exception { - try (TransportClient client = getTransportClient(Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TRANSPORT_CLIENT_USER + ":" + - SecuritySettingsSourceField.TEST_PASSWORD).build())) { - //ensure the client can connect - awaitBusy(() -> { - return client.connectedNodes().size() > 0; - }); - - try { - Map headers = new HashMap<>(); - headers.put("Authorization", UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, - new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))); - headers.put(AuthenticationServiceField.RUN_AS_USER_HEADER, "idontexist"); - - client.filterWithHeader(headers).admin().cluster().prepareHealth().get(); - fail("run as header should not accept non-existent users"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.getMessage(), containsString("unauthorized")); 
- } - } - } - public void testNonExistentRunAsUserUsingHttp() throws Exception { try { getRestClient().performRequest(requestForUserRunAsUser("idontexist")); @@ -228,21 +126,4 @@ private static Request requestForUserRunAsUser(String user) { request.setOptions(options); return request; } - - // build our own here to better mimic an actual client... - TransportClient getTransportClient(Settings extraSettings) { - NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get(); - List nodes = nodeInfos.getNodes(); - assertTrue(nodes.isEmpty() == false); - TransportAddress publishAddress = randomFrom(nodes).getTransport().address().publishAddress(); - String clusterName = nodeInfos.getClusterName().value(); - - Settings settings = Settings.builder() - .put(extraSettings) - .put("cluster.name", clusterName) - .build(); - - return new TestXPackTransportClient(settings, LocalStateSecurity.class) - .addTransportAddress(publishAddress); - } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 7f09444784c6d..42101b1f4ec97 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -28,8 +28,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -62,10 +60,7 @@ import org.junit.Before; import org.junit.BeforeClass; -import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.OutputStream; -import java.nio.charset.StandardCharsets; import java.security.GeneralSecurityException; import java.time.Clock; import java.time.Instant; @@ -75,7 +70,6 @@ import java.util.HashMap; import java.util.Map; -import javax.crypto.CipherOutputStream; import javax.crypto.SecretKey; import static java.time.Clock.systemUTC; @@ -169,15 +163,16 @@ public static void shutdownThreadpool() throws InterruptedException { public void testAttachAndGetToken() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new 
ThreadContext(Settings.EMPTY); - requestContext.putHeader("Authorization", randomFrom("Bearer ", "BEARER ", "bearer ") + tokenService.getAccessTokenAsString(token)); + requestContext.putHeader("Authorization", randomFrom("Bearer ", "BEARER ", "bearer ") + accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -214,16 +209,21 @@ public void testInvalidAuthorizationHeader() throws Exception { public void testRotateKey() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -240,15 +240,18 @@ public void testRotateKey() throws Exception { assertAuthentication(authentication, serialized.getAuthentication()); } - PlainActionFuture> newTokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, newTokenFuture); - final UserToken newToken = newTokenFuture.get().v1(); - assertNotNull(newToken); - assertNotEquals(getDeprecatedAccessTokenString(tokenService, newToken), getDeprecatedAccessTokenString(tokenService, token)); + PlainActionFuture> newTokenFuture = new PlainActionFuture<>(); + final String newUserTokenId = UUIDs.randomBase64UUID(); + final String newRefreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(newUserTokenId, newRefreshToken, authentication, authentication, Collections.emptyMap(), + newTokenFuture); + final String newAccessToken = newTokenFuture.get().v1(); + assertNotNull(newAccessToken); + assertNotEquals(newAccessToken, accessToken); requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, newToken)); - mockGetTokenFromId(newToken, false); + storeTokenHeader(requestContext, newAccessToken); + mockGetTokenFromId(tokenService, newUserTokenId, authentication, false); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new 
PlainActionFuture<>(); @@ -267,6 +270,10 @@ private void rotateKeys(TokenService tokenService) { public void testKeyExchange() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } int numRotations = randomIntBetween(1, 5); for (int i = 0; i < numRotations; i++) { rotateKeys(tokenService); @@ -274,20 +281,21 @@ public void testKeyExchange() throws Exception { TokenService otherTokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); otherTokenService.refreshMetaData(tokenService.getTokenMetaData()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); otherTokenService.getAndValidateToken(requestContext, future); UserToken serialized = future.get(); - assertEquals(authentication, serialized.getAuthentication()); + assertAuthentication(serialized.getAuthentication(), authentication); } rotateKeys(tokenService); @@ -298,22 +306,27 @@ public void testKeyExchange() throws Exception { PlainActionFuture future = new PlainActionFuture<>(); otherTokenService.getAndValidateToken(requestContext, future); UserToken serialized = future.get(); - assertEquals(authentication, serialized.getAuthentication()); + assertAuthentication(serialized.getAuthentication(), authentication); } } public void testPruneKeys() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + 
PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -336,11 +349,14 @@ public void testPruneKeys() throws Exception { assertAuthentication(authentication, serialized.getAuthentication()); } - PlainActionFuture> newTokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, newTokenFuture); - final UserToken newToken = newTokenFuture.get().v1(); - assertNotNull(newToken); - assertNotEquals(getDeprecatedAccessTokenString(tokenService, newToken), getDeprecatedAccessTokenString(tokenService, token)); + PlainActionFuture> newTokenFuture = new PlainActionFuture<>(); + final String newUserTokenId = UUIDs.randomBase64UUID(); + final String newRefreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(newUserTokenId, newRefreshToken, authentication, authentication, Collections.emptyMap(), + newTokenFuture); + final String newAccessToken = newTokenFuture.get().v1(); + assertNotNull(newAccessToken); + assertNotEquals(newAccessToken, accessToken); metaData = tokenService.pruneKeys(1); tokenService.refreshMetaData(metaData); @@ -353,8 +369,8 @@ public void testPruneKeys() throws Exception { } requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, newToken)); - mockGetTokenFromId(newToken, false); + storeTokenHeader(requestContext, newAccessToken); + mockGetTokenFromId(tokenService, newUserTokenId, authentication, false); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); tokenService.getAndValidateToken(requestContext, future); @@ -366,16 +382,21 @@ public void testPruneKeys() throws Exception { public void testPassphraseWorks() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + 
tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -395,29 +416,40 @@ public void testGetTokenWhenKeyCacheHasExpired() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - UserToken token = tokenFuture.get().v1(); - assertThat(getDeprecatedAccessTokenString(tokenService, token), notNullValue()); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + String accessToken = tokenFuture.get().v1(); + assertThat(accessToken, notNullValue()); tokenService.clearActiveKeyCache(); - assertThat(getDeprecatedAccessTokenString(tokenService, token), notNullValue()); + + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + accessToken = tokenFuture.get().v1(); + assertThat(accessToken, notNullValue()); } public void testInvalidatedToken() throws Exception { when(securityMainIndex.indexExists()).thenReturn(true); TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, true); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, true); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, tokenService.getAccessTokenAsString(token)); + storeTokenHeader(requestContext, accessToken); try 
(ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -436,8 +468,10 @@ private void storeTokenHeader(ThreadContext requestContext, String tokenString) public void testComputeSecretKeyIsConsistent() throws Exception { byte[] saltArr = new byte[32]; random().nextBytes(saltArr); - SecretKey key = TokenService.computeSecretKey("some random passphrase".toCharArray(), saltArr); - SecretKey key2 = TokenService.computeSecretKey("some random passphrase".toCharArray(), saltArr); + SecretKey key = + TokenService.computeSecretKey("some random passphrase".toCharArray(), saltArr, TokenService.TOKEN_SERVICE_KEY_ITERATIONS); + SecretKey key2 = + TokenService.computeSecretKey("some random passphrase".toCharArray(), saltArr, TokenService.TOKEN_SERVICE_KEY_ITERATIONS); assertArrayEquals(key.getEncoded(), key2.getEncoded()); } @@ -468,14 +502,15 @@ public void testTokenExpiry() throws Exception { ClockMock clock = ClockMock.frozen(); TokenService tokenService = createTokenService(tokenServiceEnabledSettings, clock); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + final String userTokenId = UUIDs.randomBase64UUID(); + UserToken userToken = new UserToken(userTokenId, tokenService.getTokenVersionCompatibility(), authentication, + tokenService.getExpirationTime(), Collections.emptyMap()); + mockGetTokenFromId(userToken, false); + final String accessToken = tokenService.prependVersionAndEncodeAccessToken(tokenService.getTokenVersionCompatibility(), userTokenId + ); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, tokenService.getAccessTokenAsString(token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { // the clock is still frozen, so the cookie should be valid @@ -519,7 +554,7 @@ public void testTokenServiceDisabled() throws Exception { TokenService tokenService = new TokenService(Settings.builder() .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), false) .build(), - Clock.systemUTC(), client, licenseState, securityMainIndex, securityTokensIndex, clusterService); + Clock.systemUTC(), client, licenseState, securityMainIndex, securityTokensIndex, clusterService); IllegalStateException e = expectThrows(IllegalStateException.class, () -> tokenService.createOAuth2Tokens(null, null, null, true, null)); assertEquals("security tokens are not enabled", e.getMessage()); @@ -577,14 +612,15 @@ public void testMalformedToken() throws Exception { public void testIndexNotAvailable() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - //mockGetTokenFromId(token, false); + PlainActionFuture> 
tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, tokenService.getAccessTokenAsString(token)); + storeTokenHeader(requestContext, accessToken); doAnswer(invocationOnMock -> { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; @@ -620,34 +656,64 @@ public void testIndexNotAvailable() throws Exception { when(tokensIndex.isAvailable()).thenReturn(true); when(tokensIndex.indexExists()).thenReturn(true); - mockGetTokenFromId(token, false); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); future = new PlainActionFuture<>(); tokenService.getAndValidateToken(requestContext, future); - assertEquals(future.get().getAuthentication(), token.getAuthentication()); + assertAuthentication(future.get().getAuthentication(), authentication); } } public void testGetAuthenticationWorksWithExpiredUserToken() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, Clock.systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - UserToken expired = new UserToken(authentication, Instant.now().minus(3L, ChronoUnit.DAYS)); + final String userTokenId = UUIDs.randomBase64UUID(); + UserToken expired = new UserToken(userTokenId, tokenService.getTokenVersionCompatibility(), authentication, + Instant.now().minus(3L, ChronoUnit.DAYS), Collections.emptyMap()); mockGetTokenFromId(expired, false); - String userTokenString = tokenService.getAccessTokenAsString(expired); + final String accessToken = tokenService.prependVersionAndEncodeAccessToken(tokenService.getTokenVersionCompatibility(), userTokenId + ); PlainActionFuture>> authFuture = new PlainActionFuture<>(); - tokenService.getAuthenticationAndMetaData(userTokenString, authFuture); + tokenService.getAuthenticationAndMetaData(accessToken, authFuture); Authentication retrievedAuth = authFuture.actionGet().v1(); - assertEquals(authentication, retrievedAuth); + assertAuthentication(authentication, retrievedAuth); + } + + public void testSupercedingTokenEncryption() throws Exception { + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, Clock.systemUTC()); + Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String refreshToken = UUIDs.randomBase64UUID(); + final String newAccessToken = UUIDs.randomBase64UUID(); + final String newRefreshToken = UUIDs.randomBase64UUID(); + final byte[] iv = tokenService.getRandomBytes(TokenService.IV_BYTES); + final byte[] salt = tokenService.getRandomBytes(TokenService.SALT_BYTES); + final Version version = tokenService.getTokenVersionCompatibility(); + String encryptedTokens = tokenService.encryptSupersedingTokens(newAccessToken, newRefreshToken, refreshToken, iv, + salt); + TokenService.RefreshTokenStatus refreshTokenStatus = new TokenService.RefreshTokenStatus(false, + authentication.getUser().principal(), authentication.getAuthenticatedBy().getName(), true, 
Instant.now().minusSeconds(5L), + encryptedTokens, Base64.getEncoder().encodeToString(iv), Base64.getEncoder().encodeToString(salt)); + refreshTokenStatus.setVersion(version); + tokenService.decryptAndReturnSupersedingTokens(refreshToken, refreshTokenStatus, tokenFuture); + if (version.onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) { + // previous versions serialized the access token encrypted and the cipher text was different each time (due to different IVs) + assertThat(tokenService.prependVersionAndEncodeAccessToken(version, newAccessToken), equalTo(tokenFuture.get().v1())); + } + assertThat(TokenService.prependVersionAndEncodeRefreshToken(version, newRefreshToken), equalTo(tokenFuture.get().v2())); } public void testCannotValidateTokenIfLicenseDoesNotAllowTokens() throws Exception { when(licenseState.isTokenServiceAllowed()).thenReturn(true); TokenService tokenService = createTokenService(tokenServiceEnabledSettings, Clock.systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - UserToken token = new UserToken(authentication, Instant.now().plusSeconds(180)); + final String userTokenId = UUIDs.randomBase64UUID(); + UserToken token = new UserToken(userTokenId, tokenService.getTokenVersionCompatibility(), authentication, + Instant.now().plusSeconds(180), Collections.emptyMap()); mockGetTokenFromId(token, false); - + final String accessToken = tokenService.prependVersionAndEncodeAccessToken(tokenService.getTokenVersionCompatibility(), userTokenId + ); final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(threadContext, tokenService.getAccessTokenAsString(token)); + storeTokenHeader(threadContext, tokenService.prependVersionAndEncodeAccessToken(token.getVersion(), accessToken)); PlainActionFuture authFuture = new PlainActionFuture<>(); when(licenseState.isTokenServiceAllowed()).thenReturn(false); @@ -660,18 +726,30 @@ private TokenService createTokenService(Settings settings, Clock clock) throws G return new TokenService(settings, clock, client, licenseState, securityMainIndex, securityTokensIndex, clusterService); } - private void mockGetTokenFromId(UserToken userToken, boolean isExpired) { - mockGetTokenFromId(userToken, isExpired, client); + private void mockGetTokenFromId(TokenService tokenService, String accessToken, Authentication authentication, boolean isExpired) { + mockGetTokenFromId(tokenService, accessToken, authentication, isExpired, client); } - public static void mockGetTokenFromId(UserToken userToken, boolean isExpired, Client client) { + public static void mockGetTokenFromId(TokenService tokenService, String userTokenId, Authentication authentication, boolean isExpired, + Client client) { doAnswer(invocationOnMock -> { GetRequest request = (GetRequest) invocationOnMock.getArguments()[0]; ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; GetResponse response = mock(GetResponse.class); - if (userToken.getId().equals(request.id().replace("token_", ""))) { + Version tokenVersion = tokenService.getTokenVersionCompatibility(); + final String possiblyHashedUserTokenId; + if (tokenVersion.onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) { + possiblyHashedUserTokenId = TokenService.hashTokenString(userTokenId); + } else { + possiblyHashedUserTokenId = userTokenId; + } + if (possiblyHashedUserTokenId.equals(request.id().replace("token_", ""))) { when(response.isExists()).thenReturn(true); Map sourceMap = new 
HashMap<>(); + final Authentication tokenAuth = new Authentication(authentication.getUser(), authentication.getAuthenticatedBy(), + authentication.getLookedUpBy(), tokenVersion, AuthenticationType.TOKEN, authentication.getMetadata()); + final UserToken userToken = new UserToken(possiblyHashedUserTokenId, tokenVersion, tokenAuth, + tokenService.getExpirationTime(), authentication.getMetadata()); try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { userToken.toXContent(builder, ToXContent.EMPTY_PARAMS); Map accessTokenMap = new HashMap<>(); @@ -687,35 +765,42 @@ public static void mockGetTokenFromId(UserToken userToken, boolean isExpired, Cl }).when(client).get(any(GetRequest.class), any(ActionListener.class)); } + private void mockGetTokenFromId(UserToken userToken, boolean isExpired) { + doAnswer(invocationOnMock -> { + GetRequest request = (GetRequest) invocationOnMock.getArguments()[0]; + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + GetResponse response = mock(GetResponse.class); + final String possiblyHashedUserTokenId; + if (userToken.getVersion().onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) { + possiblyHashedUserTokenId = TokenService.hashTokenString(userToken.getId()); + } else { + possiblyHashedUserTokenId = userToken.getId(); + } + if (possiblyHashedUserTokenId.equals(request.id().replace("token_", ""))) { + when(response.isExists()).thenReturn(true); + Map sourceMap = new HashMap<>(); + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + userToken.toXContent(builder, ToXContent.EMPTY_PARAMS); + Map accessTokenMap = new HashMap<>(); + Map userTokenMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), + Strings.toString(builder), false); + userTokenMap.put("id", possiblyHashedUserTokenId); + accessTokenMap.put("user_token", userTokenMap); + accessTokenMap.put("invalidated", isExpired); + sourceMap.put("access_token", accessTokenMap); + } + when(response.getSource()).thenReturn(sourceMap); + } + listener.onResponse(response); + return Void.TYPE; + }).when(client).get(any(GetRequest.class), any(ActionListener.class)); + } + public static void assertAuthentication(Authentication result, Authentication expected) { assertEquals(expected.getUser(), result.getUser()); assertEquals(expected.getAuthenticatedBy(), result.getAuthenticatedBy()); assertEquals(expected.getLookedUpBy(), result.getLookedUpBy()); assertEquals(expected.getMetadata(), result.getMetadata()); - assertEquals(AuthenticationType.TOKEN, result.getAuthenticationType()); - } - - protected String getDeprecatedAccessTokenString(TokenService tokenService, UserToken userToken) throws IOException, - GeneralSecurityException { - try (ByteArrayOutputStream os = new ByteArrayOutputStream(TokenService.MINIMUM_BASE64_BYTES); - OutputStream base64 = Base64.getEncoder().wrap(os); - StreamOutput out = new OutputStreamStreamOutput(base64)) { - out.setVersion(Version.V_7_0_0); - TokenService.KeyAndCache keyAndCache = tokenService.getActiveKeyCache(); - Version.writeVersion(Version.V_7_0_0, out); - out.writeByteArray(keyAndCache.getSalt().bytes); - out.writeByteArray(keyAndCache.getKeyHash().bytes); - final byte[] initializationVector = tokenService.getNewInitializationVector(); - out.writeByteArray(initializationVector); - try (CipherOutputStream encryptedOutput = - new CipherOutputStream(out, tokenService.getEncryptionCipher(initializationVector, keyAndCache, Version.V_7_0_0)); - StreamOutput 
encryptedStreamOutput = new OutputStreamStreamOutput(encryptedOutput)) { - encryptedStreamOutput.setVersion(Version.V_7_0_0); - encryptedStreamOutput.writeString(userToken.getId()); - encryptedStreamOutput.close(); - return new String(os.toByteArray(), StandardCharsets.UTF_8); - } - } } private DiscoveryNode addAnotherDataNodeWithVersion(ClusterService clusterService, Version version) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java deleted file mode 100644 index a73fc93f32e45..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security.authc.esnative; - -import joptsimple.OptionException; -import joptsimple.OptionParser; -import joptsimple.OptionSet; -import org.elasticsearch.cli.MockTerminal; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.test.NativeRealmIntegTestCase; -import org.elasticsearch.common.CharArrays; -import org.elasticsearch.xpack.core.security.client.SecurityClient; -import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; -import org.junit.BeforeClass; - -import java.nio.charset.StandardCharsets; -import java.nio.file.Path; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; - -import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForNodePEMFiles; -import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForPEMFiles; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.is; - -/** - * Integration tests for the {@code ESNativeMigrateTool} - */ -public class ESNativeMigrateToolTests extends NativeRealmIntegTestCase { - - // Randomly use SSL (or not) - private static boolean useSSL; - - @BeforeClass - public static void setSSL() { - useSSL = randomBoolean(); - } - - @Override - protected boolean addMockHttpTransport() { - return false; // enable http - } - - @Override - public Settings nodeSettings(int nodeOrdinal) { - logger.info("--> use SSL? 
{}", useSSL); - Settings.Builder builder = Settings.builder() - .put(super.nodeSettings(nodeOrdinal)); - addSSLSettingsForNodePEMFiles(builder, "xpack.security.http.", true); - builder.put("xpack.security.http.ssl.enabled", useSSL); - return builder.build(); - } - - @Override - protected boolean transportSSLEnabled() { - return useSSL; - } - - @Override - protected boolean shouldSetReservedUserPasswords() { - return false; - } - - private Environment nodeEnvironment() throws Exception { - return internalCluster().getInstances(Environment.class).iterator().next(); - } - - public void testRetrieveUsers() throws Exception { - final Environment nodeEnvironment = nodeEnvironment(); - String home = Environment.PATH_HOME_SETTING.get(nodeEnvironment.settings()); - Path conf = nodeEnvironment.configFile(); - SecurityClient c = new SecurityClient(client()); - logger.error("--> creating users"); - int numToAdd = randomIntBetween(1,10); - Set addedUsers = new HashSet<>(numToAdd); - for (int i = 0; i < numToAdd; i++) { - String uname = randomAlphaOfLength(5); - c.preparePutUser(uname, "s3kirt".toCharArray(), getFastStoredHashAlgoForTests(), "role1", "user").get(); - addedUsers.add(uname); - } - logger.error("--> waiting for .security index"); - ensureGreen(RestrictedIndicesNames.SECURITY_MAIN_ALIAS); - - MockTerminal t = new MockTerminal(); - String username = nodeClientUsername(); - String password = new String(CharArrays.toUtf8Bytes(nodeClientPassword().getChars()), StandardCharsets.UTF_8); - String url = getHttpURL(); - ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles(); - - Settings.Builder builder = Settings.builder() - .put("path.home", home) - .put("path.conf", conf.toString()) - .put("xpack.security.http.ssl.client_authentication", "none"); - addSSLSettingsForPEMFiles( - builder, - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem", - "testnode", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", - "xpack.security.http.", - Collections.singletonList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); - Settings settings = builder.build(); - logger.error("--> retrieving users using URL: {}, home: {}", url, home); - - OptionParser parser = muor.getParser(); - OptionSet options = parser.parse("-u", username, "-p", password, "-U", url); - logger.info("--> options: {}", options.asMap()); - Set users = muor.getUsersThatExist(t, settings, new Environment(settings, conf), options); - logger.info("--> output: \n{}", t.getOutput()); - for (String u : addedUsers) { - assertThat("expected list to contain: " + u + ", real list: " + users, users.contains(u), is(true)); - } - } - - public void testRetrieveRoles() throws Exception { - final Environment nodeEnvironment = nodeEnvironment(); - String home = Environment.PATH_HOME_SETTING.get(nodeEnvironment.settings()); - Path conf = nodeEnvironment.configFile(); - SecurityClient c = new SecurityClient(client()); - logger.error("--> creating roles"); - int numToAdd = randomIntBetween(1,10); - Set addedRoles = new HashSet<>(numToAdd); - for (int i = 0; i < numToAdd; i++) { - String rname = randomAlphaOfLength(5); - c.preparePutRole(rname) - .cluster("all", "none") - .runAs("root", "nobody") - .addIndices(new String[] { "index" }, new String[] { "read" }, new String[] { "body", "title" }, null, - new BytesArray("{\"query\": {\"match_all\": {}}}"), randomBoolean()) - .get(); - addedRoles.add(rname); - } - logger.error("--> waiting for 
.security index"); - ensureGreen(RestrictedIndicesNames.SECURITY_MAIN_ALIAS); - - MockTerminal t = new MockTerminal(); - String username = nodeClientUsername(); - String password = new String(CharArrays.toUtf8Bytes(nodeClientPassword().getChars()), StandardCharsets.UTF_8); - String url = getHttpURL(); - ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles(); - Settings.Builder builder = Settings.builder() - .put("path.home", home) - .put("xpack.security.http.ssl.client_authentication", "none"); - addSSLSettingsForPEMFiles(builder, - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.pem", - "testclient", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt", - "xpack.security.http.", - Collections.singletonList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); - Settings settings = builder.build(); - logger.error("--> retrieving roles using URL: {}, home: {}", url, home); - - OptionParser parser = muor.getParser(); - OptionSet options = parser.parse("-u", username, "-p", password, "-U", url); - Set roles = muor.getRolesThatExist(t, settings, new Environment(settings, conf), options); - logger.info("--> output: \n{}", t.getOutput()); - for (String r : addedRoles) { - assertThat("expected list to contain: " + r, roles.contains(r), is(true)); - } - } - - public void testMissingPasswordParameter() { - ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles(); - - final OptionException ex = expectThrows(OptionException.class, - () -> muor.getParser().parse("-u", "elastic", "-U", "http://localhost:9200")); - - assertThat(ex.getMessage(), containsString("password")); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateToolTests.java deleted file mode 100644 index 212fd4a8dab42..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateToolTests.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.security.authc.esnative; - -import joptsimple.OptionSet; -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.cli.Command; -import org.elasticsearch.cli.CommandTestCase; -import org.elasticsearch.cli.MockTerminal; -import org.elasticsearch.cli.Terminal.Verbosity; -import org.elasticsearch.cli.UserException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; - -import java.io.FileNotFoundException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.isEmptyString; - -/** - * Unit tests for the {@code ESNativeRealmMigrateTool} - */ -public class ESNativeRealmMigrateToolTests extends CommandTestCase { - - @Override - protected Command newCommand() { - return new ESNativeRealmMigrateTool() { - @Override - protected MigrateUserOrRoles newMigrateUserOrRoles() { - return new MigrateUserOrRoles() { - - @Override - protected Environment createEnv(Map settings) throws UserException { - Settings.Builder builder = Settings.builder(); - settings.forEach((k, v) -> builder.put(k, v)); - return TestEnvironment.newEnvironment(builder.build()); - } - - }; - } - }; - } - - public void testUserJson() throws Exception { - assertThat(ESNativeRealmMigrateTool.MigrateUserOrRoles.createUserJson(Strings.EMPTY_ARRAY, "hash".toCharArray()), - equalTo("{\"password_hash\":\"hash\",\"roles\":[]}")); - assertThat(ESNativeRealmMigrateTool.MigrateUserOrRoles.createUserJson(new String[]{"role1", "role2"}, "hash".toCharArray()), - equalTo("{\"password_hash\":\"hash\",\"roles\":[\"role1\",\"role2\"]}")); - } - - public void testRoleJson() throws Exception { - RoleDescriptor.IndicesPrivileges ip = RoleDescriptor.IndicesPrivileges.builder() - .indices(new String[]{"i1", "i2", "i3"}) - .privileges(new String[]{"all"}) - .grantedFields("body") - .build(); - RoleDescriptor.IndicesPrivileges[] ips = new RoleDescriptor.IndicesPrivileges[1]; - ips[0] = ip; - String[] cluster = Strings.EMPTY_ARRAY; - String[] runAs = Strings.EMPTY_ARRAY; - RoleDescriptor rd = new RoleDescriptor("rolename", cluster, ips, runAs); - assertThat(ESNativeRealmMigrateTool.MigrateUserOrRoles.createRoleJson(rd), - equalTo("{\"cluster\":[]," + - "\"indices\":[{\"names\":[\"i1\",\"i2\",\"i3\"]," + - "\"privileges\":[\"all\"],\"field_security\":{\"grant\":[\"body\"]}," + - "\"allow_restricted_indices\":false}]," + - "\"applications\":[]," + - "\"run_as\":[],\"metadata\":{},\"type\":\"role\"}")); - } - - public void testTerminalLogger() throws Exception { - Logger terminalLogger = ESNativeRealmMigrateTool.getTerminalLogger(terminal); - assertThat(terminal.getOutput(), isEmptyString()); - - // only error and fatal gets logged at normal verbosity - terminal.setVerbosity(Verbosity.NORMAL); - List nonLoggingLevels = new ArrayList<>(Arrays.asList(Level.values())); - nonLoggingLevels.removeAll(Arrays.asList(Level.ERROR, Level.FATAL)); - for (Level level : nonLoggingLevels) { - terminalLogger.log(level, "this level should not log " + level.name()); - assertThat(terminal.getOutput(), 
isEmptyString()); - } - - terminalLogger.log(Level.ERROR, "logging an error"); - assertEquals("logging an error\n", terminal.getOutput()); - terminal.reset(); - assertThat(terminal.getOutput(), isEmptyString()); - - terminalLogger.log(Level.FATAL, "logging a fatal message"); - assertEquals("logging a fatal message\n", terminal.getOutput()); - terminal.reset(); - assertThat(terminal.getOutput(), isEmptyString()); - - // everything will get logged at verbose! - terminal.setVerbosity(Verbosity.VERBOSE); - List loggingLevels = new ArrayList<>(Arrays.asList(Level.values())); - loggingLevels.remove(Level.OFF); - for (Level level : loggingLevels) { - terminalLogger.log(level, "this level should log " + level.name()); - assertEquals("this level should log " + level.name() + "\n", terminal.getOutput()); - terminal.reset(); - assertThat(terminal.getOutput(), isEmptyString()); - } - } - - public void testMissingFiles() throws Exception { - Path homeDir = createTempDir(); - Path confDir = homeDir.resolve("config"); - Path xpackConfDir = confDir; - Files.createDirectories(xpackConfDir); - - ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles(); - - OptionSet options = muor.getParser().parse("-u", "elastic", "-p", SecuritySettingsSourceField.TEST_PASSWORD, - "-U", "http://localhost:9200"); - Settings settings = Settings.builder().put("path.home", homeDir).build(); - Environment environment = new Environment(settings, confDir); - - MockTerminal mockTerminal = new MockTerminal(); - - FileNotFoundException fnfe = expectThrows(FileNotFoundException.class, - () -> muor.importUsers(mockTerminal, environment, options)); - assertThat(fnfe.getMessage(), containsString("users file")); - - Files.createFile(xpackConfDir.resolve("users")); - fnfe = expectThrows(FileNotFoundException.class, - () -> muor.importUsers(mockTerminal, environment, options)); - assertThat(fnfe.getMessage(), containsString("users_roles file")); - - fnfe = expectThrows(FileNotFoundException.class, - () -> muor.importRoles(mockTerminal, environment, options)); - assertThat(fnfe.getMessage(), containsString("roles.yml file")); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java index 070ea855800f7..ea1b6483fd795 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.security.authc.esnative; import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.MockSecureSettings; @@ -167,7 +166,6 @@ private void verifySuccessfulAuthentication(boolean enabled) throws Exception { verify(usersStore, times(2)).getReservedUserInfo(eq(principal), any(ActionListener.class)); final ArgumentCaptor predicateCaptor = ArgumentCaptor.forClass(Predicate.class); verify(securityIndex, times(2)).checkMappingVersion(predicateCaptor.capture()); - verifyVersionPredicate(principal, predicateCaptor.getValue()); verifyNoMoreInteractions(usersStore); } @@ -186,7 +184,6 @@ public void testLookup() throws Exception { final ArgumentCaptor 
predicateCaptor = ArgumentCaptor.forClass(Predicate.class); verify(securityIndex).checkMappingVersion(predicateCaptor.capture()); - verifyVersionPredicate(principal, predicateCaptor.getValue()); PlainActionFuture future = new PlainActionFuture<>(); reservedRealm.doLookupUser("foobar", future); @@ -234,7 +231,6 @@ public void testLookupThrows() throws Exception { final ArgumentCaptor predicateCaptor = ArgumentCaptor.forClass(Predicate.class); verify(securityIndex).checkMappingVersion(predicateCaptor.capture()); - verifyVersionPredicate(principal, predicateCaptor.getValue()); verifyNoMoreInteractions(usersStore); } @@ -448,28 +444,4 @@ public static void mockGetAllReservedUserInfo(NativeUsersStore usersStore, Map versionPredicate) { - switch (principal) { - case LogstashSystemUser.NAME: - assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); - break; - case BeatsSystemUser.NAME: - assertThat(versionPredicate.test(Version.V_6_2_3), is(false)); - assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); - break; - case APMSystemUser.NAME: - assertThat(versionPredicate.test(Version.V_6_4_0), is(false)); - assertThat(versionPredicate.test(Version.V_6_5_0), is(true)); - break; - case RemoteMonitoringUser.NAME: - assertThat(versionPredicate.test(Version.V_6_4_0), is(false)); - assertThat(versionPredicate.test(Version.V_6_5_0), is(true)); - break; - default: - assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); - break; - } - assertThat(versionPredicate.test(Version.V_7_0_0), is(true)); - } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java index 9d05495449805..168f608951e09 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java @@ -90,6 +90,7 @@ private RealmConfig getRealmConfig(Settings settings) { return new RealmConfig(REALM_IDENTIFIER, settings, TestEnvironment.newEnvironment(settings), threadContext); } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/42267") public void testAuthenticateCaching() throws Exception { Settings settings = Settings.builder() .put(RealmSettings.realmSettingPrefix(REALM_IDENTIFIER) + "cache.hash_algo", diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java index 1ef36f4fdbdf7..c3698f32b6e32 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java @@ -10,28 +10,19 @@ import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; import org.apache.http.util.EntityUtils; -import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import 
org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.test.SecuritySingleNodeTestCase; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.xpack.core.TestXPackTransportClient; import org.elasticsearch.xpack.core.common.socket.SocketAccess; -import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; import org.elasticsearch.xpack.core.ssl.PemUtils; import org.elasticsearch.xpack.core.ssl.SSLClientAuth; -import org.elasticsearch.xpack.security.LocalStateSecurity; import javax.net.ssl.KeyManager; import javax.net.ssl.SSLContext; import javax.net.ssl.TrustManager; - import java.net.InetSocketAddress; import java.security.SecureRandom; import java.util.Arrays; @@ -41,7 +32,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForNodePEMFiles; -import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForPEMFiles; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; @@ -85,36 +75,6 @@ protected boolean enableWarningsCheck() { return false; } - public void testTransportClientCanAuthenticateViaPki() { - Settings.Builder builder = Settings.builder(); - addSSLSettingsForPEMFiles( - builder, - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem", - "testnode", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", - Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt")); - try (TransportClient client = createTransportClient(builder.build())) { - client.addTransportAddress(randomFrom(node().injector().getInstance(Transport.class).boundAddress().boundAddresses())); - IndexResponse response = client.prepareIndex("foo", "bar").setSource("pki", "auth").get(); - assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); - } - } - - /** - * Test uses the testclient cert which is trusted by the SSL layer BUT it is not trusted by the PKI authentication - * realm - */ - public void testTransportClientAuthenticationFailure() { - try (TransportClient client = createTransportClient(Settings.EMPTY)) { - client.addTransportAddress(randomFrom(node().injector().getInstance(Transport.class).boundAddress().boundAddresses())); - client.prepareIndex("foo", "bar").setSource("pki", "auth").get(); - fail("transport client should not have been able to authenticate"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#")); - } - } - public void testRestAuthenticationViaPki() throws Exception { SSLContext context = getRestSSLContext("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem", "testnode", @@ -157,22 +117,6 @@ private SSLContext getRestSSLContext(String keyPath, String password, String cer return context; } - private TransportClient createTransportClient(Settings additionalSettings) { - Settings clientSettings = transportClientSettings(); - if (additionalSettings.getByPrefix("xpack.security.transport.ssl.").isEmpty() == false) { - clientSettings = clientSettings.filter(k -> k.startsWith("xpack.security.transport.ssl.") == false); - } - - Settings.Builder builder = Settings.builder() - .put("xpack.security.transport.ssl.enabled", true) - .put(clientSettings, false) - .put(additionalSettings) - .put("cluster.name", 
node().settings().get("cluster.name")); - builder.remove(SecurityField.USER_SETTING.getKey()); - builder.remove("request.headers.Authorization"); - return new TestXPackTransportClient(builder.build(), LocalStateSecurity.class); - } - private String getNodeUrl() { TransportAddress transportAddress = randomFrom(node().injector().getInstance(HttpServerTransport.class) .boundAddress().boundAddresses()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java index 2fed720e23c09..8b30cb85fed78 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java @@ -64,6 +64,7 @@ public void stop() { } } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/42267") public void testCacheSettings() { String cachingHashAlgo = Hasher.values()[randomIntBetween(0, Hasher.values().length - 1)].name().toLowerCase(Locale.ROOT); int maxUsers = randomIntBetween(10, 100); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java index 6086dc642d22f..e51945cd90418 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java @@ -50,6 +50,10 @@ public void testSSHA256SelfGenerated() throws Exception { testHasherSelfGenerated(Hasher.SSHA256); } + public void testSHA256SelfGenerated() throws Exception { + testHasherSelfGenerated(Hasher.SHA256); + } + public void testNoopSelfGenerated() throws Exception { testHasherSelfGenerated(Hasher.NOOP); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java index 3905bb6d3b4c1..e93b42a6d3a04 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java @@ -304,7 +304,7 @@ public void testSerialization() throws Exception { public void testSerializationPreV71() throws Exception { final ExpressionRoleMapping original = randomRoleMapping(false); - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_7_0_0); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_0_1); BytesStreamOutput output = new BytesStreamOutput(); output.setVersion(version); original.writeTo(output); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 6bb6e0c7b5854..3cca6cc4fd380 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -143,7 +143,7 @@ private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { public void testCacheClearOnIndexHealthChange() { final AtomicInteger numInvalidation = new AtomicInteger(0); - final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation); + final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation, true); int expectedInvalidation = 0; // existing to no longer present @@ -180,7 +180,7 @@ public void testCacheClearOnIndexHealthChange() { public void testCacheClearOnIndexOutOfDateChange() { final AtomicInteger numInvalidation = new AtomicInteger(0); - final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation); + final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation, true); store.onSecurityIndexStateChange( new SecurityIndexManager.State(Instant.now(), false, true, true, null, concreteSecurityIndexName, null), @@ -193,40 +193,59 @@ public void testCacheClearOnIndexOutOfDateChange() { assertEquals(2, numInvalidation.get()); } - private NativeRoleMappingStore buildRoleMappingStoreForInvalidationTesting(AtomicInteger invalidationCounter) { + public void testCacheIsNotClearedIfNoRealmsAreAttached() { + final AtomicInteger numInvalidation = new AtomicInteger(0); + final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation, false); + + final SecurityIndexManager.State noIndexState = dummyState(null); + final SecurityIndexManager.State greenIndexState = dummyState(ClusterHealthStatus.GREEN); + store.onSecurityIndexStateChange(noIndexState, greenIndexState); + assertEquals(0, numInvalidation.get()); + } + + private NativeRoleMappingStore buildRoleMappingStoreForInvalidationTesting(AtomicInteger invalidationCounter, boolean attachRealm) { final Settings settings = Settings.builder().put("path.home", createTempDir()).build(); final ThreadPool threadPool = mock(ThreadPool.class); final ThreadContext threadContext = new ThreadContext(settings); when(threadPool.getThreadContext()).thenReturn(threadContext); + final String realmName = randomAlphaOfLengthBetween(4, 8); + final Client client = mock(Client.class); when(client.threadPool()).thenReturn(threadPool); when(client.settings()).thenReturn(settings); doAnswer(invocationOnMock -> { + assertThat(invocationOnMock.getArguments(), Matchers.arrayWithSize(3)); + final ClearRealmCacheRequest request = (ClearRealmCacheRequest) invocationOnMock.getArguments()[1]; + assertThat(request.realms(), Matchers.arrayContaining(realmName)); + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; invalidationCounter.incrementAndGet(); listener.onResponse(new ClearRealmCacheResponse(new ClusterName("cluster"), Collections.emptyList(), Collections.emptyList())); return null; }).when(client).execute(eq(ClearRealmCacheAction.INSTANCE), any(ClearRealmCacheRequest.class), any(ActionListener.class)); - final Environment env = TestEnvironment.newEnvironment(settings); - final RealmConfig realmConfig = new RealmConfig(new RealmConfig.RealmIdentifier("ldap", getTestName()), - settings, env, threadContext); - final CachingUsernamePasswordRealm mockRealm = new 
CachingUsernamePasswordRealm(realmConfig, threadPool) { - @Override - protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { - listener.onResponse(AuthenticationResult.notHandled()); - } - - @Override - protected void doLookupUser(String username, ActionListener listener) { - listener.onResponse(null); - } - }; final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, mock(SecurityIndexManager.class), mock(ScriptService.class)); - store.refreshRealmOnChange(mockRealm); + + if (attachRealm) { + final Environment env = TestEnvironment.newEnvironment(settings); + final RealmConfig.RealmIdentifier identifier = new RealmConfig.RealmIdentifier("ldap", realmName); + final RealmConfig realmConfig = new RealmConfig(identifier, settings, env, threadContext); + final CachingUsernamePasswordRealm mockRealm = new CachingUsernamePasswordRealm(realmConfig, threadPool) { + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + listener.onResponse(AuthenticationResult.notHandled()); + } + + @Override + protected void doLookupUser(String username, ActionListener listener) { + listener.onResponse(null); + } + }; + store.refreshRealmOnChange(mockRealm); + } return store; } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java index 9bd69d3eb1a77..be73972f3a1e3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java @@ -43,7 +43,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.is; public class IndicesPermissionTests extends ESTestCase { @@ -214,7 +213,7 @@ public void testIndicesPrivilegesStreaming() throws IOException { assertEquals(readIndicesPrivileges, indicesPrivileges.build()); out = new BytesStreamOutput(); - out.setVersion(Version.V_6_0_0); + out.setVersion(Version.CURRENT); indicesPrivileges = RoleDescriptor.IndicesPrivileges.builder(); indicesPrivileges.grantedFields(allowed); indicesPrivileges.deniedFields(denied); @@ -224,7 +223,7 @@ public void testIndicesPrivilegesStreaming() throws IOException { indicesPrivileges.build().writeTo(out); out.close(); in = out.bytes().streamInput(); - in.setVersion(Version.V_6_0_0); + in.setVersion(Version.CURRENT); RoleDescriptor.IndicesPrivileges readIndicesPrivileges2 = new RoleDescriptor.IndicesPrivileges(in); assertEquals(readIndicesPrivileges, readIndicesPrivileges2); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java index 3dd5395b1fea0..6e7a9806781b5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java @@ -5,16 +5,6 @@ */ package org.elasticsearch.xpack.security.support; -import java.io.IOException; -import java.util.Arrays; -import java.util.HashMap; -import 
java.util.LinkedHashMap; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.BiConsumer; - import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; import org.elasticsearch.action.Action; @@ -52,12 +42,22 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.security.test.SecurityTestUtils; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.template.TemplateUtils; +import org.elasticsearch.xpack.security.test.SecurityTestUtils; import org.hamcrest.Matchers; import org.junit.Before; +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; + import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_MAIN_TEMPLATE_7; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.TEMPLATE_VERSION_PATTERN; import static org.hamcrest.Matchers.equalTo; @@ -429,10 +429,7 @@ public void testIndexTemplateVersionMatching() throws Exception { assertTrue(SecurityIndexManager.checkTemplateExistsAndVersionMatches( SecurityIndexManager.SECURITY_MAIN_TEMPLATE_7, clusterState, logger, - Version.V_6_0_0::before)); - assertFalse(SecurityIndexManager.checkTemplateExistsAndVersionMatches( - SecurityIndexManager.SECURITY_MAIN_TEMPLATE_7, clusterState, logger, - Version.V_6_0_0::after)); + v -> Version.CURRENT.minimumCompatibilityVersion().before(v))); } public void testUpToDateMappingsAreIdentifiedAsUpToDate() throws IOException { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SslHostnameVerificationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SslHostnameVerificationTests.java deleted file mode 100644 index 30208a1158075..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SslHostnameVerificationTests.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.security.transport.netty4; - -import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.test.SecuritySettingsSource; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.xpack.core.TestXPackTransportClient; -import org.elasticsearch.xpack.security.LocalStateSecurity; - -import java.net.InetSocketAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.Matchers.containsString; - -public class SslHostnameVerificationTests extends SecurityIntegTestCase { - - @Override - protected boolean transportSSLEnabled() { - return true; - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - Settings settings = super.nodeSettings(nodeOrdinal); - Settings.Builder settingsBuilder = Settings.builder(); - settingsBuilder.put(settings.filter(k -> k.startsWith("xpack.security.transport.ssl.") == false), false); - Path keyPath; - Path certPath; - Path nodeCertPath; - try { - /* - * This keystore uses a cert without any subject alternative names and a CN of "Elasticsearch Test Node No SAN" - * that will not resolve to a DNS name and will always cause hostname verification failures - */ - keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.pem"); - certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.crt"); - nodeCertPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"); - assert keyPath != null; - assert certPath != null; - assert nodeCertPath != null; - assertThat(Files.exists(certPath), is(true)); - assertThat(Files.exists(nodeCertPath), is(true)); - assertThat(Files.exists(keyPath), is(true)); - } catch (Exception e) { - throw new RuntimeException(e); - } - - SecuritySettingsSource.addSecureSettings(settingsBuilder, secureSettings -> { - secureSettings.setString("xpack.security.transport.ssl.secure_key_passphrase", "testnode-no-subjaltname"); - }); - return settingsBuilder.put("xpack.security.transport.ssl.key", keyPath.toAbsolutePath()) - .put("xpack.security.transport.ssl.certificate", certPath.toAbsolutePath()) - .putList("xpack.security.transport.ssl.certificate_authorities", - Arrays.asList(certPath.toString(), nodeCertPath.toString())) - // disable hostname verification as this test uses certs without a valid SAN or DNS in the CN - .put("xpack.security.transport.ssl.verification_mode", "certificate") - .build(); - } - - @Override - protected Settings transportClientSettings() { - Path keyPath; - Path certPath; - Path nodeCertPath; - try { - keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.pem"); - certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.crt"); - nodeCertPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"); - assert keyPath != null; - assert certPath != null; - assert nodeCertPath != null; - assertThat(Files.exists(certPath), is(true)); - assertThat(Files.exists(nodeCertPath), is(true)); - 
assertThat(Files.exists(keyPath), is(true)); - } catch (Exception e) { - throw new RuntimeException(e); - } - Settings settings = super.transportClientSettings(); - // remove all ssl settings - Settings.Builder builder = Settings.builder(); - builder.put(settings.filter(k -> k.startsWith("xpack.security.transport.ssl.") == false), false); - - builder.put("xpack.security.transport.ssl.verification_mode", "certificate") - .put("xpack.security.transport.ssl.key", keyPath.toAbsolutePath()) - .put("xpack.security.transport.ssl.key_passphrase", "testnode-no-subjaltname") - .put("xpack.security.transport.ssl.certificate", certPath.toAbsolutePath()) - .putList("xpack.security.transport.ssl.certificate_authorities", Arrays.asList(certPath.toString(), nodeCertPath.toString())); - return builder.build(); - } - - public void testThatHostnameMismatchDeniesTransportClientConnection() throws Exception { - Transport transport = internalCluster().getDataNodeInstance(Transport.class); - TransportAddress transportAddress = transport.boundAddress().publishAddress(); - InetSocketAddress inetSocketAddress = transportAddress.address(); - - Settings settings = Settings.builder().put(transportClientSettings()) - .put("xpack.security.transport.ssl.verification_mode", "full") - .build(); - - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateSecurity.class)) { - client.addTransportAddress(new TransportAddress(inetSocketAddress.getAddress(), inetSocketAddress.getPort())); - client.admin().cluster().prepareHealth().get(); - fail("Expected a NoNodeAvailableException due to hostname verification failures"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#")); - } - } - - public void testTransportClientConnectionIgnoringHostnameVerification() throws Exception { - Client client = internalCluster().transportClient(); - assertGreenClusterState(client); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java index 5f25213beefa1..8488fe2a5e638 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java @@ -15,38 +15,26 @@ import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.xpack.core.TestXPackTransportClient; -import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.common.socket.SocketAccess; import org.elasticsearch.xpack.core.ssl.SSLConfiguration; import org.elasticsearch.xpack.core.ssl.SSLService; -import 
org.elasticsearch.xpack.security.LocalStateSecurity; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLHandshakeException; +import javax.net.ssl.TrustManagerFactory; import java.io.InputStreamReader; import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; import java.security.KeyStore; import java.security.SecureRandom; -import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; -import java.util.List; import java.util.Locale; -import java.util.Set; - -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLHandshakeException; -import javax.net.ssl.TrustManagerFactory; import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForNodePEMFiles; import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForPEMFiles; @@ -72,50 +60,6 @@ protected boolean transportSSLEnabled() { return true; } - // no SSL exception as this is the exception is returned when connecting - public void testThatUnconfiguredCiphersAreRejected() throws Exception { - Set supportedCiphers = Sets.newHashSet(SSLContext.getDefault().getSupportedSSLParameters().getCipherSuites()); - Set defaultXPackCiphers = Sets.newHashSet(XPackSettings.DEFAULT_CIPHERS); - final List unconfiguredCiphers = new ArrayList<>(Sets.difference(supportedCiphers, defaultXPackCiphers)); - Collections.shuffle(unconfiguredCiphers, random()); - assumeFalse("the unconfigured ciphers list is empty", unconfiguredCiphers.isEmpty()); - - try (TransportClient transportClient = new TestXPackTransportClient(Settings.builder() - .put(transportClientSettings()) - .put("node.name", "programmatic_transport_client") - .put("cluster.name", internalCluster().getClusterName()) - .putList("xpack.security.transport.ssl.cipher_suites", unconfiguredCiphers) - .build(), LocalStateSecurity.class)) { - - TransportAddress transportAddress = randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses()); - transportClient.addTransportAddress(transportAddress); - - transportClient.admin().cluster().prepareHealth().get(); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#")); - } - } - - public void testThatTransportClientUsingSSLv3ProtocolIsRejected() { - assumeFalse("Can't run in a FIPS JVM as SSLv3 SSLContext not available", inFipsJvm()); - try (TransportClient transportClient = new TestXPackTransportClient(Settings.builder() - .put(transportClientSettings()) - .put("node.name", "programmatic_transport_client") - .put("cluster.name", internalCluster().getClusterName()) - .putList("xpack.security.transport.ssl.supported_protocols", new String[]{"SSLv3"}) - .build(), LocalStateSecurity.class)) { - - TransportAddress transportAddress = randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses()); - transportClient.addTransportAddress(transportAddress); - - transportClient.admin().cluster().prepareHealth().get(); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#")); - } - } - public void testThatConnectionToHTTPWorks() throws Exception { Settings.Builder builder = Settings.builder(); addSSLSettingsForPEMFiles( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslMultiPortTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslMultiPortTests.java deleted file mode 100644 index d07bff822a2f8..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslMultiPortTests.java +++ /dev/null @@ -1,436 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security.transport.ssl; - -import org.elasticsearch.bootstrap.JavaVersion; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.network.NetworkAddress; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.xpack.core.TestXPackTransportClient; -import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.security.SecurityField; -import org.elasticsearch.xpack.core.ssl.SSLClientAuth; -import org.elasticsearch.xpack.security.LocalStateSecurity; -import org.junit.BeforeClass; - -import java.net.InetAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.AccessController; -import java.security.PrivilegedAction; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -import static org.elasticsearch.test.SecuritySettingsSource.TEST_USER_NAME; -import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForNodePEMFiles; -import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForPEMFiles; -import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.Matchers.containsString; - -public class SslMultiPortTests extends SecurityIntegTestCase { - - private static int randomClientPort; - private static int randomNoClientAuthPort; - private static InetAddress localAddress; - - @BeforeClass - public static void getRandomPort() { - randomClientPort = randomIntBetween(49000, 65500); // ephemeral port - randomNoClientAuthPort = randomIntBetween(49000, 65500); - localAddress = InetAddress.getLoopbackAddress(); - } - - /** - * On each node sets up the following profiles: - *
* <ul> - *     <li>default: testnode keypair. Requires client auth</li> - *     <li>client: testnode-client-profile profile that only trusts the testclient cert. Requires client auth</li> - *     <li>no_client_auth: testnode keypair. Does not require client auth</li> - * </ul>
- */ - @Override - protected Settings nodeSettings(int nodeOrdinal) { - String randomClientPortRange = randomClientPort + "-" + (randomClientPort+100); - String randomNoClientAuthPortRange = randomNoClientAuthPort + "-" + (randomNoClientAuthPort+100); - - Path trustCert; - try { - trustCert = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt"); - assertThat(Files.exists(trustCert), is(true)); - } catch (Exception e) { - throw new RuntimeException(e); - } - - Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); - addSSLSettingsForNodePEMFiles(builder, "transport.profiles.client.xpack.security.", true); - builder.put("transport.profiles.client.port", randomClientPortRange) - .put("transport.profiles.client.bind_host", NetworkAddress.format(localAddress)) - .put("transport.profiles.client.xpack.security.ssl.certificate_authorities", trustCert.toAbsolutePath()); - addSSLSettingsForNodePEMFiles(builder, "transport.profiles.no_client_auth.xpack.security.", true); - builder.put("transport.profiles.no_client_auth.port", randomNoClientAuthPortRange) - .put("transport.profiles.no_client_auth.bind_host", NetworkAddress.format(localAddress)) - .put("transport.profiles.no_client_auth.xpack.security.ssl.client_authentication", SSLClientAuth.NONE); - final Settings settings = builder.build(); - logger.info("node {} settings:\n{}", nodeOrdinal, settings); - return settings; - } - - @Override - protected boolean transportSSLEnabled() { - return true; - } - - private TransportClient createTransportClient(Settings additionalSettings) { - Settings settings = Settings.builder() - .put(transportClientSettings().filter(s -> s.startsWith("xpack.security.transport.ssl") == false)) - .put("node.name", "programmatic_transport_client") - .put("cluster.name", internalCluster().getClusterName()) - .put("xpack.security.transport.ssl.enabled", true) - .put(additionalSettings) - .build(); - //return new TestXPackTransportClient(settings, LocalStateSecurity.class); - logger.info("transport client settings:\n{}", settings); - return new TestXPackTransportClient(settings, LocalStateSecurity.class); - } - - /** - * Uses the internal cluster's transport client to test connection to the default profile. The internal transport - * client uses the same SSL settings as the default profile so a connection should always succeed - */ - public void testThatStandardTransportClientCanConnectToDefaultProfile() throws Exception { - assertGreenClusterState(internalCluster().transportClient()); - } - - /** - * Uses a transport client with the same settings as the internal cluster transport client to test connection to the - * no_client_auth profile. The internal transport client is not used here since we are connecting to a different - * profile. 
Since the no_client_auth profile does not require client authentication, the standard transport client - * connection should always succeed as the settings are the same as the default profile except for the port and - * disabling the client auth requirement - */ - public void testThatStandardTransportClientCanConnectToNoClientAuthProfile() throws Exception { - try(TransportClient transportClient = new TestXPackTransportClient(Settings.builder() - .put(transportClientSettings()) - .put("xpack.security.transport.ssl.enabled", true) - .putList("xpack.security.transport.ssl.supported_protocols", getProtocols()) - .put("node.name", "programmatic_transport_client") - .put("cluster.name", internalCluster().getClusterName()) - .build(), LocalStateSecurity.class)) { - transportClient.addTransportAddress(new TransportAddress(localAddress, - getProfilePort("no_client_auth"))); - assertGreenClusterState(transportClient); - } - } - - /** - * Uses a transport client with the same settings as the internal cluster transport client to test connection to the - * client profile. The internal transport client is not used here since we are connecting to a different - * profile. The client profile requires client auth and only trusts the certificate in the testclient-client-profile - * keystore so this connection will fail as the certificate presented by the standard transport client is not trusted - * by this profile - */ - public void testThatStandardTransportClientCannotConnectToClientProfile() throws Exception { - try (TransportClient transportClient = createTransportClient(Settings.EMPTY)) { - transportClient.addTransportAddress(new TransportAddress(localAddress, getProfilePort("client"))); - transportClient.admin().cluster().prepareHealth().get(); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client with a custom key pair; TransportClient only trusts the testnode - * certificate and had its own self signed certificate. This test connects to the client profile, which is only - * set to trust the testclient-client-profile certificate so the connection should always succeed - */ - public void testThatProfileTransportClientCanConnectToClientProfile() throws Exception { - Settings.Builder builder = Settings.builder(); - addSSLSettingsForPEMFiles( - builder, - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem", - "testclient-client-profile", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt", - Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt")); - try (TransportClient transportClient = createTransportClient(builder.build())) { - transportClient.addTransportAddress(new TransportAddress(localAddress, getProfilePort("client"))); - assertGreenClusterState(transportClient); - } - } - - /** - * Uses a transport client with a custom key pair; TransportClient only trusts the testnode - * certificate and had its own self signed certificate. 
This test connects to the no_client_auth profile, which - * uses a truststore that does not trust the testclient-client-profile certificate but does not require client - * authentication - */ - public void testThatProfileTransportClientCanConnectToNoClientAuthProfile() throws Exception { - Settings.Builder builder = Settings.builder(); - addSSLSettingsForPEMFiles( - builder, - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem", - "testclient-client-profile", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt", - List.of("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); - builder.putList("xpack.security.transport.ssl.supported_protocols", getProtocols()); - try (TransportClient transportClient = createTransportClient(builder.build())) { - transportClient.addTransportAddress(new TransportAddress(localAddress, - getProfilePort("no_client_auth"))); - assertGreenClusterState(transportClient); - } - } - - /** - * Uses a transport client with a custom key pair; TransportClient only trusts the testnode - * certificate and had its own self signed certificate. This test connects to the default profile, which - * uses a truststore that does not trust the testclient-client-profile certificate and requires client authentication - * so the connection should always fail - */ - public void testThatProfileTransportClientCannotConnectToDefaultProfile() throws Exception { - Settings.Builder builder = Settings.builder(); - addSSLSettingsForPEMFiles( - builder, - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem", - "testclient-client-profile", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt", - Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); - try (TransportClient transportClient = createTransportClient(builder.build())) { - TransportAddress transportAddress = randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses()); - transportClient.addTransportAddress(transportAddress); - transportClient.admin().cluster().prepareHealth().get(); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client with SSL disabled. This test connects to the default profile, which should always fail - * as a non-ssl transport client cannot connect to a ssl profile - */ - public void testThatTransportClientCannotConnectToDefaultProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses())); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client with SSL disabled. 
This test connects to the client profile, which should always fail - * as a non-ssl transport client cannot connect to a ssl profile - */ - public void testThatTransportClientCannotConnectToClientProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(new TransportAddress(localAddress, getProfilePort("client"))); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client with SSL disabled. This test connects to the no_client_auth profile, which should always fail - * as a non-ssl transport client cannot connect to a ssl profile - */ - public void testThatTransportClientCannotConnectToNoClientAuthProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(new TransportAddress(localAddress, - getProfilePort("no_client_auth"))); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client that only trusts the testnode certificate. This test connects to the no_client_auth profile, - * which uses the testnode certificate and does not require to present a certificate, so this connection should always succeed - */ - public void testThatTransportClientWithOnlyTruststoreCanConnectToNoClientAuthProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.certificate_authorities", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) - .putList("xpack.security.transport.ssl.supported_protocols", getProtocols()) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(new TransportAddress(localAddress, - getProfilePort("no_client_auth"))); - } - } - - /** - * Uses a transport client that only trusts the testnode certificate. 
This test connects to the client profile, which uses - * the testnode certificate and requires the client to present a certificate, so this connection will never work as - * the client has no certificate to present - */ - public void testThatTransportClientWithOnlyTruststoreCannotConnectToClientProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.REQUIRED) - .put("xpack.security.transport.ssl.certificate_authorities", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(new TransportAddress(localAddress, getProfilePort("client"))); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client that only trusts the testnode certificate. This test connects to the default profile, which uses - * the testnode certificate and requires the client to present a certificate, so this connection will never work as - * the client has no certificate to present - */ - public void testThatTransportClientWithOnlyTruststoreCannotConnectToDefaultProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.REQUIRED) - .put("xpack.security.transport.ssl.certificate_authorities", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses())); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client with the default JDK truststore; this truststore only trusts the known good public - * certificate authorities. 
This test connects to the default profile, which uses a self-signed certificate that - * will never be trusted by the default truststore so the connection should always fail - */ - public void testThatSSLTransportClientWithNoTruststoreCannotConnectToDefaultProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.REQUIRED) - .put("xpack.security.transport.ssl.enabled", true) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses())); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client with the default JDK truststore; this truststore only trusts the known good public - * certificate authorities. This test connects to the client profile, which uses a self-signed certificate that - * will never be trusted by the default truststore so the connection should always fail - */ - public void testThatSSLTransportClientWithNoTruststoreCannotConnectToClientProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.REQUIRED) - .put("xpack.security.transport.ssl.enabled", true) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(new TransportAddress(localAddress, getProfilePort("client"))); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client with the default JDK truststore; this truststore only trusts the known good public - * certificate authorities. 
This test connects to the no_client_auth profile, which uses a self-signed certificate that - * will never be trusted by the default truststore so the connection should always fail - */ - public void testThatSSLTransportClientWithNoTruststoreCannotConnectToNoClientAuthProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.REQUIRED) - .put("xpack.security.transport.ssl.enabled", true) - .putList("xpack.security.transport.ssl.supported_protocols", getProtocols()) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(new TransportAddress(localAddress, - getProfilePort("no_client_auth"))); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - private static int getProfilePort(String profile) { - TransportAddress[] transportAddresses = - internalCluster().getInstance(Transport.class).profileBoundAddresses().get(profile).boundAddresses(); - for (TransportAddress address : transportAddresses) { - if (address.address().getAddress().equals(localAddress)) { - return address.address().getPort(); - } - } - throw new IllegalStateException("failed to find transport address equal to [" + NetworkAddress.format(localAddress) + "] " + - " in the following bound addresses " + Arrays.toString(transportAddresses)); - } - - /** - * TLSv1.3 when running in a JDK prior to 11.0.3 has a race condition when multiple simultaneous connections are established. See - * JDK-8213202. This issue is not triggered when using client authentication, which we do by default for transport connections. - * However if client authentication is turned off and TLSv1.3 is used on the affected JVMs then we will hit this issue. 
- */ - private static List getProtocols() { - JavaVersion full = - AccessController.doPrivileged((PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.version"))); - if (full.compareTo(JavaVersion.parse("11.0.3")) < 0) { - return List.of("TLSv1.2"); - } - return XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java index 52a03dca95b36..ce0cc5c111265 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java @@ -16,20 +16,14 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.xpack.core.TestXPackTransportClient; import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; import org.elasticsearch.xpack.core.ssl.PemUtils; import org.elasticsearch.xpack.core.ssl.SSLClientAuth; -import org.elasticsearch.xpack.security.LocalStateSecurity; import javax.net.ssl.KeyManager; import javax.net.ssl.SSLContext; @@ -38,8 +32,6 @@ import java.io.IOException; import java.io.InputStream; import java.io.UncheckedIOException; -import java.nio.file.Files; -import java.nio.file.Path; import java.security.AccessController; import java.security.PrivilegedAction; import java.security.SecureRandom; @@ -135,39 +127,6 @@ public void testThatHttpWorksWithSslClientAuth() throws IOException { } } - public void testThatTransportWorksWithoutSslClientAuth() throws IOException { - // specify an arbitrary key and certificate - not the certs needed to connect to the transport protocol - Path keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem"); - Path certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt"); - Path nodeCertPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"); - Path nodeEcCertPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt"); - - if (Files.notExists(keyPath) || Files.notExists(certPath)) { - throw new ElasticsearchException("key or certificate path doesn't exist"); - } - - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("xpack.security.transport.ssl.secure_key_passphrase", "testclient-client-profile"); - Settings settings = Settings.builder() - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.NONE) - .put("xpack.security.transport.ssl.key", keyPath) - .put("xpack.security.transport.ssl.certificate", certPath) - .putList("xpack.security.transport.ssl.supported_protocols", getProtocols()) - 
.putList("xpack.security.transport.ssl.certificate_authorities", nodeCertPath.toString(), nodeEcCertPath.toString()) - .setSecureSettings(secureSettings) - .put("cluster.name", internalCluster().getClusterName()) - .put(SecurityField.USER_SETTING.getKey(), transportClientUsername() + ":" + new String(transportClientPassword().getChars())) - .build(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateSecurity.class)) { - Transport transport = internalCluster().getDataNodeInstance(Transport.class); - TransportAddress transportAddress = transport.boundAddress().publishAddress(); - client.addTransportAddress(transportAddress); - - assertGreenClusterState(client); - } - } - private SSLContext getSSLContext() { try { String certPath = "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt"; diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index 14d80ab50ee3f..1d13df3b2c32e 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -16,6 +16,7 @@ ext { // SQL test dependency versions csvjdbcVersion="1.0.34" h2Version="1.4.197" + h2gisVersion="1.5.0" } configurations { diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index 9a15bcf29c0a1..37e0baf00aa71 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -21,6 +21,9 @@ dependencies { compile (project(':libs:x-content')) { transitive = false } + compile (project(':libs:elasticsearch-geo')) { + transitive = false + } compile project(':libs:core') runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" testCompile "org.elasticsearch.test:framework:${version}" diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java index 52aff352ac182..51a03dad70b55 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java @@ -44,7 +44,9 @@ public enum EsType implements SQLType { INTERVAL_DAY_TO_SECOND( ExtraTypes.INTERVAL_DAY_SECOND), INTERVAL_HOUR_TO_MINUTE( ExtraTypes.INTERVAL_HOUR_MINUTE), INTERVAL_HOUR_TO_SECOND( ExtraTypes.INTERVAL_HOUR_SECOND), - INTERVAL_MINUTE_TO_SECOND(ExtraTypes.INTERVAL_MINUTE_SECOND); + INTERVAL_MINUTE_TO_SECOND(ExtraTypes.INTERVAL_MINUTE_SECOND), + GEO_POINT( ExtraTypes.GEOMETRY), + GEO_SHAPE( ExtraTypes.GEOMETRY); private final Integer type; diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/ExtraTypes.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/ExtraTypes.java index 3df70f8e1d956..b8f09ece2f3be 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/ExtraTypes.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/ExtraTypes.java @@ -29,5 +29,6 @@ private ExtraTypes() {} static final int INTERVAL_HOUR_MINUTE = 111; static final int INTERVAL_HOUR_SECOND = 112; static final int INTERVAL_MINUTE_SECOND = 113; + static final int GEOMETRY = 114; } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java index 9b1ff87596798..5f2f0773ff17a 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java +++ 
b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java @@ -3,6 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.sql.jdbc; import java.util.Objects; @@ -89,4 +90,4 @@ public boolean equals(Object obj) { public int hashCode() { return Objects.hash(name, type, table, catalog, schema, label, displaySize); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java index 1c216d8dba7c7..c9480dbcb1c2b 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java @@ -35,7 +35,7 @@ / Additional properties can be specified either through the Properties object or in the URL. In case of duplicates, the URL wins. */ //TODO: beef this up for Security/SSL -class JdbcConfiguration extends ConnectionConfiguration { +public class JdbcConfiguration extends ConnectionConfiguration { static final String URL_PREFIX = "jdbc:es://"; public static URI DEFAULT_URI = URI.create("http://localhost:9200/"); @@ -47,7 +47,7 @@ class JdbcConfiguration extends ConnectionConfiguration { // can be out/err/url static final String DEBUG_OUTPUT_DEFAULT = "err"; - static final String TIME_ZONE = "timezone"; + public static final String TIME_ZONE = "timezone"; // follow the JDBC spec and use the JVM default... // to avoid inconsistency, the default is picked up once at startup and reused across connections // to cater to the principle of least surprise diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java index 041c457d91b3d..39d942362d731 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java @@ -190,7 +190,7 @@ public void setObject(int parameterIndex, Object x) throws SQLException { setParam(parameterIndex, null, EsType.NULL); return; } - + // check also here the unsupported types so that any unsupported interfaces ({@code java.sql.Struct}, // {@code java.sql.Array} etc) will generate the correct exception message. Otherwise, the method call // {@code TypeConverter.fromJavaToJDBC(x.getClass())} will report the implementing class as not being supported. 
@@ -330,7 +330,7 @@ public void setNClob(int parameterIndex, Reader reader, long length) throws SQLE public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { setObject(parameterIndex, xmlObject); } - + @Override public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { setObject(parameterIndex, x, TypeUtils.asSqlType(targetSqlType), scaleOrLength); @@ -343,13 +343,12 @@ public void setObject(int parameterIndex, Object x, SQLType targetSqlType, int s private void setObject(int parameterIndex, Object x, EsType dataType, String typeString) throws SQLException { checkOpen(); - // set the null value on the type and exit if (x == null) { setParam(parameterIndex, null, dataType); return; } - + checkKnownUnsupportedTypes(x); if (x instanceof byte[]) { if (dataType != EsType.BINARY) { @@ -359,7 +358,7 @@ private void setObject(int parameterIndex, Object x, EsType dataType, String typ setParam(parameterIndex, x, EsType.BINARY); return; } - + if (x instanceof Timestamp || x instanceof Calendar || x instanceof Date @@ -380,7 +379,7 @@ private void setObject(int parameterIndex, Object x, EsType dataType, String typ LocalDateTime ldt = (LocalDateTime) x; Calendar cal = getDefaultCalendar(); cal.set(ldt.getYear(), ldt.getMonthValue() - 1, ldt.getDayOfMonth(), ldt.getHour(), ldt.getMinute(), ldt.getSecond()); - + dateToSet = cal.getTime(); } else if (x instanceof Time) { dateToSet = new java.util.Date(((Time) x).getTime()); @@ -398,7 +397,7 @@ private void setObject(int parameterIndex, Object x, EsType dataType, String typ throw new SQLFeatureNotSupportedException( "Conversion from type [" + x.getClass().getName() + "] to [" + typeString + "] not supported"); } - + if (x instanceof Boolean || x instanceof Byte || x instanceof Short @@ -412,7 +411,7 @@ private void setObject(int parameterIndex, Object x, EsType dataType, String typ dataType); return; } - + throw new SQLFeatureNotSupportedException( "Conversion from type [" + x.getClass().getName() + "] to [" + typeString + "] not supported"); } @@ -421,14 +420,14 @@ private void checkKnownUnsupportedTypes(Object x) throws SQLFeatureNotSupportedE List> unsupportedTypes = new ArrayList<>(Arrays.asList(Struct.class, Array.class, SQLXML.class, RowId.class, Ref.class, Blob.class, NClob.class, Clob.class, LocalDate.class, LocalTime.class, OffsetTime.class, OffsetDateTime.class, URL.class, BigDecimal.class)); - + for (Class clazz:unsupportedTypes) { if (clazz.isAssignableFrom(x.getClass())) { throw new SQLFeatureNotSupportedException("Objects of type [" + clazz.getName() + "] are not supported"); } } } - + private Calendar getDefaultCalendar() { return Calendar.getInstance(cfg.timeZone(), Locale.ROOT); } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java index 9c30241ccbdb1..7e21f2206b1e9 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java @@ -5,13 +5,16 @@ */ package org.elasticsearch.xpack.sql.jdbc; +import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.xpack.sql.proto.StringUtils; +import java.io.IOException; import java.sql.Date; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.sql.Time; import java.sql.Timestamp; +import 
java.text.ParseException; import java.time.Duration; import java.time.LocalDate; import java.time.LocalDateTime; @@ -100,6 +103,7 @@ private static T dateTimeConvert(Long millis, Calendar c, Function readScriptSpec() throws Exception { + List list = new ArrayList<>(); + list.addAll(GeoCsvSpecTestCase.readScriptSpec()); + list.addAll(readScriptSpec("/single-node-only/command-sys-geo.csv-spec", specParser())); + return list; + } + + public GeoJdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber, testCase); + } +} diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java new file mode 100644 index 0000000000000..2a9a1592c71d0 --- /dev/null +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.qa.single_node; + +import org.elasticsearch.xpack.sql.qa.geo.GeoSqlSpecTestCase; + +public class GeoJdbcSqlSpecIT extends GeoSqlSpecTestCase { + public GeoJdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber, query); + } +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java new file mode 100644 index 0000000000000..e40e6de9e3a9c --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.qa.geo; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.client.Request; +import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.SpecBaseIntegrationTestCase; +import org.elasticsearch.xpack.sql.jdbc.JdbcConfiguration; +import org.junit.Before; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.csvConnection; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.executeCsvQuery; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.specParser; + +/** + * Tests comparing sql queries executed against our jdbc client + * with hard coded result sets. 
+ */ +public abstract class GeoCsvSpecTestCase extends SpecBaseIntegrationTestCase { + private final CsvTestCase testCase; + + @ParametersFactory(argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = specParser(); + List tests = new ArrayList<>(); + tests.addAll(readScriptSpec("/ogc/ogc.csv-spec", parser)); + tests.addAll(readScriptSpec("/geo/geosql.csv-spec", parser)); + tests.addAll(readScriptSpec("/docs/geo.csv-spec", parser)); + return tests; + } + + public GeoCsvSpecTestCase(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber); + this.testCase = testCase; + } + + + @Before + public void setupTestGeoDataIfNeeded() throws Exception { + if (client().performRequest(new Request("HEAD", "/ogc")).getStatusLine().getStatusCode() == 404) { + GeoDataLoader.loadOGCDatasetIntoEs(client(), "ogc"); + } + if (client().performRequest(new Request("HEAD", "/geo")).getStatusLine().getStatusCode() == 404) { + GeoDataLoader.loadGeoDatasetIntoEs(client(), "geo"); + } + } + + @Override + protected final void doTest() throws Throwable { + try (Connection csv = csvConnection(testCase); + Connection es = esJdbc()) { + + // pass the testName as table for debugging purposes (in case the underlying reader is missing) + ResultSet expected = executeCsvQuery(csv, testName); + ResultSet elasticResults = executeJdbcQuery(es, testCase.query); + assertResults(expected, elasticResults); + } + } + + // make sure ES uses UTC (otherwise JDBC driver picks up the JVM timezone per spec/convention) + @Override + protected Properties connectionProperties() { + Properties connectionProperties = new Properties(); + connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC"); + return connectionProperties; + } + +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoDataLoader.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoDataLoader.java new file mode 100644 index 0000000000000..40e8f64be87cc --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoDataLoader.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.qa.geo; + +import org.apache.http.HttpHost; +import org.apache.http.HttpStatus; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.sql.qa.jdbc.SqlSpecTestCase; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.Map; + +import static org.elasticsearch.xpack.sql.qa.jdbc.DataLoader.createString; +import static org.elasticsearch.xpack.sql.qa.jdbc.DataLoader.readFromJarUrl; + +public class GeoDataLoader { + + public static void main(String[] args) throws Exception { + try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) { + loadOGCDatasetIntoEs(client, "ogc"); + loadGeoDatasetIntoEs(client, "geo"); + Loggers.getLogger(GeoDataLoader.class).info("Geo data loaded"); + } + } + + protected static void loadOGCDatasetIntoEs(RestClient client, String index) throws Exception { + createIndex(client, index, createOGCIndexRequest()); + loadData(client, index, readResource("/ogc/ogc.json")); + makeFilteredAlias(client, "lakes", index, "\"term\" : { \"ogc_type\" : \"lakes\" }"); + makeFilteredAlias(client, "road_segments", index, "\"term\" : { \"ogc_type\" : \"road_segments\" }"); + makeFilteredAlias(client, "divided_routes", index, "\"term\" : { \"ogc_type\" : \"divided_routes\" }"); + makeFilteredAlias(client, "forests", index, "\"term\" : { \"ogc_type\" : \"forests\" }"); + makeFilteredAlias(client, "bridges", index, "\"term\" : { \"ogc_type\" : \"bridges\" }"); + makeFilteredAlias(client, "streams", index, "\"term\" : { \"ogc_type\" : \"streams\" }"); + makeFilteredAlias(client, "buildings", index, "\"term\" : { \"ogc_type\" : \"buildings\" }"); + makeFilteredAlias(client, "ponds", index, "\"term\" : { \"ogc_type\" : \"ponds\" }"); + makeFilteredAlias(client, "named_places", index, "\"term\" : { \"ogc_type\" : \"named_places\" }"); + makeFilteredAlias(client, "map_neatlines", index, "\"term\" : { \"ogc_type\" : \"map_neatlines\" }"); + } + + private static String createOGCIndexRequest() throws Exception { + XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); + createIndex.startObject("settings"); + { + createIndex.field("number_of_shards", 1); + } + createIndex.endObject(); + createIndex.startObject("mappings"); + { + createIndex.startObject("properties"); + { + // Common + createIndex.startObject("ogc_type").field("type", "keyword").endObject(); + createIndex.startObject("fid").field("type", "integer").endObject(); + createString("name", createIndex); + + // Type specific + createIndex.startObject("shore").field("type", "geo_shape").endObject(); // lakes + + createString("aliases", createIndex); // road_segments + createIndex.startObject("num_lanes").field("type", "integer").endObject(); // road_segments, divided_routes + createIndex.startObject("centerline").field("type", "geo_shape").endObject(); // road_segments, streams + + 
createIndex.startObject("centerlines").field("type", "geo_shape").endObject(); // divided_routes + + createIndex.startObject("boundary").field("type", "geo_shape").endObject(); // forests, named_places + + createIndex.startObject("position").field("type", "geo_shape").endObject(); // bridges, buildings + + createString("address", createIndex); // buildings + createIndex.startObject("footprint").field("type", "geo_shape").endObject(); // buildings + + createIndex.startObject("type").field("type", "keyword").endObject(); // ponds + createIndex.startObject("shores").field("type", "geo_shape").endObject(); // ponds + + createIndex.startObject("neatline").field("type", "geo_shape").endObject(); // map_neatlines + + } + createIndex.endObject(); + } + createIndex.endObject().endObject(); + return Strings.toString(createIndex); + } + + private static void createIndex(RestClient client, String index, String settingsMappings) throws IOException { + Request createIndexRequest = new Request("PUT", "/" + index); + createIndexRequest.setEntity(new StringEntity(settingsMappings, ContentType.APPLICATION_JSON)); + client.performRequest(createIndexRequest); + } + + static void loadGeoDatasetIntoEs(RestClient client, String index) throws Exception { + createIndex(client, index, readResource("/geo/geosql.json")); + loadData(client, index, readResource("/geo/geosql-bulk.json")); + } + + private static void loadData(RestClient client, String index, String bulk) throws IOException { + Request request = new Request("POST", "/" + index + "/_bulk"); + request.addParameter("refresh", "true"); + request.setJsonEntity(bulk); + Response response = client.performRequest(request); + + if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { + throw new RuntimeException("Cannot load data " + response.getStatusLine()); + } + + String bulkResponseStr = EntityUtils.toString(response.getEntity()); + Map bulkResponseMap = XContentHelper.convertToMap(JsonXContent.jsonXContent, bulkResponseStr, false); + + if ((boolean) bulkResponseMap.get("errors")) { + throw new RuntimeException("Failed to load bulk data " + bulkResponseStr); + } + } + + + public static void makeFilteredAlias(RestClient client, String aliasName, String index, String filter) throws Exception { + Request request = new Request("POST", "/" + index + "/_alias/" + aliasName); + request.setJsonEntity("{\"filter\" : { " + filter + " } }"); + client.performRequest(request); + } + + private static String readResource(String location) throws IOException { + URL dataSet = SqlSpecTestCase.class.getResource(location); + if (dataSet == null) { + throw new IllegalArgumentException("Can't find [" + location + "]"); + } + StringBuilder builder = new StringBuilder(); + try (BufferedReader reader = new BufferedReader(new InputStreamReader(readFromJarUrl(dataSet), StandardCharsets.UTF_8))) { + String line = reader.readLine(); + while(line != null) { + if (line.trim().startsWith("//") == false) { + builder.append(line); + builder.append('\n'); + } + line = reader.readLine(); + } + return builder.toString(); + } + } + +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java new file mode 100644 index 0000000000000..ec97cab6f10b1 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.qa.geo; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.client.Request; +import org.elasticsearch.xpack.sql.qa.jdbc.LocalH2; +import org.elasticsearch.xpack.sql.qa.jdbc.SpecBaseIntegrationTestCase; +import org.elasticsearch.xpack.sql.jdbc.JdbcConfiguration; +import org.h2gis.functions.factory.H2GISFunctions; +import org.junit.Before; +import org.junit.ClassRule; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.text.NumberFormat; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.Properties; + +/** + * Tests comparing geo sql queries executed against our jdbc client + * with those executed against H2GIS's jdbc client. + */ +public abstract class GeoSqlSpecTestCase extends SpecBaseIntegrationTestCase { + private String query; + + @ClassRule + public static LocalH2 H2 = new LocalH2((c) -> { + assumeTrue("JTS inside H2 is using the default locale for toUpperCase() in string comparison, making it fail to parse WKT on certain" + + " locales", "point".toUpperCase(Locale.getDefault()).equals("POINT")); + // Load GIS extensions + H2GISFunctions.load(c); + c.createStatement().execute("RUNSCRIPT FROM 'classpath:/ogc/sqltsch.sql'"); + c.createStatement().execute("RUNSCRIPT FROM 'classpath:/geo/setup_test_geo.sql'"); + }); + + @ParametersFactory(argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = new SqlSpecParser(); + List tests = new ArrayList<>(); + tests.addAll(readScriptSpec("/ogc/ogc.sql-spec", parser)); + tests.addAll(readScriptSpec("/geo/geosql.sql-spec", parser)); + return tests; + } + + @Before + public void setupTestGeoDataIfNeeded() throws Exception { + assumeTrue("Cannot support locales that don't use Hindu-Arabic numerals and an ASCII minus sign due to H2", + "-42".equals(NumberFormat.getInstance(Locale.getDefault()).format(-42))); + if (client().performRequest(new Request("HEAD", "/ogc")).getStatusLine().getStatusCode() == 404) { + GeoDataLoader.loadOGCDatasetIntoEs(client(), "ogc"); + } + if (client().performRequest(new Request("HEAD", "/geo")).getStatusLine().getStatusCode() == 404) { + GeoDataLoader.loadGeoDatasetIntoEs(client(), "geo"); + } + } + + + private static class SqlSpecParser implements Parser { + @Override + public Object parse(String line) { + return line.endsWith(";") ?
line.substring(0, line.length() - 1) : line; + } + } + + public GeoSqlSpecTestCase(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber); + this.query = query; + } + + @Override + protected final void doTest() throws Throwable { + try (Connection h2 = H2.get(); + Connection es = esJdbc()) { + + ResultSet expected, elasticResults; + expected = executeJdbcQuery(h2, query); + elasticResults = executeJdbcQuery(es, query); + + assertResults(expected, elasticResults); + } + } + + // TODO: use UTC for now until deciding on a strategy for handling date extraction + @Override + protected Properties connectionProperties() { + Properties connectionProperties = new Properties(); + connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC"); + return connectionProperties; + } +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java index 6376bd13308d6..daa4e5b4d0c87 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java @@ -46,7 +46,7 @@ private CsvTestUtils() { */ public static ResultSet executeCsvQuery(Connection csv, String csvTableName) throws SQLException { ResultSet expected = csv.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY) - .executeQuery("SELECT * FROM " + csvTableName); + .executeQuery("SELECT * FROM " + csvTableName); // trigger data loading for type inference expected.beforeFirst(); return expected; @@ -187,13 +187,13 @@ public Object parse(String line) { } else { if (line.endsWith(";")) { - // pick up the query - testCase = new CsvTestCase(); - query.append(line.substring(0, line.length() - 1).trim()); - testCase.query = query.toString(); - testCase.earlySchema = earlySchema.toString(); - earlySchema.setLength(0); - query.setLength(0); + // pick up the query + testCase = new CsvTestCase(); + query.append(line.substring(0, line.length() - 1).trim()); + testCase.query = query.toString(); + testCase.earlySchema = earlySchema.toString(); + earlySchema.setLength(0); + query.setLength(0); } // keep reading the query else { diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java index 774a406da863c..ff50a33a0afe8 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java @@ -63,7 +63,7 @@ public static void loadDocsDatasetIntoEs(RestClient client) throws Exception { freeze(client, "archive"); } - private static void createString(String name, XContentBuilder builder) throws Exception { + public static void createString(String name, XContentBuilder builder) throws Exception { builder.startObject(name).field("type", "text") .startObject("fields") .startObject("keyword").field("type", "keyword").endObject() @@ -292,7 +292,7 @@ protected static void loadLibDatasetIntoEs(RestClient client, String index) thro Response response = client.performRequest(request); } - protected static void makeAlias(RestClient client, String aliasName, String... 
indices) throws Exception { + public static void makeAlias(RestClient client, String aliasName, String... indices) throws Exception { for (String index : indices) { client.performRequest(new Request("POST", "/" + index + "/_alias/" + aliasName)); } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java index 8931fe0264e9d..76894fc5a53d5 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java @@ -8,18 +8,25 @@ import com.carrotsearch.hppc.IntObjectHashMap; import org.apache.logging.log4j.Logger; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.xpack.sql.jdbc.EsType; import org.elasticsearch.xpack.sql.proto.StringUtils; import org.relique.jdbc.csv.CsvResultSet; +import java.io.IOException; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Types; +import java.text.ParseException; import java.time.temporal.TemporalAmount; import java.util.ArrayList; +import java.util.Calendar; import java.util.List; import java.util.Locale; +import java.util.TimeZone; import static java.lang.String.format; import static java.sql.Types.BIGINT; @@ -29,6 +36,8 @@ import static java.sql.Types.REAL; import static java.sql.Types.SMALLINT; import static java.sql.Types.TINYINT; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; @@ -38,6 +47,7 @@ * Utility class for doing JUnit-style asserts over JDBC. */ public class JdbcAssert { + private static final Calendar UTC_CALENDAR = Calendar.getInstance(TimeZone.getTimeZone("UTC"), Locale.ROOT); private static final IntObjectHashMap SQL_TO_TYPE = new IntObjectHashMap<>(); @@ -139,6 +149,11 @@ public static void assertResultSetMetadata(ResultSet expected, ResultSet actual, expectedType = Types.TIMESTAMP; } + // H2 treats GEOMETRY as OTHER + if (expectedType == Types.OTHER && nameOf(actualType).startsWith("GEO_") ) { + actualType = Types.OTHER; + } + // since csv doesn't support real, we use float instead..... if (expectedType == Types.FLOAT && expected instanceof CsvResultSet) { expectedType = Types.REAL; @@ -251,6 +266,24 @@ else if (type == Types.DOUBLE) { assertEquals(msg, (double) expectedObject, (double) actualObject, lenientFloatingNumbers ? 1d : 0.0d); } else if (type == Types.FLOAT) { assertEquals(msg, (float) expectedObject, (float) actualObject, lenientFloatingNumbers ? 
1f : 0.0f); + } else if (type == Types.OTHER) { + if (actualObject instanceof Geometry) { + // We need to convert the expected object to libs/geo Geometry for comparison + try { + expectedObject = WellKnownText.fromWKT(expectedObject.toString()); + } catch (IOException | ParseException ex) { + fail(ex.getMessage()); + } + } + if (actualObject instanceof Point) { + // geo points are loaded from doc values where they are stored as long-encoded values leading + // to a loss of precision + assertThat(expectedObject, instanceOf(Point.class)); + assertEquals(((Point) expectedObject).getLat(), ((Point) actualObject).getLat(), 0.000001d); + assertEquals(((Point) expectedObject).getLon(), ((Point) actualObject).getLon(), 0.000001d); + } else { + assertEquals(msg, expectedObject, actualObject); + } } // intervals else if (type == Types.VARCHAR && actualObject instanceof TemporalAmount) { diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java index e6295985cf519..2f3ce7eaddd88 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java @@ -81,4 +81,4 @@ protected void after() { public Connection get() throws SQLException { return DriverManager.getConnection(url, DEFAULTS); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec index 9f63de97c9928..073788511d0f0 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec @@ -129,11 +129,20 @@ RIGHT |SCALAR RTRIM |SCALAR SPACE |SCALAR SUBSTRING |SCALAR -UCASE |SCALAR +UCASE |SCALAR CAST |SCALAR CONVERT |SCALAR DATABASE |SCALAR USER |SCALAR +ST_ASTEXT |SCALAR +ST_ASWKT |SCALAR +ST_DISTANCE |SCALAR +ST_GEOMETRYTYPE |SCALAR +ST_GEOMFROMTEXT |SCALAR +ST_WKTTOSQL |SCALAR +ST_X |SCALAR +ST_Y |SCALAR +ST_Z |SCALAR SCORE |SCORE ; diff --git a/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec index 8d9a65d1b85b6..bfb28775bc3b6 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec @@ -182,6 +182,26 @@ SELECT -2 * INTERVAL '1 23:45' DAY TO MINUTES AS result; -3 23:30:00.0 ; +intervalHoursMultiply +SELECT 4 * -INTERVAL '2' HOURS AS result1, -5 * -INTERVAL '3' HOURS AS result2; + result1 | result2 +---------------+-------------- +-0 08:00:00.0 | +0 15:00:00.0 +; + +intervalAndFieldMultiply +schema::languages:byte|result:string +SELECT languages, CAST (languages * INTERVAL '1 10:30' DAY TO MINUTES AS string) AS result FROM test_emp ORDER BY emp_no LIMIT 5; + + languages | result +---------------+--------------------------------------------- +2 | +2 21:00:00.0 +5 | +7 04:30:00.0 +4 | +5 18:00:00.0 +5 | +7 04:30:00.0 +1 | +1 10:30:00.0 +; + dateMinusInterval SELECT CAST('2018-05-13T12:34:56' AS DATETIME) - INTERVAL '2-8' YEAR TO MONTH AS result; diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec index c2432007bff35..936c7eef88191 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec @@ -201,7
+201,7 @@ showFunctions // tag::showFunctions SHOW FUNCTIONS; - name | type + name | type -----------------+--------------- AVG |AGGREGATE COUNT |AGGREGATE @@ -325,13 +325,21 @@ RIGHT |SCALAR RTRIM |SCALAR SPACE |SCALAR SUBSTRING |SCALAR -UCASE |SCALAR +UCASE |SCALAR CAST |SCALAR CONVERT |SCALAR DATABASE |SCALAR USER |SCALAR +ST_ASTEXT |SCALAR +ST_ASWKT |SCALAR +ST_DISTANCE |SCALAR +ST_GEOMETRYTYPE |SCALAR +ST_GEOMFROMTEXT |SCALAR +ST_WKTTOSQL |SCALAR +ST_X |SCALAR +ST_Y |SCALAR +ST_Z |SCALAR SCORE |SCORE - // end::showFunctions ; diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs/geo.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs/geo.csv-spec new file mode 100644 index 0000000000000..60fbebfc13950 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/docs/geo.csv-spec @@ -0,0 +1,79 @@ +// +// CSV spec used by the geo docs +// + +/////////////////////////////// +// +// ST_AsWKT() +// +/////////////////////////////// + +selectAsWKT +// tag::aswkt +SELECT city, ST_AsWKT(location) location FROM "geo" WHERE city = 'Amsterdam'; + + city:s | location:s +Amsterdam |point (4.850311987102032 52.347556999884546) +// end::aswkt +; + +selectWKTToSQL +// tag::wkttosql +SELECT CAST(ST_WKTToSQL('POINT (10 20)') AS STRING) location; + + location:s +point (10.0 20.0) +// end::wkttosql +; + + +selectDistance +// tag::distance +SELECT ST_Distance(ST_WKTToSQL('POINT (10 20)'), ST_WKTToSQL('POINT (20 30)')) distance; + + distance:d +1499101.2889383635 +// end::distance +; + +/////////////////////////////// +// +// Geometry Properties +// +/////////////////////////////// + +selectGeometryType +// tag::geometrytype +SELECT ST_GeometryType(ST_WKTToSQL('POINT (10 20)')) type; + + type:s +POINT +// end::geometrytype +; + +selectX +// tag::x +SELECT ST_X(ST_WKTToSQL('POINT (10 20)')) x; + + x:d +10.0 +// end::x +; + +selectY +// tag::y +SELECT ST_Y(ST_WKTToSQL('POINT (10 20)')) y; + + y:d +20.0 +// end::y +; + +selectZ +// tag::z +SELECT ST_Z(ST_WKTToSQL('POINT (10 20 30)')) z; + + z:d +30.0 +// end::z +; diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geo.csv b/x-pack/plugin/sql/qa/src/main/resources/geo/geo.csv new file mode 100644 index 0000000000000..8275bd7c884ef --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/geo.csv @@ -0,0 +1,16 @@ +city,region,region_point,location,shape +Mountain View,Americas,POINT(-105.2551 54.5260),point (-122.083843 37.386483),point (-122.083843 37.386483) +Chicago,Americas,POINT(-105.2551 54.5260),point (-87.637874 41.888783),point (-87.637874 41.888783) +New York,Americas,POINT(-105.2551 54.5260),point (-73.990027 40.745171),point (-73.990027 40.745171) +San Francisco,Americas,POINT(-105.2551 54.5260),point (-122.394228 37.789541),point (-122.394228 37.789541) +Phoenix,Americas,POINT(-105.2551 54.5260),point (-111.973505 33.376242),point (-111.973505 33.376242) +Amsterdam,Europe,POINT(15.2551 54.5260),point (4.850312 52.347557),point (4.850312 52.347557) +Berlin,Europe,POINT(15.2551 54.5260),point (13.390889 52.486701),point (13.390889 52.486701) +Munich,Europe,POINT(15.2551 54.5260),point (11.537505 48.146321),point (11.537505 48.146321) +London,Europe,POINT(15.2551 54.5260),point (-0.121672 51.510871),point (-0.121672 51.510871) +Paris,Europe,POINT(15.2551 54.5260),point (2.351773 48.845538),point (2.351773 48.845538) +Singapore,Asia,POINT(100.6197 34.0479),point (103.855535 1.295868),point (103.855535 1.295868) +Hong Kong,Asia,POINT(100.6197 34.0479),point (114.183925 22.281397),point (114.183925 22.281397) +Seoul,Asia,POINT(100.6197 
34.0479),point (127.060851 37.509132),point (127.060851 37.509132) +Tokyo,Asia,POINT(100.6197 34.0479),point (139.76402225 35.669616),point (139.76402225 35.669616) +Sydney,Asia,POINT(100.6197 34.0479),point (151.208629 -33.863385),point (151.208629 -33.863385) diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geosql-bulk.json b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql-bulk.json new file mode 100644 index 0000000000000..8c65742aac063 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql-bulk.json @@ -0,0 +1,33 @@ +{"index":{"_id": "1"}} +{"region": "Americas", "city": "Mountain View", "location": {"lat":"37.386483", "lon":"-122.083843"}, "location_no_dv": {"lat":"37.386483", "lon":"-122.083843"}, "shape": "POINT (-122.083843 37.386483 30)", "region_point": "POINT(-105.2551 54.5260)"} +{"index":{"_id": "2"}} +{"region": "Americas", "city": "Chicago", "location": [-87.637874, 41.888783], "location_no_dv": [-87.637874, 41.888783], "shape": {"type" : "point", "coordinates" : [-87.637874, 41.888783, 181]}, "region_point": "POINT(-105.2551 54.5260)"} +{"index":{"_id": "3"}} +{"region": "Americas", "city": "New York", "location": "40.745171,-73.990027", "location_no_dv": "40.745171,-73.990027", "shape": "POINT (-73.990027 40.745171 10)", "region_point": "POINT(-105.2551 54.5260)"} +{"index":{"_id": "4"}} +{"region": "Americas", "city": "San Francisco", "location": "37.789541,-122.394228", "location_no_dv": "37.789541,-122.394228", "shape": "POINT (-122.394228 37.789541 16)", "region_point": "POINT(-105.2551 54.5260)"} +{"index":{"_id": "5"}} +{"region": "Americas", "city": "Phoenix", "location": "33.376242,-111.973505", "location_no_dv": "33.376242,-111.973505", "shape": "POINT (-111.973505 33.376242 331)", "region_point": "POINT(-105.2551 54.5260)"} +{"index":{"_id": "6"}} +{"region": "Europe", "city": "Amsterdam", "location": "52.347557,4.850312", "location_no_dv": "52.347557,4.850312", "shape": "POINT (4.850312 52.347557 2)", "region_point": "POINT(15.2551 54.5260)"} +{"index":{"_id": "7"}} +{"region": "Europe", "city": "Berlin", "location": "52.486701,13.390889", "location_no_dv": "52.486701,13.390889", "shape": "POINT (13.390889 52.486701 34)", "region_point": "POINT(15.2551 54.5260)"} +{"index":{"_id": "8"}} +{"region": "Europe", "city": "Munich", "location": "48.146321,11.537505", "location_no_dv": "48.146321,11.537505", "shape": "POINT (11.537505 48.146321 519)", "region_point": "POINT(15.2551 54.5260)"} +{"index":{"_id": "9"}} +{"region": "Europe", "city": "London", "location": "51.510871,-0.121672", "location_no_dv": "51.510871,-0.121672", "shape": "POINT (-0.121672 51.510871 11)", "region_point": "POINT(15.2551 54.5260)"} +{"index":{"_id": "10"}} +{"region": "Europe", "city": "Paris", "location": "48.845538,2.351773", "location_no_dv": "48.845538,2.351773", "shape": "POINT (2.351773 48.845538 35)", "region_point": "POINT(15.2551 54.5260)"} +{"index":{"_id": "11"}} +{"region": "Asia", "city": "Singapore", "location": "1.295868,103.855535", "location_no_dv": "1.295868,103.855535", "shape": "POINT (103.855535 1.295868 15)", "region_point": "POINT(100.6197 34.0479)"} +{"index":{"_id": "12"}} +{"region": "Asia", "city": "Hong Kong", "location": "22.281397,114.183925", "location_no_dv": "22.281397,114.183925", "shape": "POINT (114.183925 22.281397 552)", "region_point": "POINT(100.6197 34.0479)"} +{"index":{"_id": "13"}} +{"region": "Asia", "city": "Seoul", "location": "37.509132,127.060851", "location_no_dv": "37.509132,127.060851", "shape": "POINT 
(127.060851 37.509132 38)", "region_point": "POINT(100.6197 34.0479)"} +{"index":{"_id": "14"}} +{"region": "Asia", "city": "Tokyo", "location": "35.669616,139.76402225", "location_no_dv": "35.669616,139.76402225", "shape": "POINT (139.76402225 35.669616 40)", "region_point": "POINT(100.6197 34.0479)"} +{"index":{"_id": "15"}} +{"region": "Asia", "city": "Sydney", "location": "-33.863385,151.208629", "location_no_dv": "-33.863385,151.208629", "shape": "POINT (151.208629 -33.863385 100)", "region_point": "POINT(100.6197 34.0479)"} + + + diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.csv-spec new file mode 100644 index 0000000000000..31f3857216c0b --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.csv-spec @@ -0,0 +1,288 @@ +// +// Commands on geo test data +// + +showTables +SHOW TABLES "geo"; + + name:s | type:s | kind:s +geo |BASE TABLE |INDEX +; + +// DESCRIBE + +describe +DESCRIBE "geo"; + + column:s | type:s | mapping:s +city | VARCHAR | keyword +location | GEOMETRY | geo_point +location_no_dv | GEOMETRY | geo_point +region | VARCHAR | keyword +region_point | VARCHAR | keyword +shape | GEOMETRY | geo_shape +; + +// SELECT ALL +// TODO: For now we just get geopoint formatted as is and we also need to convert it to STRING to work with CSV + +selectAllPointsAsStrings +SELECT city, CAST(location AS STRING) location, CAST(location_no_dv AS STRING) location_no_dv, CAST(shape AS STRING) shape, region FROM "geo" ORDER BY "city"; + + city:s | location:s | location_no_dv:s | shape:s | region:s +Amsterdam |point (4.850311987102032 52.347556999884546) |point (4.850312 52.347557) |point (4.850312 52.347557 2.0) |Europe +Berlin |point (13.390888944268227 52.48670099303126) |point (13.390889 52.486701) |point (13.390889 52.486701 34.0) |Europe +Chicago |point (-87.63787407428026 41.888782968744636) |point (-87.637874 41.888783) |point (-87.637874 41.888783 181.0) |Americas +Hong Kong |point (114.18392493389547 22.28139698971063) |point (114.183925 22.281397) |point (114.183925 22.281397 552.0) |Asia +London |point (-0.12167204171419144 51.51087098289281)|point (-0.121672 51.510871) |point (-0.121672 51.510871 11.0) |Europe +Mountain View |point (-122.08384302444756 37.38648299127817) |point (-122.083843 37.386483) |point (-122.083843 37.386483 30.0) |Americas +Munich |point (11.537504978477955 48.14632098656148) |point (11.537505 48.146321) |point (11.537505 48.146321 519.0) |Europe +New York |point (-73.9900270756334 40.74517097789794) |point (-73.990027 40.745171) |point (-73.990027 40.745171 10.0) |Americas +Paris |point (2.3517729341983795 48.84553796611726) |point (2.351773 48.845538) |point (2.351773 48.845538 35.0) |Europe +Phoenix |point (-111.97350500151515 33.37624196894467) |point (-111.973505 33.376242) |point (-111.973505 33.376242 331.0) |Americas +San Francisco |point (-122.39422800019383 37.789540970698) |point (-122.394228 37.789541) |point (-122.394228 37.789541 16.0) |Americas +Seoul |point (127.06085099838674 37.50913198571652) |point (127.060851 37.509132) |point (127.060851 37.509132 38.0) |Asia +Singapore |point (103.8555349688977 1.2958679627627134) |point (103.855535 1.295868) |point (103.855535 1.295868 15.0) |Asia +Sydney |point (151.20862897485495 -33.863385021686554)|point (151.208629 -33.863385) |point (151.208629 -33.863385 100.0) |Asia +Tokyo |point (139.76402222178876 35.66961596254259) |point (139.76402225 35.669616)|point (139.76402225 35.669616 40.0) |Asia +; + +// 
TODO: Both shape and location contain the same data for now, we should change it later to make things more interesting +selectAllPointsAsWKT +SELECT city, ST_ASWKT(location) location_wkt, ST_ASWKT(shape) shape_wkt, region FROM "geo" ORDER BY "city"; + + city:s | location_wkt:s | shape_wkt:s | region:s +Amsterdam |point (4.850311987102032 52.347556999884546) |point (4.850312 52.347557 2.0) |Europe +Berlin |point (13.390888944268227 52.48670099303126) |point (13.390889 52.486701 34.0) |Europe +Chicago |point (-87.63787407428026 41.888782968744636) |point (-87.637874 41.888783 181.0) |Americas +Hong Kong |point (114.18392493389547 22.28139698971063) |point (114.183925 22.281397 552.0) |Asia +London |point (-0.12167204171419144 51.51087098289281)|point (-0.121672 51.510871 11.0) |Europe +Mountain View |point (-122.08384302444756 37.38648299127817) |point (-122.083843 37.386483 30.0) |Americas +Munich |point (11.537504978477955 48.14632098656148) |point (11.537505 48.146321 519.0) |Europe +New York |point (-73.9900270756334 40.74517097789794) |point (-73.990027 40.745171 10.0) |Americas +Paris |point (2.3517729341983795 48.84553796611726) |point (2.351773 48.845538 35.0) |Europe +Phoenix |point (-111.97350500151515 33.37624196894467) |point (-111.973505 33.376242 331.0) |Americas +San Francisco |point (-122.39422800019383 37.789540970698) |point (-122.394228 37.789541 16.0) |Americas +Seoul |point (127.06085099838674 37.50913198571652) |point (127.060851 37.509132 38.0) |Asia +Singapore |point (103.8555349688977 1.2958679627627134) |point (103.855535 1.295868 15.0) |Asia +Sydney |point (151.20862897485495 -33.863385021686554)|point (151.208629 -33.863385 100.0) |Asia +Tokyo |point (139.76402222178876 35.66961596254259) |point (139.76402225 35.669616 40.0) |Asia +; + +selectWithAsWKTInWhere +SELECT city, ST_ASWKT(location) location_wkt, region FROM "geo" WHERE LOCATE('114', ST_ASWKT(location)) > 0 ORDER BY "city"; + + city:s | location_wkt:s | region:s +Hong Kong |point (114.18392493389547 22.28139698971063)|Asia +; + +selectAllPointsOrderByLonFromAsWKT +SELECT city, SUBSTRING(ST_ASWKT(location), 8, LOCATE(' ', ST_ASWKT(location), 8) - 8) lon FROM "geo" ORDER BY lon; + + city:s | lon:s +London |-0.12167204171419144 +Phoenix |-111.97350500151515 +Mountain View |-122.08384302444756 +San Francisco |-122.39422800019383 +New York |-73.9900270756334 +Chicago |-87.63787407428026 +Singapore |103.8555349688977 +Munich |11.537504978477955 +Hong Kong |114.18392493389547 +Seoul |127.06085099838674 +Berlin |13.390888944268227 +Tokyo |139.76402222178876 +Sydney |151.20862897485495 +Paris |2.3517729341983795 +Amsterdam |4.850311987102032 +; + +selectAllPointsGroupByHemisphereFromAsWKT +SELECT COUNT(city) count, CAST(SUBSTRING(ST_ASWKT(location), 8, 1) = '-' AS STRING) west FROM "geo" GROUP BY west ORDER BY west; + + count:l | west:s +9 |false +6 |true +; + +selectRegionUsingWktToSql +SELECT region, city, ST_ASWKT(ST_WKTTOSQL(region_point)) region_wkt FROM geo ORDER BY region, city; + + region:s | city:s | region_wkt:s +Americas |Chicago |point (-105.2551 54.526) +Americas |Mountain View |point (-105.2551 54.526) +Americas |New York |point (-105.2551 54.526) +Americas |Phoenix |point (-105.2551 54.526) +Americas |San Francisco |point (-105.2551 54.526) +Asia |Hong Kong |point (100.6197 34.0479) +Asia |Seoul |point (100.6197 34.0479) +Asia |Singapore |point (100.6197 34.0479) +Asia |Sydney |point (100.6197 34.0479) +Asia |Tokyo |point (100.6197 34.0479) +Europe |Amsterdam |point (15.2551 54.526) +Europe |Berlin 
|point (15.2551 54.526) +Europe |London |point (15.2551 54.526) +Europe |Munich |point (15.2551 54.526) +Europe |Paris |point (15.2551 54.526) +; + +selectCitiesWithAGroupByWktToSql +SELECT COUNT(city) city_by_region, CAST(ST_WKTTOSQL(region_point) AS STRING) region FROM geo WHERE city LIKE '%a%' GROUP BY ST_WKTTOSQL(region_point) ORDER BY ST_WKTTOSQL(region_point); + + city_by_region:l | region:s +3 |point (-105.2551 54.526) +1 |point (100.6197 34.0479) +2 |point (15.2551 54.526) +; + +selectCitiesWithEOrderByWktToSql +SELECT region, city FROM geo WHERE city LIKE '%e%' ORDER BY ST_WKTTOSQL(region_point), city; + + region:s | city:s +Americas |Mountain View +Americas |New York +Americas |Phoenix +Asia |Seoul +Asia |Singapore +Asia |Sydney +Europe |Amsterdam +Europe |Berlin +; + + +selectCitiesByDistance +SELECT region, city, ST_Distance(location, ST_WktToSQL('POINT (-71 42)')) distance FROM geo WHERE distance < 5000000 ORDER BY region, city; + + region:s | city:s | distance:d +Americas |Chicago |1373941.5140200066 +Americas |Mountain View |4335936.909375596 +Americas |New York |285839.6579622518 +Americas |Phoenix |3692895.0346903414 +Americas |San Francisco |4343565.010996301 +; + +selectCitiesByDistanceFloored +SELECT region, city, FLOOR(ST_Distance(location, ST_WktToSQL('POINT (-71 42)'))) distance FROM geo WHERE distance < 5000000 ORDER BY region, city; + + region:s | city:s | distance:l +Americas |Chicago |1373941 +Americas |Mountain View |4335936 +Americas |New York |285839 +Americas |Phoenix |3692895 +Americas |San Francisco |4343565 +; + +selectCitiesOrderByDistance +SELECT region, city FROM geo ORDER BY ST_Distance(location, ST_WktToSQL('POINT (-71 42)')) ; + + region:s | city:s +Americas |New York +Americas |Chicago +Americas |Phoenix +Americas |Mountain View +Americas |San Francisco +Europe |London +Europe |Paris +Europe |Amsterdam +Europe |Berlin +Europe |Munich +Asia |Tokyo +Asia |Seoul +Asia |Hong Kong +Asia |Singapore +Asia |Sydney +; + +groupCitiesByDistance +SELECT COUNT(*) count, FIRST(region) region FROM geo GROUP BY FLOOR(ST_Distance(location, ST_WktToSQL('POINT (-71 42)'))/5000000); + + count:l | region:s +5 |Americas +5 |Europe +3 |Asia +2 |Asia +; + +selectWktToSqlOfNull +SELECT ST_ASWKT(ST_WktToSql(NULL)) shape; + shape:s +null +; + +selectWktToSqlOfNull +SELECT ST_Distance(ST_WktToSql(NULL), ST_WktToSQL('POINT (-71 42)')) shape; + shape:d +null +; + +groupByGeometryType +SELECT COUNT(*) cnt, ST_GeometryType(location) gt FROM geo GROUP BY ST_GeometryType(location); + + cnt:l | gt:s +15 |POINT +; + + +groupAndOrderByGeometryType +SELECT COUNT(*) cnt, ST_GeometryType(location) gt FROM geo GROUP BY gt ORDER BY gt; + + cnt:l | gt:s +15 |POINT +; + +groupByEastWest +SELECT COUNT(*) cnt, FLOOR(ST_X(location)/90) east FROM geo GROUP BY east ORDER BY east; + + cnt:l | east:l +3 |-2 +3 |-1 +4 |0 +5 |1 +; + +groupByNorthSouth +SELECT COUNT(*) cnt, FLOOR(ST_Y(location)/45) north FROM geo GROUP BY north ORDER BY north; + + cnt:l | north:l +1 |-1 +9 |0 +5 |1 +; + +groupByNorthEastSortByEastNorth +SELECT COUNT(*) cnt, FLOOR(ST_Y(location)/45) north, FLOOR(ST_X(location)/90) east FROM geo GROUP BY north, east ORDER BY east, north; + + cnt:l | north:l | east:l +3 |0 |-2 +2 |0 |-1 +1 |1 |-1 +4 |1 |0 +1 |-1 |1 +4 |0 |1 +; + +selectFilterByXOfLocation +SELECT city, ST_X(shape) x, ST_Y(shape) y, ST_Z(shape) z, ST_X(location) lx, ST_Y(location) ly FROM geo WHERE lx > 0 ORDER BY ly; + + city:s | x:d | y:d | z:d | lx:d | ly:d +Sydney |151.208629 |-33.863385 |100.0 
|151.20862897485495|-33.863385021686554 +Singapore |103.855535 |1.295868 |15.0 |103.8555349688977 |1.2958679627627134 +Hong Kong |114.183925 |22.281397 |552.0 |114.18392493389547|22.28139698971063 +Tokyo |139.76402225 |35.669616 |40.0 |139.76402222178876|35.66961596254259 +Seoul |127.060851 |37.509132 |38.0 |127.06085099838674|37.50913198571652 +Munich |11.537505 |48.146321 |519.0 |11.537504978477955|48.14632098656148 +Paris |2.351773 |48.845538 |35.0 |2.3517729341983795|48.84553796611726 +Amsterdam |4.850312 |52.347557 |2.0 |4.850311987102032 |52.347556999884546 +Berlin |13.390889 |52.486701 |34.0 |13.390888944268227|52.48670099303126 +; + +selectFilterByRegionPoint +SELECT city, region, ST_X(location) x FROM geo WHERE ST_X(ST_WKTTOSQL(region_point)) < 0 ORDER BY x; + + city:s | region:s | x:d +San Francisco |Americas |-122.39422800019383 +Mountain View |Americas |-122.08384302444756 +Phoenix |Americas |-111.97350500151515 +Chicago |Americas |-87.63787407428026 +New York |Americas |-73.9900270756334 +; diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.json b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.json new file mode 100644 index 0000000000000..56007a0284c43 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.json @@ -0,0 +1,28 @@ +{ + "settings": { + "number_of_shards": 1 + }, + "mappings": { + "properties": { + "region": { + "type": "keyword" + }, + "city": { + "type": "keyword" + }, + "location": { + "type": "geo_point" + }, + "location_no_dv": { + "type": "geo_point", + "doc_values": "false" + }, + "shape": { + "type": "geo_shape" + }, + "region_point": { + "type": "keyword" + } + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.sql-spec new file mode 100644 index 0000000000000..e801d8477f6bf --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.sql-spec @@ -0,0 +1,24 @@ +// +// Commands on geo test data +// + +selectAllShapesAsGeometries +SELECT city, shape, region FROM "geo" ORDER BY "city"; + +selectAllShapesAsWKT +SELECT city, ST_GEOMFROMTEXT(ST_ASWKT(shape)) shape_wkt, region FROM "geo" ORDER BY "city"; + +selectAllPointsAsGeometries +SELECT city, location, region FROM "geo" ORDER BY "city"; + +selectAllPointsAsWKT +SELECT city, ST_GEOMFROMTEXT(ST_ASWKT(location)) shape_wkt, region FROM "geo" ORDER BY "city"; + +selectRegionUsingWktToSqlWithoutConvertion +SELECT region, city, shape, ST_GEOMFROMTEXT(region_point) region_wkt FROM geo ORDER BY region, city; + +selectCitiesWithGroupByWktToSql +SELECT COUNT(city) city_by_region, ST_GEOMFROMTEXT(region_point) region_geom FROM geo WHERE city LIKE '%a%' GROUP BY region_geom ORDER BY city_by_region; + +selectCitiesWithOrderByWktToSql +SELECT region, city, UCASE(ST_ASWKT(ST_GEOMFROMTEXT(region_point))) region_wkt FROM geo WHERE city LIKE '%e%' ORDER BY region_wkt, city; diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/setup_test_geo.sql b/x-pack/plugin/sql/qa/src/main/resources/geo/setup_test_geo.sql new file mode 100644 index 0000000000000..b8b8d4e36f453 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/setup_test_geo.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS "geo"; +CREATE TABLE "geo" ( + "city" VARCHAR(50), + "region" VARCHAR(50), + "region_point" VARCHAR(50), + "location" POINT, + "shape" GEOMETRY +) + AS SELECT * FROM CSVREAD('classpath:/geo/geo.csv'); diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/OGC-NOTICE.txt 
b/x-pack/plugin/sql/qa/src/main/resources/ogc/OGC-NOTICE.txt new file mode 100644 index 0000000000000..ac061f5cc4493 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/OGC-NOTICE.txt @@ -0,0 +1,41 @@ +Software Notice + +This OGC work (including software, documents, or other related items) is being +provided by the copyright holders under the following license. By obtaining, +using and/or copying this work, you (the licensee) agree that you have read, +understood, and will comply with the following terms and conditions: + +Permission to use, copy, and modify this software and its documentation, with +or without modification, for any purpose and without fee or royalty is hereby +granted, provided that you include the following on ALL copies of the software +and documentation or portions thereof, including modifications, that you make: + +1. The full text of this NOTICE in a location viewable to users of the +redistributed or derivative work. + +2. Any pre-existing intellectual property disclaimers, notices, or terms and +conditions. If none exist, a short notice of the following form (hypertext is +preferred, text is permitted) should be used within the body of any +redistributed or derivative code: "Copyright © [$date-of-document] Open +Geospatial Consortium, Inc. All Rights Reserved. +http://www.opengeospatial.org/ogc/legal (Hypertext is preferred, but a textual +representation is permitted.) + +3. Notice of any changes or modifications to the OGC files, including the date +changes were made. (We recommend you provide URIs to the location from which +the code is derived.) + + +THIS SOFTWARE AND DOCUMENTATION IS PROVIDED "AS IS," AND COPYRIGHT HOLDERS MAKE +NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO, WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT +THE USE OF THE SOFTWARE OR DOCUMENTATION WILL NOT INFRINGE ANY THIRD PARTY +ATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS. + +COPYRIGHT HOLDERS WILL NOT BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE OF THE SOFTWARE OR DOCUMENTATION. + +The name and trademarks of copyright holders may NOT be used in advertising or +publicity pertaining to the software without specific, written prior permission. +Title to copyright in this software and any associated documentation will at all +times remain with copyright holders. 
\ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.csv-spec new file mode 100644 index 0000000000000..f1941161697d2 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.csv-spec @@ -0,0 +1,36 @@ +// +// Commands on OGC data +// + +showTables +SHOW TABLES "ogc"; + + name:s | type:s | kind:s +ogc |BASE TABLE |INDEX +; + +// DESCRIBE + +describe +DESCRIBE "ogc"; + + column:s | type:s | mapping:s +address | VARCHAR | text +address.keyword | VARCHAR | keyword +aliases | VARCHAR | text +aliases.keyword | VARCHAR | keyword +boundary | GEOMETRY | geo_shape +centerline | GEOMETRY | geo_shape +centerlines | GEOMETRY | geo_shape +fid | INTEGER | integer +footprint | GEOMETRY | geo_shape +name | VARCHAR | text +name.keyword | VARCHAR | keyword +neatline | GEOMETRY | geo_shape +num_lanes | INTEGER | integer +ogc_type | VARCHAR | keyword +position | GEOMETRY | geo_shape +shore | GEOMETRY | geo_shape +shores | GEOMETRY | geo_shape +type | VARCHAR | keyword +; diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.json b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.json new file mode 100644 index 0000000000000..afdf2f5d61ac6 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.json @@ -0,0 +1,58 @@ +// This dataset is derived from OpenGIS Simple Features for SQL (Types and Functions) Test Suite on Apr 1, 2018 +// +// Copyright © 2018 Open Geospatial Consortium, Inc. All Rights Reserved. +// http://www.opengeospatial.org/ogc/legal +// +// lakes +{"index":{"_id": "101"}} +{"ogc_type":"lakes", "fid": 101, "name": "BLUE LAKE", "shore": "POLYGON ((52 18, 66 23, 73 9, 48 6, 52 18), (59 18, 67 18, 67 13, 59 13, 59 18))"} +// +// road segments +{"index":{"_id": "102"}} +{"ogc_type":"road_segments", "fid": 102, "name": "Route 5", "num_lanes": 2, "centerline": "LINESTRING (0 18, 10 21, 16 23, 28 26, 44 31)"} +{"index":{"_id": "103"}} +{"ogc_type":"road_segments", "fid": 103, "name": "Route 5", "aliases": "Main Street", "num_lanes": 4, "centerline": "LINESTRING (44 31, 56 34, 70 38)"} +{"index":{"_id": "104"}} +{"ogc_type":"road_segments", "fid": 104, "name": "Route 5", "num_lanes": 2, "centerline": "LINESTRING (70 38, 72 48)"} +{"index":{"_id": "105"}} +{"ogc_type":"road_segments", "fid": 105, "name": "Main Street", "num_lanes": 4, "centerline": "LINESTRING (70 38, 84 42)"} +{"index":{"_id": "106"}} +{"ogc_type":"road_segments", "fid": 106, "name": "Dirt Road by Green Forest", "num_lanes": 1, "centerline": "LINESTRING (28 26, 28 0)"} +// +// divided routes +{"index":{"_id": "119"}} +{"ogc_type":"divided_routes", "fid": 119, "name": "Route 75", "num_lanes": 4, "centerlines": "MULTILINESTRING ((10 48, 10 21, 10 0), (16 0, 16 23, 16 48))"} +// +// forests +{"index":{"_id": "109"}} +{"ogc_type":"forests", "fid": 109, "name": "Green Forest", "boundary": "MULTIPOLYGON (((28 26, 28 0, 84 0, 84 42, 28 26), (52 18, 66 23, 73 9, 48 6, 52 18)), ((59 18, 67 18, 67 13, 59 13, 59 18)))"} +// +// forests +{"index":{"_id": "110"}} +{"ogc_type":"bridges", "fid": 110, "name": "Cam Bridge", "position": "POINT (44 31)"} +// +// streams +{"index":{"_id": "111"}} +{"ogc_type":"streams", "fid": 111, "name": "Cam Stream", "centerline": "LINESTRING (38 48, 44 41, 41 36, 44 31, 52 18)"} +{"index":{"_id": "112"}} +{"ogc_type":"streams", "fid": 112, "centerline": "LINESTRING (76 0, 78 4, 73 9)"} +// +// buildings +{"index":{"_id": "113"}} +{"ogc_type":"buildings", "fid": 113, "address": "123 Main Street", 
"position": "POINT (52 30)", "footprint": "POLYGON ((50 31, 54 31, 54 29, 50 29, 50 31))"} +{"index":{"_id": "114"}} +{"ogc_type":"buildings", "fid": 114, "address": "215 Main Street", "position": "POINT (64 33)", "footprint": "POLYGON ((66 34, 62 34, 62 32, 66 32, 66 34))"} +// +// ponds +{"index":{"_id": "120"}} +{"ogc_type":"ponds", "fid": 120, "type": "Stock Pond", "shores": "MULTIPOLYGON (((24 44, 22 42, 24 40, 24 44)), ((26 44, 26 40, 28 42, 26 44)))"} +// +// named places +{"index":{"_id": "117"}} +{"ogc_type":"named_places", "fid": 117, "name": "Ashton", "boundary": "POLYGON ((62 48, 84 48, 84 30, 56 30, 56 34, 62 48))"} +{"index":{"_id": "118"}} +{"ogc_type":"named_places", "fid": 118, "name": "Goose Island", "boundary": "POLYGON ((67 13, 67 18, 59 18, 59 13, 67 13))"} +// +// map neat lines +{"index":{"_id": "115"}} +{"ogc_type":"map_neatlines", "fid": 115, "neatline": "POLYGON ((0 0, 0 48, 84 48, 84 0, 0 0))"} diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.sql-spec new file mode 100644 index 0000000000000..3976c5a8b181e --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.sql-spec @@ -0,0 +1,85 @@ +// +// Basic GEO SELECT +// + +selectLakes +SELECT fid, name, shore FROM lakes ORDER BY fid; +selectRoadSegments +SELECT fid, name, num_lanes, aliases, centerline FROM road_segments ORDER BY fid; +selectDividedRoutes +SELECT fid, name, num_lanes, centerlines FROM divided_routes ORDER BY fid; +selectForests +SELECT fid, name, boundary FROM forests ORDER BY fid; +selectBridges +SELECT fid, name, position FROM bridges ORDER BY fid; +selectStreams +SELECT fid, name, centerline FROM streams ORDER BY fid; +selectBuildings +SELECT fid, address, position, footprint FROM buildings ORDER BY fid; +selectPonds +SELECT fid, type, name, shores FROM ponds ORDER BY fid; +selectNamedPlaces +SELECT fid, name, boundary FROM named_places ORDER BY fid; +selectMapNeatLines +SELECT fid, neatline FROM map_neatlines ORDER BY fid; + +// +// Type conversion functions +// + +// The string serialization is slightly different between ES and H2, so we need to tweak it a bit by uppercasing both +// and removing floating point +selectRoadSegmentsAsWkt +SELECT fid, name, num_lanes, aliases, REPLACE(UCASE(ST_AsText(centerline)), '.0', '') centerline_wkt FROM road_segments ORDER BY fid; + +selectSinglePoint +SELECT ST_GeomFromText('point (10.0 12.0)') point; + + +// +// Geometry Property Functions +// +// H2GIS doesn't follow the standard here that mandates ST_Dimension returns SMALLINT +selectLakesProps +SELECT fid, UCASE(ST_GeometryType(shore)) type FROM lakes ORDER BY fid; +selectRoadSegmentsProps +SELECT fid, UCASE(ST_GeometryType(centerline)) type FROM road_segments ORDER BY fid; +selectDividedRoutesProps +SELECT fid, UCASE(ST_GeometryType(centerlines)) type FROM divided_routes ORDER BY fid; +selectForestsProps +SELECT fid, UCASE(ST_GeometryType(boundary)) type FROM forests ORDER BY fid; +selectBridgesProps +SELECT fid, UCASE(ST_GeometryType(position)) type FROM bridges ORDER BY fid; +selectStreamsProps +SELECT fid, UCASE(ST_GeometryType(centerline)) type FROM streams ORDER BY fid; +selectBuildingsProps +SELECT fid, UCASE(ST_GeometryType(position)) type1, UCASE(ST_GeometryType(footprint)) type2 FROM buildings ORDER BY fid; +selectPondsProps +SELECT fid, UCASE(ST_GeometryType(shores)) type FROM ponds ORDER BY fid; +selectNamedPlacesProps +SELECT fid, UCASE(ST_GeometryType(boundary)) type FROM named_places ORDER BY fid; 
+selectMapNeatLinesProps +SELECT fid, UCASE(ST_GeometryType(neatline)) type FROM map_neatlines ORDER BY fid; + +selectLakesXY +SELECT fid, ST_X(shore) x, ST_Y(shore) y FROM lakes ORDER BY fid; +selectRoadSegmentsXY +SELECT fid, ST_X(centerline) x, ST_Y(centerline) y FROM road_segments ORDER BY fid; +selectDividedRoutesXY +SELECT fid, ST_X(centerlines) x, ST_Y(centerlines) y FROM divided_routes ORDER BY fid; +selectForestsXY +SELECT fid, ST_X(boundary) x, ST_Y(boundary) y FROM forests ORDER BY fid; +selectBridgesPositionsXY +SELECT fid, ST_X(position) x, ST_Y(position) y FROM bridges ORDER BY fid; +selectStreamsXY +SELECT fid, ST_X(centerline) x, ST_Y(centerline) y FROM streams ORDER BY fid; +selectBuildingsXY +SELECT fid, ST_X(position) x, ST_Y(position) y FROM buildings ORDER BY fid; +selectBuildingsFootprintsXY +SELECT fid, ST_X(footprint) x, ST_Y(footprint) y FROM buildings ORDER BY fid; +selectPondsXY +SELECT fid, ST_X(shores) x, ST_Y(shores) y FROM ponds ORDER BY fid; +selectNamedPlacesXY +SELECT fid, ST_X(boundary) x, ST_Y(boundary) y FROM named_places ORDER BY fid; +selectMapNeatLinesXY +SELECT fid, ST_X(neatline) x, ST_Y(neatline) y FROM map_neatlines ORDER BY fid; diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/sqltsch.sql b/x-pack/plugin/sql/qa/src/main/resources/ogc/sqltsch.sql new file mode 100644 index 0000000000000..6d1322ecd3690 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/sqltsch.sql @@ -0,0 +1,672 @@ +-- FILE: sqltsch.sql 10/01/98 +-- +-- 1 2 3 4 5 6 7 8 +--345678901234567890123456789012345678901234567890123456789012345678901234567890 +--////////////////////////////////////////////////////////////////////////////// +-- +-- Copyright 1998, Open GIS Consortium, Inc. +-- +-- The material in this document details an Open GIS Consortium Test Suite in +-- accordance with a license that your organization has signed. Please refer +-- to http://www.opengeospatial.org/testing/ to obtain a copy of the general license +-- (it is part of the Conformance Testing Agreement). +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- OpenGIS Simple Features for SQL (Types and Functions) Test Suite Software +-- +-- This file "sqltsch.sql" is part 1 of a two part standardized test +-- suite in SQL script form. The other file that is required for this test +-- suite, "sqltque.sql", one additional script is provided ("sqltcle.sql") that +-- performs cleanup operations between test runs, and other documents that +-- describe the OGC Conformance Test Program are available via the WWW at +-- http://www.opengeospatial.org/testing/index.htm +-- +-- NOTE CONCERNING INFORMATION ON CONFORMANCE TESTING AND THIS TEST SUITE +-- ---------------------------------------------------------------------- +-- +-- Organizations wishing to submit product for conformance testing should +-- access the above WWW site to discover the proper procedure for obtaining +-- a license to use the OpenGIS(R) certification mark associated with this +-- test suite. +-- +-- +-- NOTE CONCERNING TEST SUITE ADAPTATION +-- ------------------------------------- +-- +-- OGC recognizes that many products will have to adapt this test suite to +-- make it work properly. OGC has documented the allowable adaptations within +-- this test suite where possible. Other information about adaptations may be +-- discovered in the Test Suite Guidelines document for this test suite. 
+-- +-- PLEASE NOTE THE OGC REQUIRES THAT ADAPTATIONS ARE FULLY DOCUMENTED USING +-- LIBERAL COMMENT BLOCKS CONFORMING TO THE FOLLOWING FORMAT: +-- +-- -- !#@ ADAPTATION BEGIN +-- explanatory text goes here +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- original sql goes here +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +-- adated sql goes here +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- BEGIN TEST SUITE CODE +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- Create the neccessary feature and geometry tables(views) and metadata tables +-- (views) to load and query the "Blue Lake" test data for OpenGIS Simple +-- Features for SQL (Types and Functions) test. +-- +-- Required feature tables (views) are: +-- Lakes +-- Road Segments +-- Divided Routes +-- Buildings +-- Forests +-- Bridges +-- Named Places +-- Streams +-- Ponds +-- Map Neatlines +-- +-- Please refer to the Test Suite Guidelines for this test suite for further +-- information concerning this test data. +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- CREATE SPATIAL_REF_SYS METADATA TABLE +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- +-- *** ADAPTATION ALERT **** +-- Implementations do not need to execute this statement if they already +-- create the spatial_ref_sys table or view via another mechanism. +-- The size of the srtext VARCHAR exceeds that allowed on some systems. +-- +-- CREATE TABLE spatial_ref_sys ( +-- srid INTEGER NOT NULL PRIMARY KEY, +-- auth_name VARCHAR(256), +-- auth_srid INTEGER, +-- -- srtext VARCHAR(2048) +-- srtext VARCHAR(2000) +-- ); +-- -- +-- INSERT INTO spatial_ref_sys VALUES(101, 'POSC', 32214, +-- 'PROJCS["UTM_ZONE_14N", GEOGCS["World Geodetic System 72", +-- DATUM["WGS_72", SPHEROID["NWL_10D", 6378135, 298.26]], +-- PRIMEM["Greenwich", 0], UNIT["Meter", 1.0]], +-- PROJECTION["Transverse_Mercator"], +-- PARAMETER["False_Easting", 500000.0], +-- PARAMETER["False_Northing", 0.0], +-- PARAMETER["Central_Meridian", -99.0], +-- PARAMETER["Scale_Factor", 0.9996], +-- PARAMETER["Latitude_of_origin", 0.0], +-- UNIT["Meter", 1.0]]' +-- ); +-- +-- +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- CREATE FEATURE SCHEMA +-- +-- *** ADAPTATION ALERT *** +-- The following schema is created using CREATE TABLE statements. +-- Furthermore, it DOES NOT create the GEOMETRY_COLUMNS metadata table. 
+-- Implementer's should replace the CREATE TABLES below with the mechanism +-- that it uses to create feature tables and the GEOMETRY_COLUMNS table/view +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-------------------------------------------------------------------------------- +-- +-- Create feature tables +-- +-------------------------------------------------------------------------------- +-- +-- Lakes +-- +-- +-- +-- +CREATE TABLE lakes ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + shore POLYGON +); +-- +-- Road Segments +-- +-- +-- +-- +CREATE TABLE road_segments ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + aliases VARCHAR(64), + num_lanes INTEGER, + centerline LINESTRING +); +-- +-- Divided Routes +-- +-- +-- +-- +CREATE TABLE divided_routes ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + num_lanes INTEGER, + centerlines MULTILINESTRING +); +-- +-- Forests +-- +-- +-- +-- +CREATE TABLE forests ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + boundary MULTIPOLYGON +); +-- +-- Bridges +-- +-- +-- +-- +CREATE TABLE bridges ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + position POINT +); +-- +-- Streams +-- +-- +-- +-- +CREATE TABLE streams ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + centerline LINESTRING +); +-- +-- Buildings +-- +--*** ADAPTATION ALERT *** +-- A view could be used to provide the below semantics without multiple geometry +-- columns in a table. In other words, create two tables. One table would +-- contain the POINT position and the other would create the POLYGON footprint. +-- Then create a view with the semantics of the buildings table below. +-- +-- +-- +CREATE TABLE buildings ( + fid INTEGER NOT NULL PRIMARY KEY, + address VARCHAR(64), + position POINT, + footprint POLYGON +); +-- +-- Ponds +-- +-- +-- +-- +-- -- !#@ ADAPTATION BEGIN +-- Fixes typo in the MULTIPOYLGON type +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- CREATE TABLE ponds ( +-- fid INTEGER NOT NULL PRIMARY KEY, +-- name VARCHAR(64), +-- type VARCHAR(64), +-- shores MULTIPOYLGON +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +CREATE TABLE ponds ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + type VARCHAR(64), + shores MULTIPOLYGON +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END + +-- +-- Named Places +-- +-- +-- +-- +CREATE TABLE named_places ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + boundary POLYGON +); +-- +-- Map Neatline +-- +-- +-- +-- +CREATE TABLE map_neatlines ( + fid INTEGER NOT NULL PRIMARY KEY, + neatline POLYGON +); +-- +-- +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- POPULATE GEOMETRY AND FEATURE TABLES +-- +-- *** ADAPTATION ALERT *** +-- This script DOES NOT make any inserts into a GEOMTERY_COLUMNS table/view. +-- Implementers should insert whatever makes this happen in their implementation +-- below. Furthermore, the inserts below may be replaced by whatever mechanism +-- may be provided by implementers to insert rows in feature tables such that +-- metadata (and other mechanisms) are updated properly. 
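+--
+-- As noted in the ADAPTATION ALERT for the buildings table above, the same semantics
+-- could be provided without two geometry columns in one table. A sketch of that
+-- alternative is given below for reference only; the table names are illustrative and
+-- this form is not used by the present test suite:
+--
+-- CREATE TABLE building_positions (
+--     fid INTEGER NOT NULL PRIMARY KEY,
+--     address VARCHAR(64),
+--     position POINT
+-- );
+-- CREATE TABLE building_footprints (
+--     fid INTEGER NOT NULL PRIMARY KEY,
+--     footprint POLYGON
+-- );
+-- CREATE VIEW buildings AS
+--     SELECT p.fid, p.address, p.position, f.footprint
+--     FROM building_positions p JOIN building_footprints f ON p.fid = f.fid;
+--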
+-- +--////////////////////////////////////////////////////////////////////////////// +-- +--============================================================================== +-- Lakes +-- +-- We have one lake, Blue Lake. It is a polygon with a hole. Its geometry is +-- described in WKT format as: +-- 'POLYGON( (52 18, 66 23, 73 9, 48 6, 52 18), +-- (59 18, 67 18, 67 13, 59 13, 59 18) )' +--============================================================================== +-- +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO lakes VALUES (101, 'BLUE LAKE', +-- PolygonFromText('POLYGON((52 18,66 23,73 9,48 6,52 18),(59 18,67 18,67 13,59 13,59 18))', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO lakes VALUES (101, 'BLUE LAKE', + ST_PolyFromText('POLYGON((52 18,66 23,73 9,48 6,52 18),(59 18,67 18,67 13,59 13,59 18))', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Road segments +-- +-- We have five road segments. Their geometries are all linestrings. +-- The geometries are described in WKT format as: +-- name 'Route 5', fid 102 +-- 'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' +-- name 'Route 5', fid 103 +-- 'LINESTRING( 44 31, 56 34, 70 38 )' +-- name 'Route 5', fid 104 +-- 'LINESTRING( 70 38, 72 48 )' +-- name 'Main Street', fid 105 +-- 'LINESTRING( 70 38, 84 42 )' +-- name 'Dirt Road by Green Forest', fid 106 +-- 'LINESTRING( 28 26, 28 0 )' +-- +--================== +-- +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO road_segments VALUES(102, 'Route 5', NULL, 2, +-- LineStringFromText('LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101) +-- ); +-- INSERT INTO road_segments VALUES(103, 'Route 5', 'Main Street', 4, +-- LineStringFromText('LINESTRING( 44 31, 56 34, 70 38 )' ,101) +-- ); +-- INSERT INTO road_segments VALUES(104, 'Route 5', NULL, 2, +-- LineStringFromText('LINESTRING( 70 38, 72 48 )' ,101) +-- ); +-- INSERT INTO road_segments VALUES(105, 'Main Street', NULL, 4, +-- LineStringFromText('LINESTRING( 70 38, 84 42 )' ,101) +-- ); +-- INSERT INTO road_segments VALUES(106, 'Dirt Road by Green Forest', NULL, 1, +-- LineStringFromText('LINESTRING( 28 26, 28 0 )',101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO road_segments VALUES(102, 'Route 5', NULL, 2, + ST_LineFromText('LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101) +); +INSERT INTO road_segments VALUES(103, 'Route 5', 'Main Street', 4, + ST_LineFromText('LINESTRING( 44 31, 56 34, 70 38 )' ,101) +); +INSERT INTO road_segments VALUES(104, 'Route 5', NULL, 2, + ST_LineFromText('LINESTRING( 70 38, 72 48 )' ,101) +); +INSERT INTO road_segments VALUES(105, 'Main Street', NULL, 4, + ST_LineFromText('LINESTRING( 70 38, 84 42 )' ,101) +); +INSERT INTO road_segments VALUES(106, 'Dirt Road by Green Forest', NULL, 1, + ST_LineFromText('LINESTRING( 28 26, 28 0 )',101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END + +-- +--================== +-- DividedRoutes +-- +-- We have one divided route. Its geometry is a multilinestring. 
+-- The geometry is described in WKT format as: +-- 'MULTILINESTRING( (10 48, 10 21, 10 0), (16 0, 10 23, 16 48) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO divided_routes VALUES(119, 'Route 75', 4, +-- MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO divided_routes VALUES(119, 'Route 75', 4, + ST_MLineFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Forests +-- +-- We have one forest. Its geometry is a multipolygon. +-- The geometry is described in WKT format as: +-- 'MULTIPOLYGON( ( (28 26, 28 0, 84 0, 84 42, 28 26), +-- (52 18, 66 23, 73 9, 48 6, 52 18) ), +-- ( (59 18, 67 18, 67 13, 59 13, 59 18) ) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO forests VALUES(109, 'Green Forest', +-- MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO forests VALUES(109, 'Green Forest', + ST_MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END + +-- +--================== +-- Bridges +-- +-- We have one bridge. Its geometry is a point. +-- The geometry is described in WKT format as: +-- 'POINT( 44 31 )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO bridges VALUES(110, 'Cam Bridge', +-- PointFromText('POINT( 44 31 )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO bridges VALUES(110, 'Cam Bridge', + ST_PointFromText('POINT( 44 31 )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Streams +-- +-- We have two streams. Their geometries are linestrings. 
+-- The geometries are described in WKT format as: +-- 'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )' +-- 'LINESTRING( 76 0, 78 4, 73 9 )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO streams VALUES(111, 'Cam Stream', +-- LineStringFromText('LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101) +-- ); +-- INSERT INTO streams VALUES(112, NULL, +-- LineStringFromText('LINESTRING( 76 0, 78 4, 73 9 )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO streams VALUES(111, 'Cam Stream', + ST_LineFromText('LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101) +); +INSERT INTO streams VALUES(112, NULL, + ST_LineFromText('LINESTRING( 76 0, 78 4, 73 9 )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Buildings +-- +-- We have two buildings. Their geometries are points and polygons. +-- The geometries are described in WKT format as: +-- address '123 Main Street' fid 113 +-- 'POINT( 52 30 )' and +-- 'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )' +-- address '215 Main Street' fid 114 +-- 'POINT( 64 33 )' and +-- 'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO buildings VALUES(113, '123 Main Street', +-- PointFromText('POINT( 52 30 )', 101), +-- PolygonFromText('POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101) +-- ); +-- INSERT INTO buildings VALUES(114, '215 Main Street', +-- PointFromText('POINT( 64 33 )', 101), +-- PolygonFromText('POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO buildings VALUES(113, '123 Main Street', + ST_PointFromText('POINT( 52 30 )', 101), + ST_PolyFromText('POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101) +); +INSERT INTO buildings VALUES(114, '215 Main Street', + ST_PointFromText('POINT( 64 33 )', 101), + ST_PolyFromText('POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Ponds +-- +-- We have one pond. Its geometry is a multipolygon. 
+-- The geometry is described in WKT format as: +-- 'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ), ( ( 26 44, 26 40, 28 42, 26 44) ) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO ponds VALUES(120, NULL, 'Stock Pond', +-- MultiPolygonFromText('MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ), ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO ponds VALUES(120, NULL, 'Stock Pond', + ST_MPolyFromText('MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ), ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END + +-- +--================== +-- Named Places +-- +-- We have two named places. Their geometries are polygons. +-- The geometries are described in WKT format as: +-- name 'Ashton' fid 117 +-- 'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )' +-- address 'Goose Island' fid 118 +-- 'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO named_places VALUES(117, 'Ashton', +-- PolygonFromText('POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101) +-- ); +-- INSERT INTO named_places VALUES(118, 'Goose Island', +-- PolygonFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO named_places VALUES(117, 'Ashton', + ST_PolyFromText('POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101) +); +INSERT INTO named_places VALUES(118, 'Goose Island', + ST_PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Map Neatlines +-- +-- We have one map neatline. Its geometry is a polygon. 
+-- The geometry is described in WKT format as: +-- 'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO map_neatlines VALUES(115, +-- PolygonFromText('POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO map_neatlines VALUES(115, + ST_PolyFromText('POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +-- +-- +-- end sqltsch.sql \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec new file mode 100644 index 0000000000000..c9380fae2809e --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec @@ -0,0 +1,15 @@ +// +// Geo-specific Sys Commands +// + +geoSysColumns +SYS COLUMNS TABLE LIKE 'geo'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i|BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i| NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |city |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |location |114 |GEO_POINT |58 |16 |null |null |1 |null |null |114 |0 |null |2 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |location_no_dv |114 |GEO_POINT |58 |16 |null |null |1 |null |null |114 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |region |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |region_point |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |5 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |shape |114 |GEO_SHAPE |2147483647 |2147483647 |null |null |1 |null |null |114 |0 |null |6 |YES |null |null |null |null |NO |NO +; \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.1.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.1.jar.sha1 deleted file mode 100644 index 75e809754ecee..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ac3dbf89dbf2ee385185dd0cd3064fe789efee0 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.2.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.2.jar.sha1 new file mode 100644 index 0000000000000..9cbac57161c8e --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.2.jar.sha1 @@ -0,0 +1 @@ +a079fc39ccc3de02acdeb7117443e5d9bd431687 \ No newline at end of file diff --git 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java index db84a444f5794..d5a4cb436e6a5 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java @@ -63,6 +63,7 @@ import static org.elasticsearch.xpack.sql.stats.FeatureMetric.LOCAL; import static org.elasticsearch.xpack.sql.stats.FeatureMetric.ORDERBY; import static org.elasticsearch.xpack.sql.stats.FeatureMetric.WHERE; +import static org.elasticsearch.xpack.sql.type.DataType.GEO_SHAPE; /** * The verifier has the role of checking the analyzed tree for failures and build a list of failures following this check. @@ -131,7 +132,6 @@ Collection verify(LogicalPlan plan) { // start bottom-up plan.forEachUp(p -> { - if (p.analyzed()) { return; } @@ -236,6 +236,7 @@ Collection verify(LogicalPlan plan) { checkForScoreInsideFunctions(p, localFailures); checkNestedUsedInGroupByOrHaving(p, localFailures); + checkForGeoFunctionsOnDocValues(p, localFailures); // everything checks out // mark the plan as analyzed @@ -719,4 +720,33 @@ private static void checkNestedUsedInGroupByOrHaving(LogicalPlan p, Set fail(nested.get(0), "HAVING isn't (yet) compatible with nested fields " + new AttributeSet(nested).names())); } } + + /** + * Makes sure that geo shapes do not appear in filter, aggregation and sorting contexts + */ + private static void checkForGeoFunctionsOnDocValues(LogicalPlan p, Set localFailures) { + + p.forEachDown(f -> { + f.condition().forEachUp(fa -> { + if (fa.field().getDataType() == GEO_SHAPE) { + localFailures.add(fail(fa, "geo shapes cannot be used for filtering")); + } + }, FieldAttribute.class); + }, Filter.class); + + // geo shape fields shouldn't be used in aggregates or having (yet) + p.forEachDown(a -> a.groupings().forEach(agg -> agg.forEachUp(fa -> { + if (fa.field().getDataType() == GEO_SHAPE) { + localFailures.add(fail(fa, "geo shapes cannot be used in grouping")); + } + }, FieldAttribute.class)), Aggregate.class); + + + // geo shape fields shouldn't be used in order by clauses + p.forEachDown(o -> o.order().forEach(agg -> agg.forEachUp(fa -> { + if (fa.field().getDataType() == GEO_SHAPE) { + localFailures.add(fail(fa, "geo shapes cannot be used for sorting")); + } + }, FieldAttribute.class)), OrderBy.class); + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java index 652197473abf4..13294fbca221b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java @@ -5,13 +5,17 @@ */ package org.elasticsearch.xpack.sql.execution.search.extractor; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.SearchHit; import 
org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.util.DateUtils; @@ -128,13 +132,31 @@ private Object unwrapMultiValue(Object values) { if (list.isEmpty()) { return null; } else { - if (arrayLeniency || list.size() == 1) { - return unwrapMultiValue(list.get(0)); - } else { - throw new SqlIllegalArgumentException("Arrays (returned by [{}]) are not supported", fieldName); + // let's make sure first that we are not dealing with an geo_point represented as an array + if (isGeoPointArray(list) == false) { + if (list.size() == 1 || arrayLeniency) { + return unwrapMultiValue(list.get(0)); + } else { + throw new SqlIllegalArgumentException("Arrays (returned by [{}]) are not supported", fieldName); + } } } } + if (dataType == DataType.GEO_POINT) { + try { + GeoPoint geoPoint = GeoUtils.parseGeoPoint(values, true); + return new GeoShape(geoPoint.lon(), geoPoint.lat()); + } catch (ElasticsearchParseException ex) { + throw new SqlIllegalArgumentException("Cannot parse geo_point value [{}] (returned by [{}])", values, fieldName); + } + } + if (dataType == DataType.GEO_SHAPE) { + try { + return new GeoShape(values); + } catch (IOException ex) { + throw new SqlIllegalArgumentException("Cannot read geo_shape value [{}] (returned by [{}])", values, fieldName); + } + } if (values instanceof Map) { throw new SqlIllegalArgumentException("Objects (returned by [{}]) are not supported", fieldName); } @@ -149,6 +171,17 @@ private Object unwrapMultiValue(Object values) { throw new SqlIllegalArgumentException("Type {} (returned by [{}]) is not supported", values.getClass().getSimpleName(), fieldName); } + private boolean isGeoPointArray(List list) { + if (dataType != DataType.GEO_POINT) { + return false; + } + // we expect the point in [lon lat] or [lon lat alt] formats + if (list.size() > 3 || list.size() < 1) { + return false; + } + return list.get(0) instanceof Number; + } + @SuppressWarnings({ "unchecked", "rawtypes" }) Object extractFromSource(Map map) { Object value = null; @@ -173,7 +206,9 @@ Object extractFromSource(Map map) { if (node instanceof List) { List listOfValues = (List) node; - if (listOfValues.size() == 1 || arrayLeniency) { + // we can only do this optimization until the last element of our pass since geo points are using arrays + // and we don't want to blindly ignore the second element of array if arrayLeniency is enabled + if ((i < path.length - 1) && (listOfValues.size() == 1 || arrayLeniency)) { // this is a List with a size of 1 e.g.: {"a" : [{"b" : "value"}]} meaning the JSON is a list with one element // or a list of values with one element e.g.: {"a": {"b" : ["value"]}} // in case of being lenient about arrays, just extract the first value in the array diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java index f6e1e3ad8be69..d382dad83a19d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java @@ -57,6 +57,11 @@ public static TypeResolution isNumericOrDateOrTime(Expression e, String operatio "date", "time", "datetime", "numeric"); } + + public static TypeResolution isGeo(Expression e, String operationName, ParamOrdinal paramOrd) { 
+ return isType(e, DataType::isGeo, operationName, paramOrd, "geo_point", "geo_shape"); + } + public static TypeResolution isExact(Expression e, String message) { if (e instanceof FieldAttribute) { EsField.Exact exact = ((FieldAttribute) e).getExactInfo(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java index 0e9f07ef2132c..3a9ae06203476 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java @@ -46,6 +46,13 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.SecondOfMinute; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.WeekOfYear; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Year; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StAswkt; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistance; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StGeometryType; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StWkttosql; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StX; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StY; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StZ; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ACos; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ASin; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ATan; @@ -249,11 +256,23 @@ private void defineDefaultFunctions() { def(Space.class, Space::new, "SPACE"), def(Substring.class, Substring::new, "SUBSTRING"), def(UCase.class, UCase::new, "UCASE")); + // DataType conversion addToMap(def(Cast.class, Cast::new, "CAST", "CONVERT")); // Scalar "meta" functions addToMap(def(Database.class, Database::new, "DATABASE"), def(User.class, User::new, "USER")); + + // Geo Functions + addToMap(def(StAswkt.class, StAswkt::new, "ST_ASWKT", "ST_ASTEXT"), + def(StDistance.class, StDistance::new, "ST_DISTANCE"), + def(StWkttosql.class, StWkttosql::new, "ST_WKTTOSQL", "ST_GEOMFROMTEXT"), + def(StGeometryType.class, StGeometryType::new, "ST_GEOMETRYTYPE"), + def(StX.class, StX::new, "ST_X"), + def(StY.class, StY::new, "ST_Y"), + def(StZ.class, StZ::new, "ST_Z") + ); + // Special addToMap(def(Score.class, Score::new, "SCORE")); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java index d14aeea507f47..0b9bbd1094a44 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java @@ -11,6 +11,9 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NonIsoDateTimeProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor; +import 
org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistanceProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StWkttosqlProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.TimeProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryOptionalMathProcessor; @@ -98,6 +101,10 @@ public static List getNamedWriteables() { entries.add(new Entry(Processor.class, LocateFunctionProcessor.NAME, LocateFunctionProcessor::new)); entries.add(new Entry(Processor.class, ReplaceFunctionProcessor.NAME, ReplaceFunctionProcessor::new)); entries.add(new Entry(Processor.class, SubstringFunctionProcessor.NAME, SubstringFunctionProcessor::new)); + // geo + entries.add(new Entry(Processor.class, GeoProcessor.NAME, GeoProcessor::new)); + entries.add(new Entry(Processor.class, StWkttosqlProcessor.NAME, StWkttosqlProcessor::new)); + entries.add(new Entry(Processor.class, StDistanceProcessor.NAME, StDistanceProcessor::new)); return entries; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessor.java new file mode 100644 index 0000000000000..519e4c0c74092 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessor.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; + +import java.io.IOException; +import java.util.function.Function; + +public class GeoProcessor implements Processor { + + private interface GeoShapeFunction { + default R apply(Object o) { + if (o instanceof GeoShape) { + return doApply((GeoShape) o); + } else { + throw new SqlIllegalArgumentException("A geo_point or geo_shape is required; received [{}]", o); + } + } + + R doApply(GeoShape s); + } + + public enum GeoOperation { + ASWKT(GeoShape::toString), + GEOMETRY_TYPE(GeoShape::getGeometryType), + X(GeoShape::getX), + Y(GeoShape::getY), + Z(GeoShape::getZ); + + private final Function apply; + + GeoOperation(GeoShapeFunction apply) { + this.apply = l -> l == null ? 
null : apply.apply(l); + } + + public final Object apply(Object l) { + return apply.apply(l); + } + } + + public static final String NAME = "geo"; + + private final GeoOperation processor; + + public GeoProcessor(GeoOperation processor) { + this.processor = processor; + } + + public GeoProcessor(StreamInput in) throws IOException { + processor = in.readEnum(GeoOperation.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(processor); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Object process(Object input) { + return processor.apply(input); + } + + GeoOperation processor() { + return processor; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + GeoProcessor other = (GeoProcessor) obj; + return processor == other.processor; + } + + @Override + public int hashCode() { + return processor.hashCode(); + } + + @Override + public String toString() { + return processor.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java new file mode 100644 index 0000000000000..74b5c9646b853 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java @@ -0,0 +1,222 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.geo.GeometryParser; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.geo.geometry.Circle; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.GeometryCollection; +import org.elasticsearch.geo.geometry.GeometryVisitor; +import org.elasticsearch.geo.geometry.Line; +import org.elasticsearch.geo.geometry.LinearRing; +import org.elasticsearch.geo.geometry.MultiLine; +import org.elasticsearch.geo.geometry.MultiPoint; +import org.elasticsearch.geo.geometry.MultiPolygon; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.geometry.Polygon; +import org.elasticsearch.geo.geometry.Rectangle; +import org.elasticsearch.geo.utils.WellKnownText; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + +import java.io.IOException; +import java.io.InputStream; +import java.text.ParseException; +import java.util.Objects; + +/** + * Wrapper class to represent a GeoShape in SQL + * + * It is required to override the XContent serialization. 
The ShapeBuilder serializes using GeoJSON by default, + * but in SQL we need the serialization to be WKT-based. + */ +public class GeoShape implements ToXContentFragment, NamedWriteable { + + public static final String NAME = "geo"; + + private final Geometry shape; + + public GeoShape(double lon, double lat) { + shape = new Point(lat, lon); + } + + public GeoShape(Object value) throws IOException { + try { + shape = parse(value); + } catch (ParseException ex) { + throw new SqlIllegalArgumentException("Cannot parse [" + value + "] as a geo_shape value", ex); + } + } + + public GeoShape(StreamInput in) throws IOException { + String value = in.readString(); + try { + shape = parse(value); + } catch (ParseException ex) { + throw new SqlIllegalArgumentException("Cannot parse [" + value + "] as a geo_shape value", ex); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(WellKnownText.toWKT(shape)); + } + + @Override + public String toString() { + return WellKnownText.toWKT(shape); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(WellKnownText.toWKT(shape)); + } + + public Geometry toGeometry() { + return shape; + } + + public Point firstPoint() { + return shape.visit(new GeometryVisitor() { + @Override + public Point visit(Circle circle) { + return new Point(circle.getLat(), circle.getLon(), circle.hasAlt() ? circle.getAlt() : Double.NaN); + } + + @Override + public Point visit(GeometryCollection collection) { + if (collection.size() > 0) { + return collection.get(0).visit(this); + } + return null; + } + + @Override + public Point visit(Line line) { + if (line.length() > 0) { + return new Point(line.getLat(0), line.getLon(0), line.hasAlt() ? line.getAlt(0) : Double.NaN); + } + return null; + } + + @Override + public Point visit(LinearRing ring) { + return visit((Line) ring); + } + + @Override + public Point visit(MultiLine multiLine) { + return visit((GeometryCollection) multiLine); + } + + @Override + public Point visit(MultiPoint multiPoint) { + return visit((GeometryCollection) multiPoint); + } + + @Override + public Point visit(MultiPolygon multiPolygon) { + return visit((GeometryCollection) multiPolygon); + } + + @Override + public Point visit(Point point) { + return point; + } + + @Override + public Point visit(Polygon polygon) { + return visit(polygon.getPolygon()); + } + + @Override + public Point visit(Rectangle rectangle) { + return new Point(rectangle.getMinLat(), rectangle.getMinLon(), rectangle.getMinAlt()); + } + }); + } + + public Double getX() { + Point firstPoint = firstPoint(); + return firstPoint != null ? firstPoint.getLon() : null; + } + + public Double getY() { + Point firstPoint = firstPoint(); + return firstPoint != null ? firstPoint.getLat() : null; + } + + public Double getZ() { + Point firstPoint = firstPoint(); + return firstPoint != null && firstPoint.hasAlt() ? 
firstPoint.getAlt() : null; + } + + public String getGeometryType() { + return toGeometry().type().name(); + } + + public static double distance(GeoShape shape1, GeoShape shape2) { + if (shape1.shape instanceof Point == false) { + throw new SqlIllegalArgumentException("distance calculation is only supported for points; received [{}]", shape1); + } + if (shape2.shape instanceof Point == false) { + throw new SqlIllegalArgumentException("distance calculation is only supported for points; received [{}]", shape2); + } + double srcLat = ((Point) shape1.shape).getLat(); + double srcLon = ((Point) shape1.shape).getLon(); + double dstLat = ((Point) shape2.shape).getLat(); + double dstLon = ((Point) shape2.shape).getLon(); + return GeoUtils.arcDistance(srcLat, srcLon, dstLat, dstLon); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + GeoShape geoShape = (GeoShape) o; + return shape.equals(geoShape.shape); + } + + @Override + public int hashCode() { + return Objects.hash(shape); + } + + @Override + public String getWriteableName() { + return NAME; + } + + private static Geometry parse(Object value) throws IOException, ParseException { + XContentBuilder content = JsonXContent.contentBuilder(); + content.startObject(); + content.field("value", value); + content.endObject(); + + try (InputStream stream = BytesReference.bytes(content).streamInput(); + XContentParser parser = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + parser.nextToken(); // start object + parser.nextToken(); // field name + parser.nextToken(); // field value + return GeometryParser.parse(parser, true, true, true); + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StAswkt.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StAswkt.java new file mode 100644 index 0000000000000..5c4b6edbe87eb --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StAswkt.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * ST_AsWKT function that takes a geometry and returns its Well Known Text representation + */ +public class StAswkt extends UnaryGeoFunction { + + public StAswkt(Source source, Expression field) { + super(source, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StAswkt::new, field()); + } + + @Override + protected StAswkt replaceChild(Expression newChild) { + return new StAswkt(source(), newChild); + } + + @Override + protected GeoOperation operation() { + return GeoOperation.ASWKT; + } + + @Override + public DataType dataType() { + return DataType.KEYWORD; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistance.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistance.java new file mode 100644 index 0000000000000..fd14e90dd9d93 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistance.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; +import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.expression.predicate.BinaryOperator; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isGeo; +import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; + +/** + * Calculates the distance between two points + */ +public class StDistance extends BinaryOperator { + + private static final StDistanceFunction FUNCTION = new StDistanceFunction(); + + public StDistance(Source source, Expression source1, Expression source2) { + super(source, source1, source2, FUNCTION); + } + + @Override + protected StDistance replaceChildren(Expression newLeft, Expression newRight) { + return new StDistance(source(), newLeft, newRight); + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StDistance::new, left(), right()); + } + + @Override + public ScriptTemplate scriptWithField(FieldAttribute field) { + return new ScriptTemplate(processScript("{sql}.geoDocValue(doc,{})"), + paramsBuilder().variable(field.exactAttribute().name()).build(), + dataType()); + } + + @Override + protected TypeResolution resolveInputType(Expression e, Expressions.ParamOrdinal paramOrdinal) { + return isGeo(e, sourceText(), paramOrdinal); + } + + @Override + 
public StDistance swapLeftAndRight() { + return new StDistance(source(), right(), left()); + } + + @Override + protected Pipe makePipe() { + return new StDistancePipe(source(), this, Expressions.pipe(left()), Expressions.pipe(right())); + } + + @Override + protected String scriptMethodName() { + return "stDistance"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceFunction.java new file mode 100644 index 0000000000000..d1c15c1e2a1b2 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceFunction.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.xpack.sql.expression.predicate.PredicateBiFunction; + +class StDistanceFunction implements PredicateBiFunction { + + @Override + public String name() { + return "ST_DISTANCE"; + } + + @Override + public String symbol() { + return "ST_DISTANCE"; + } + + @Override + public Double doApply(Object s1, Object s2) { + return StDistanceProcessor.process(s1, s2); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistancePipe.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistancePipe.java new file mode 100644 index 0000000000000..c944266482651 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistancePipe.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.BinaryPipe; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; + +import java.util.Objects; + +public class StDistancePipe extends BinaryPipe { + + public StDistancePipe(Source source, Expression expression, Pipe left, Pipe right) { + super(source, expression, left, right); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StDistancePipe::new, expression(), left(), right()); + } + + @Override + protected BinaryPipe replaceChildren(Pipe left, Pipe right) { + return new StDistancePipe(source(), expression(), left, right); + } + + @Override + public StDistanceProcessor asProcessor() { + return new StDistanceProcessor(left().asProcessor(), right().asProcessor()); + } + + @Override + public int hashCode() { + return Objects.hash(left(), right()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + StDistancePipe other = (StDistancePipe) obj; + return Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessor.java new file mode 100644 index 0000000000000..d6c9026b982d9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessor.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.gen.processor.BinaryProcessor; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; + +import java.io.IOException; +import java.util.Objects; + +public class StDistanceProcessor extends BinaryProcessor { + + public static final String NAME = "geo_distance"; + + public StDistanceProcessor(Processor source1, Processor source2) { + super(source1, source2); + } + + public StDistanceProcessor(StreamInput in) throws IOException { + super(in); + } + + @Override + protected void doWrite(StreamOutput out) throws IOException { + + } + + @Override + public Object process(Object input) { + Object l = left().process(input); + checkParameter(l); + Object r = right().process(input); + checkParameter(r); + return doProcess(l, r); + } + + @Override + protected Object doProcess(Object left, Object right) { + return process(left, right); + } + + public static Double process(Object source1, Object source2) { + if (source1 == null || source2 == null) { + return null; + } + + if (source1 instanceof GeoShape == false) { + throw new SqlIllegalArgumentException("A geo_point or geo_shape with type point is required; received [{}]", source1); + } + if (source2 instanceof GeoShape == false) { + throw new SqlIllegalArgumentException("A geo_point or geo_shape with type point is required; received [{}]", source2); + } + return GeoShape.distance((GeoShape) source1, (GeoShape) source2); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + StDistanceProcessor other = (StDistanceProcessor) obj; + return Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()); + } + + @Override + public int hashCode() { + return Objects.hash(left(), right()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StGeometryType.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StGeometryType.java new file mode 100644 index 0000000000000..15215bd9201de --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StGeometryType.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * ST_GEOMETRY_TYPE function that takes a geometry and returns its type + */ +public class StGeometryType extends UnaryGeoFunction { + + public StGeometryType(Source source, Expression field) { + super(source, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StGeometryType::new, field()); + } + + @Override + protected StGeometryType replaceChild(Expression newChild) { + return new StGeometryType(source(), newChild); + } + + @Override + protected GeoOperation operation() { + return GeoOperation.GEOMETRY_TYPE; + } + + @Override + public DataType dataType() { + return DataType.KEYWORD; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosql.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosql.java new file mode 100644 index 0000000000000..3ebae55dec4f0 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosql.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.sql.expression.gen.script.Scripts; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isString; + +/** + * Constructs geometric objects from their WTK representations + */ +public class StWkttosql extends UnaryScalarFunction { + + public StWkttosql(Source source, Expression field) { + super(source, field); + } + + @Override + protected StWkttosql replaceChild(Expression newChild) { + return new StWkttosql(source(), newChild); + } + + @Override + protected TypeResolution resolveType() { + if (field().dataType().isString()) { + return TypeResolution.TYPE_RESOLVED; + } + return isString(field(), functionName(), Expressions.ParamOrdinal.DEFAULT); + } + + @Override + protected Processor makeProcessor() { + return StWkttosqlProcessor.INSTANCE; + } + + @Override + public DataType dataType() { + return DataType.GEO_SHAPE; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StWkttosql::new, field()); + } + + @Override + public String processScript(String script) { + return Scripts.formatTemplate(Scripts.SQL_SCRIPTS + ".stWktToSql(" + script + ")"); + } + + @Override + public Object fold() { + return StWkttosqlProcessor.INSTANCE.process(field().fold()); + } + +} diff --git 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessor.java new file mode 100644 index 0000000000000..f17ee2315befe --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessor.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; + +import java.io.IOException; + +public class StWkttosqlProcessor implements Processor { + + static final StWkttosqlProcessor INSTANCE = new StWkttosqlProcessor(); + + public static final String NAME = "geo_wkttosql"; + + StWkttosqlProcessor() { + } + + public StWkttosqlProcessor(StreamInput in) throws IOException { + } + + @Override + public Object process(Object input) { + return StWkttosqlProcessor.apply(input); + } + + public static GeoShape apply(Object input) { + if (input == null) { + return null; + } + + if ((input instanceof String) == false) { + throw new SqlIllegalArgumentException("A string is required; received [{}]", input); + } + try { + return new GeoShape(input); + } catch (IOException | IllegalArgumentException | ElasticsearchParseException ex) { + throw new SqlIllegalArgumentException("Cannot parse [{}] as a geo_shape value", input); + } + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StX.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StX.java new file mode 100644 index 0000000000000..f3cdafbe70dab --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StX.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
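As a quick usage sketch of the processor above (illustrative only, not part of the change): StWkttosqlProcessor.apply is the public static entry point that turns WKT text into a GeoShape, passes null through, and rejects everything else with SqlIllegalArgumentException, matching the behaviour exercised by StWkttosqlProcessorTests further down.

    import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape;
    import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StWkttosqlProcessor;

    public class WktParsingSketch {
        public static void main(String[] args) {
            GeoShape point = StWkttosqlProcessor.apply("POINT (10 20)"); // toString(): point (10.0 20.0)
            GeoShape nothing = StWkttosqlProcessor.apply(null);          // null is passed through
            // StWkttosqlProcessor.apply(42) or apply("not wkt") would throw SqlIllegalArgumentException
            System.out.println(point + " / " + nothing);
        }
    }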
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * ST_X function that takes a geometry and returns the X coordinate of its first point + */ +public class StX extends UnaryGeoFunction { + + public StX(Source source, Expression field) { + super(source, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StX::new, field()); + } + + @Override + protected StX replaceChild(Expression newChild) { + return new StX(source(), newChild); + } + + @Override + protected GeoOperation operation() { + return GeoOperation.X; + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StY.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StY.java new file mode 100644 index 0000000000000..0a9bc3aa1a40b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StY.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * ST_Y function that takes a geometry and returns the Y coordinate of its first point + */ +public class StY extends UnaryGeoFunction { + + public StY(Source source, Expression field) { + super(source, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StY::new, field()); + } + + @Override + protected StY replaceChild(Expression newChild) { + return new StY(source(), newChild); + } + + @Override + protected GeoOperation operation() { + return GeoOperation.Y; + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StZ.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StZ.java new file mode 100644 index 0000000000000..b6c0c9466bbe1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StZ.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
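For reference, ST_X, ST_Y, ST_Z, ST_GeometryType and ST_AsWKT all funnel into GeoProcessor with a different GeoOperation. A minimal sketch of evaluating the operations directly on a point (illustrative only; the expected values mirror GeoProcessorTests later in this change):

    import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation;
    import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape;

    public class UnaryGeoSketch {
        public static void main(String[] args) {
            GeoShape point = new GeoShape(10, 20);                        // lon, lat
            System.out.println(GeoOperation.X.apply(point));              // 10.0
            System.out.println(GeoOperation.Y.apply(point));              // 20.0
            System.out.println(GeoOperation.ASWKT.apply(point));          // point (10.0 20.0)
            System.out.println(GeoOperation.GEOMETRY_TYPE.apply(point));  // POINT
        }
    }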
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * ST_Z function that takes a geometry and returns the Z coordinate of its first point + */ +public class StZ extends UnaryGeoFunction { + + public StZ(Source source, Expression field) { + super(source, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StZ::new, field()); + } + + @Override + protected StZ replaceChild(Expression newChild) { + return new StZ(source(), newChild); + } + + @Override + protected GeoOperation operation() { + return GeoOperation.Z; + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/UnaryGeoFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/UnaryGeoFunction.java new file mode 100644 index 0000000000000..50c05b7fbedb7 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/UnaryGeoFunction.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.Locale; +import java.util.Objects; + +import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isGeo; +import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; + +/** + * Base class for functions that get a single geo shape or geo point as an argument + */ +public abstract class UnaryGeoFunction extends UnaryScalarFunction { + + protected UnaryGeoFunction(Source source, Expression field) { + super(source, field); + } + + @Override + public Object fold() { + return operation().apply(field().fold()); + } + + @Override + protected TypeResolution resolveType() { + if (!childrenResolved()) { + return new TypeResolution("Unresolved children"); + } + return isGeo(field(), operation().toString(), Expressions.ParamOrdinal.DEFAULT); + } + + @Override + protected Processor makeProcessor() { + return new GeoProcessor(operation()); + } + + protected abstract GeoProcessor.GeoOperation operation(); + + @Override + public ScriptTemplate scriptWithField(FieldAttribute field) { + //TODO change this to use _source instead of the exact form (aka field.keyword for geo shape fields) + return new ScriptTemplate(processScript("{sql}.geoDocValue(doc,{})"), + 
paramsBuilder().variable(field.exactAttribute().name()).build(), + dataType()); + } + + @Override + public String processScript(String template) { + // basically, transform the script to InternalSqlScriptUtils.[function_name](other_function_or_field_name) + return super.processScript( + format(Locale.ROOT, "{sql}.%s(%s)", + StringUtils.underscoreToLowerCamelCase("ST_" + operation().name()), + template)); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + UnaryGeoFunction other = (UnaryGeoFunction) obj; + return Objects.equals(other.field(), field()); + } + + @Override + public int hashCode() { + return Objects.hash(field()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java index 6a4ec411fe1cf..d39aec4423684 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.sql.expression.function.scalar.whitelist; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.script.JodaCompatibleZonedDateTime; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; @@ -12,6 +13,10 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NonIsoDateTimeProcessor.NonIsoDateTimeExtractor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistanceProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StWkttosqlProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.TimeFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor.BinaryMathOperation; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryOptionalMathProcessor.BinaryOptionalMathOperation; @@ -73,7 +78,7 @@ public static Object docValue(Map> doc, String fi } return null; } - + public static boolean nullSafeFilter(Boolean filter) { return filter == null ? 
false : filter.booleanValue(); } @@ -109,7 +114,7 @@ public static Boolean neq(Object left, Object right) { public static Boolean lt(Object left, Object right) { return BinaryComparisonOperation.LT.apply(left, right); } - + public static Boolean lte(Object left, Object right) { return BinaryComparisonOperation.LTE.apply(left, right); } @@ -125,7 +130,7 @@ public static Boolean gte(Object left, Object right) { public static Boolean and(Boolean left, Boolean right) { return BinaryLogicOperation.AND.apply(left, right); } - + public static Boolean or(Boolean left, Boolean right) { return BinaryLogicOperation.OR.apply(left, right); } @@ -328,14 +333,14 @@ public static Integer dateTimeChrono(Object dateTime, String tzId, String chrono } return DateTimeFunction.dateTimeChrono(asDateTime(dateTime), tzId, chronoName); } - + public static String dayName(Object dateTime, String tzId) { if (dateTime == null || tzId == null) { return null; } return NameExtractor.DAY_NAME.extract(asDateTime(dateTime), tzId); } - + public static Integer dayOfWeek(Object dateTime, String tzId) { if (dateTime == null || tzId == null) { return null; @@ -349,7 +354,7 @@ public static String monthName(Object dateTime, String tzId) { } return NameExtractor.MONTH_NAME.extract(asDateTime(dateTime), tzId); } - + public static Integer quarter(Object dateTime, String tzId) { if (dateTime == null || tzId == null) { return null; @@ -390,7 +395,7 @@ private static Object asDateTime(Object dateTime, boolean lenient) { } return dateTime; } - + public static IntervalDayTime intervalDayTime(String text, String typeName) { if (text == null || typeName == null) { return null; @@ -416,7 +421,7 @@ public static OffsetTime asTime(String time) { public static Integer ascii(String s) { return (Integer) StringOperation.ASCII.apply(s); } - + public static Integer bitLength(String s) { return (Integer) StringOperation.BIT_LENGTH.apply(s); } @@ -428,7 +433,7 @@ public static String character(Number n) { public static Integer charLength(String s) { return (Integer) StringOperation.CHAR_LENGTH.apply(s); } - + public static String concat(String s1, String s2) { return (String) ConcatFunctionProcessor.process(s1, s2); } @@ -452,7 +457,7 @@ public static Integer length(String s) { public static Integer locate(String s1, String s2) { return locate(s1, s2, null); } - + public static Integer locate(String s1, String s2, Number pos) { return LocateFunctionProcessor.doProcess(s1, s2, pos); } @@ -460,7 +465,7 @@ public static Integer locate(String s1, String s2, Number pos) { public static String ltrim(String s) { return (String) StringOperation.LTRIM.apply(s); } - + public static Integer octetLength(String s) { return (Integer) StringOperation.OCTET_LENGTH.apply(s); } @@ -468,15 +473,15 @@ public static Integer octetLength(String s) { public static Integer position(String s1, String s2) { return (Integer) BinaryStringStringOperation.POSITION.apply(s1, s2); } - + public static String repeat(String s, Number count) { return BinaryStringNumericOperation.REPEAT.apply(s, count); } - + public static String replace(String s1, String s2, String s3) { return (String) ReplaceFunctionProcessor.doProcess(s1, s2, s3); } - + public static String right(String s, Number count) { return BinaryStringNumericOperation.RIGHT.apply(s, count); } @@ -496,7 +501,47 @@ public static String substring(String s, Number start, Number length) { public static String ucase(String s) { return (String) StringOperation.UCASE.apply(s); } - + + public static String stAswkt(Object v) { + return 
GeoProcessor.GeoOperation.ASWKT.apply(v).toString(); + } + + public static GeoShape stWktToSql(String wktString) { + return StWkttosqlProcessor.apply(wktString); + } + + public static Double stDistance(Object v1, Object v2) { + return StDistanceProcessor.process(v1, v2); + } + + public static String stGeometryType(Object g) { + return (String) GeoProcessor.GeoOperation.GEOMETRY_TYPE.apply(g); + } + + public static Double stX(Object g) { + return (Double) GeoProcessor.GeoOperation.X.apply(g); + } + + public static Double stY(Object g) { + return (Double) GeoProcessor.GeoOperation.Y.apply(g); + } + + public static Double stZ(Object g) { + return (Double) GeoProcessor.GeoOperation.Z.apply(g); + } + + // processes doc value as a geometry + public static GeoShape geoDocValue(Map> doc, String fieldName) { + Object obj = docValue(doc, fieldName); + if (obj != null) { + if (obj instanceof GeoPoint) { + return new GeoShape(((GeoPoint) obj).getLon(), ((GeoPoint) obj).getLat()); + } + // TODO: Add support for geo_shapes when it is there + } + return null; + } + // // Casting // diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java index b24ec56727d64..223e22b2a33ba 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; import org.elasticsearch.xpack.sql.expression.function.grouping.GroupingFunctionAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import org.elasticsearch.xpack.sql.expression.literal.IntervalDayTime; import org.elasticsearch.xpack.sql.expression.literal.IntervalYearMonth; import org.elasticsearch.xpack.sql.type.DataType; @@ -95,6 +96,13 @@ default ScriptTemplate scriptWithFoldable(Expression foldable) { dataType()); } + if (fold instanceof GeoShape) { + GeoShape geoShape = (GeoShape) fold; + return new ScriptTemplate(processScript("{sql}.stWktToSql({})"), + paramsBuilder().variable(geoShape.toString()).build(), + dataType()); + } + return new ScriptTemplate(processScript("{}"), paramsBuilder().variable(fold).build(), dataType()); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Intervals.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Intervals.java index b06a1fb887433..ed7dc9da77543 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Intervals.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Intervals.java @@ -408,5 +408,4 @@ public static TemporalAmount negate(TemporalAmount interval) { public static TemporalAmount parseInterval(Source source, String value, DataType intervalType) { return PARSERS.get(intervalType).parse(source, value); } - } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Literals.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Literals.java index 333ba3f11c0b1..d6bdeeb0fe46b 100644 --- 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Literals.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Literals.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.sql.expression.literal; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import java.util.ArrayList; import java.util.Collection; @@ -30,6 +31,7 @@ public static Collection getNamedWriteab entries.add(new NamedWriteableRegistry.Entry(IntervalDayTime.class, IntervalDayTime.NAME, IntervalDayTime::new)); entries.add(new NamedWriteableRegistry.Entry(IntervalYearMonth.class, IntervalYearMonth.NAME, IntervalYearMonth::new)); + entries.add(new NamedWriteableRegistry.Entry(GeoShape.class, GeoShape.NAME, GeoShape::new)); return entries; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java index b6bfaa4acb63d..5705bb4d85ab4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java @@ -164,7 +164,7 @@ protected Object doProcess(Object left, Object right) { return null; } - if (f == BinaryArithmeticOperation.MUL || f == BinaryArithmeticOperation.DIV || f == BinaryArithmeticOperation.MOD) { + if (f == BinaryArithmeticOperation.DIV || f == BinaryArithmeticOperation.MOD) { if (!(left instanceof Number)) { throw new SqlIllegalArgumentException("A number is required; received {}", left); } @@ -176,8 +176,8 @@ protected Object doProcess(Object left, Object right) { return f.apply(left, right); } - if (f == BinaryArithmeticOperation.ADD || f == BinaryArithmeticOperation.SUB) { - return f.apply(left, right); + if (f == BinaryArithmeticOperation.ADD || f == BinaryArithmeticOperation.SUB || f == BinaryArithmeticOperation.MUL) { + return f.apply(left, right); } // this should not occur diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java index 5be5e28718459..5b1076592d859 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java @@ -43,7 +43,7 @@ protected TypeResolution resolveType() { // 2. 3. 4. 
intervals if ((DataTypes.isInterval(l) || DataTypes.isInterval(r))) { if (DataTypeConversion.commonType(l, r) == null) { - return new TypeResolution(format("[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); + return new TypeResolution(format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); } else { return resolveWithIntervals(); } @@ -54,6 +54,12 @@ protected TypeResolution resolveType() { } protected TypeResolution resolveWithIntervals() { + DataType l = left().dataType(); + DataType r = right().dataType(); + + if (!(r.isDateOrTimeBased() || DataTypes.isInterval(r))|| !(l.isDateOrTimeBased() || DataTypes.isInterval(l))) { + return new TypeResolution(format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); + } return TypeResolution.TYPE_RESOLVED; } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Mul.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Mul.java index 7a09bbedebfa3..e3fa7ac1031f7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Mul.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Mul.java @@ -47,7 +47,7 @@ protected TypeResolution resolveType() { return TypeResolution.TYPE_RESOLVED; } - return new TypeResolution(format("[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); + return new TypeResolution(format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java index ee3ca6aa6773b..a47b9cc973122 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java @@ -34,6 +34,10 @@ protected Sub replaceChildren(Expression newLeft, Expression newRight) { @Override protected TypeResolution resolveWithIntervals() { + TypeResolution resolution = super.resolveWithIntervals(); + if (resolution.unresolved()) { + return resolution; + } if ((right().dataType().isDateOrTimeBased()) && DataTypes.isInterval(left().dataType())) { return new TypeResolution(format(null, "Cannot subtract a {}[{}] from an interval[{}]; do you mean the reverse?", right().dataType().typeName, right().source().text(), left().source().text())); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java index 8495b0269eb84..7e5516810d92a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.sql.planner; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.Point; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.Attribute; @@ -38,6 +40,8 @@ import 
org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeHistogramFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistance; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; import org.elasticsearch.xpack.sql.expression.literal.Intervals; import org.elasticsearch.xpack.sql.expression.predicate.Range; @@ -85,6 +89,7 @@ import org.elasticsearch.xpack.sql.querydsl.agg.TopHitsAgg; import org.elasticsearch.xpack.sql.querydsl.query.BoolQuery; import org.elasticsearch.xpack.sql.querydsl.query.ExistsQuery; +import org.elasticsearch.xpack.sql.querydsl.query.GeoDistanceQuery; import org.elasticsearch.xpack.sql.querydsl.query.MatchQuery; import org.elasticsearch.xpack.sql.querydsl.query.MultiMatchQuery; import org.elasticsearch.xpack.sql.querydsl.query.NestedQuery; @@ -656,6 +661,24 @@ private static Query translateQuery(BinaryComparison bc) { Object value = valueOf(bc.right()); String format = dateFormat(bc.left()); + // Possible geo optimization + if (bc.left() instanceof StDistance && value instanceof Number) { + if (bc instanceof LessThan || bc instanceof LessThanOrEqual) { + // Special case for ST_Distance translatable into geo_distance query + StDistance stDistance = (StDistance) bc.left(); + if (stDistance.left() instanceof FieldAttribute && stDistance.right().foldable()) { + Object geoShape = valueOf(stDistance.right()); + if (geoShape instanceof GeoShape) { + Geometry geometry = ((GeoShape) geoShape).toGeometry(); + if (geometry instanceof Point) { + String field = nameOf(stDistance.left()); + return new GeoDistanceQuery(source, field, ((Number) value).doubleValue(), + ((Point) geometry).getLat(), ((Point) geometry).getLon()); + } + } + } + } + } if (bc instanceof GreaterThan) { return new RangeQuery(source, name, value, false, null, false, format); } @@ -954,6 +977,9 @@ public QueryTranslation translate(Expression exp, boolean onAggs) { protected static Query handleQuery(ScalarFunction sf, Expression field, Supplier query) { Query q = query.get(); + if (field instanceof StDistance && q instanceof GeoDistanceQuery) { + return wrapIfNested(q, ((StDistance) field).left()); + } if (field instanceof FieldAttribute) { return wrapIfNested(q, field); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/GeoDistanceQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/GeoDistanceQuery.java new file mode 100644 index 0000000000000..dd1a1171c1603 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/GeoDistanceQuery.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
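The translateQuery change above only rewrites comparisons where ST_Distance is checked against a literal radius and one argument folds to a constant point; roughly, a filter like WHERE ST_Distance(location, &lt;constant point&gt;) &lt; 5000 then becomes a geo_distance query instead of a script filter. A minimal sketch of the builder this produces, mirroring the asBuilder() implementation of GeoDistanceQuery just below (field name, radius and coordinates are illustrative):

    import org.elasticsearch.common.unit.DistanceUnit;
    import org.elasticsearch.index.query.QueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    public class GeoDistanceTranslationSketch {
        public static QueryBuilder sketch() {
            return QueryBuilders.geoDistanceQuery("location")   // field backing the StDistance argument
                    .distance(5000, DistanceUnit.METERS)         // the literal radius from the comparison
                    .point(41.12, -71.34);                       // lat, lon of the folded constant point
        }
    }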
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.common.unit.DistanceUnit; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.xpack.sql.tree.Source; + +import java.util.Objects; + +public class GeoDistanceQuery extends LeafQuery { + + private final String field; + private final double lat; + private final double lon; + private final double distance; + + public GeoDistanceQuery(Source source, String field, double distance, double lat, double lon) { + super(source); + this.field = field; + this.distance = distance; + this.lat = lat; + this.lon = lon; + } + + public String field() { + return field; + } + + public double lat() { + return lat; + } + + public double lon() { + return lon; + } + + public double distance() { + return distance; + } + + @Override + public QueryBuilder asBuilder() { + return QueryBuilders.geoDistanceQuery(field).distance(distance, DistanceUnit.METERS).point(lat, lon); + } + + @Override + public int hashCode() { + return Objects.hash(field, distance, lat, lon); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + GeoDistanceQuery other = (GeoDistanceQuery) obj; + return Objects.equals(field, other.field) && + Objects.equals(distance, other.distance) && + Objects.equals(lat, other.lat) && + Objects.equals(lon, other.lon); + } + + @Override + protected String innerToString() { + return field + ":" + "(" + distance + "," + "(" + lat + ", " + lon + "))"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java index 1f04e7c8e1982..76f2436e8629c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java @@ -53,6 +53,9 @@ public enum DataType { // // specialized types // + GEO_SHAPE( ExtTypes.GEOMETRY, Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE, false, false, false), + // display size = 2 doubles + len("POINT( )") + GEO_POINT( ExtTypes.GEOMETRY, Double.BYTES*2, Integer.MAX_VALUE, 25 * 2 + 8, false, false, false), // IP can be v4 or v6. 
The latter has 2^128 addresses or 340,282,366,920,938,463,463,374,607,431,768,211,456 // aka 39 chars IP( "ip", JDBCType.VARCHAR, 39, 39, 0, false, false, true), @@ -251,6 +254,10 @@ public boolean isPrimitive() { return this != OBJECT && this != NESTED && this != UNSUPPORTED; } + public boolean isGeo() { + return this == GEO_POINT || this == GEO_SHAPE; + } + public boolean isDateBased() { return this == DATE || this == DATETIME; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java index 40a03e26eb0ef..5fd1867aeb27a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java @@ -121,6 +121,17 @@ public static DataType commonType(DataType left, DataType right) { return right; } } + // Interval * integer is a valid operation + if (DataTypes.isInterval(left)) { + if (right.isInteger()) { + return left; + } + } + if (DataTypes.isInterval(right)) { + if (left.isInteger()) { + return right; + } + } if (DataTypes.isInterval(left)) { // intervals widening if (DataTypes.isInterval(right)) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java index dcd6a1b35a13e..3f985ae4e3b6e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.type; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import org.elasticsearch.xpack.sql.expression.literal.Interval; import java.time.OffsetTime; @@ -81,6 +82,9 @@ public static DataType fromJava(Object value) { if (value instanceof Interval) { return ((Interval) value).dataType(); } + if (value instanceof GeoShape) { + return DataType.GEO_SHAPE; + } throw new SqlIllegalArgumentException("No idea what's the DataType for {}", value.getClass()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/ExtTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/ExtTypes.java index 1ad9dd92abfec..2c07be3eb620d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/ExtTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/ExtTypes.java @@ -27,7 +27,8 @@ enum ExtTypes implements SQLType { INTERVAL_DAY_TO_SECOND(110), INTERVAL_HOUR_TO_MINUTE(111), INTERVAL_HOUR_TO_SECOND(112), - INTERVAL_MINUTE_TO_SECOND(113); + INTERVAL_MINUTE_TO_SECOND(113), + GEOMETRY(114); private final Integer type; diff --git a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt index 4ac4632572ca0..6d24ea79f2bc2 100644 --- a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt +++ b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt @@ -4,7 +4,14 @@ # you may not use this file except in compliance with the Elastic License. 
# -# This file contains a whitelist for SQL specific utilities available inside SQL scripting +# This file contains a whitelist for SQL specific utilities and classes available inside SQL scripting + +#### Classes + +class org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape { + +} + class org.elasticsearch.xpack.sql.expression.literal.IntervalDayTime { } @@ -137,7 +144,19 @@ class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalS String space(Number) String substring(String, Number, Number) String ucase(String) - + +# +# Geo Functions +# + GeoShape geoDocValue(java.util.Map, String) + String stAswkt(Object) + Double stDistance(Object, Object) + String stGeometryType(Object) + GeoShape stWktToSql(String) + Double stX(Object) + Double stY(Object) + Double stZ(Object) + # # Casting # diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java index bc7b85b5392e9..b36111ffac3bb 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java @@ -158,7 +158,7 @@ public void testDottedFieldPathTypo() { public void testStarExpansionExcludesObjectAndUnsupportedTypes() { LogicalPlan plan = plan("SELECT * FROM test"); List list = ((Project) plan).projections(); - assertThat(list, hasSize(8)); + assertThat(list, hasSize(10)); List names = Expressions.names(list); assertThat(names, not(hasItem("some"))); assertThat(names, not(hasItem("some.dotted"))); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index dcf8dad5ecb79..f10b1a402708f 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -241,6 +241,27 @@ public void testSubtractFromInterval() { error("SELECT INTERVAL 1 MONTH - CAST('12:23:56.789' AS TIME)")); } + public void testAddIntervalAndNumberNotAllowed() { + assertEquals("1:8: [+] has arguments with incompatible types [INTERVAL_DAY] and [INTEGER]", + error("SELECT INTERVAL 1 DAY + 100")); + assertEquals("1:8: [+] has arguments with incompatible types [INTEGER] and [INTERVAL_DAY]", + error("SELECT 100 + INTERVAL 1 DAY")); + } + + public void testSubtractIntervalAndNumberNotAllowed() { + assertEquals("1:8: [-] has arguments with incompatible types [INTERVAL_MINUTE] and [DOUBLE]", + error("SELECT INTERVAL 10 MINUTE - 100.0")); + assertEquals("1:8: [-] has arguments with incompatible types [DOUBLE] and [INTERVAL_MINUTE]", + error("SELECT 100.0 - INTERVAL 10 MINUTE")); + } + + public void testMultiplyIntervalWithDecimalNotAllowed() { + assertEquals("1:8: [*] has arguments with incompatible types [INTERVAL_MONTH] and [DOUBLE]", + error("SELECT INTERVAL 1 MONTH * 1.234")); + assertEquals("1:8: [*] has arguments with incompatible types [DOUBLE] and [INTERVAL_MONTH]", + error("SELECT 1.234 * INTERVAL 1 MONTH")); + } + public void testMultipleColumns() { assertEquals("1:43: Unknown column [xxx]\nline 1:8: Unknown column [xxx]", error("SELECT xxx FROM test GROUP BY 
DAY_oF_YEAR(xxx)")); @@ -773,4 +794,28 @@ public void testAggregateAliasInFilter() { public void testProjectUnresolvedAliasInFilter() { assertEquals("1:8: Unknown column [tni]", error("SELECT tni AS i FROM test WHERE i > 10 GROUP BY i")); } + + public void testGeoShapeInWhereClause() { + assertEquals("1:49: geo shapes cannot be used for filtering", + error("SELECT ST_AsWKT(shape) FROM test WHERE ST_AsWKT(shape) = 'point (10 20)'")); + + // We get only one message back because the messages are grouped by the node that caused the issue + assertEquals("1:46: geo shapes cannot be used for filtering", + error("SELECT MAX(ST_X(shape)) FROM test WHERE ST_Y(shape) > 10 GROUP BY ST_GEOMETRYTYPE(shape) ORDER BY ST_ASWKT(shape)")); + } + + public void testGeoShapeInGroupBy() { + assertEquals("1:44: geo shapes cannot be used in grouping", + error("SELECT ST_X(shape) FROM test GROUP BY ST_X(shape)")); + } + + public void testGeoShapeInOrderBy() { + assertEquals("1:44: geo shapes cannot be used for sorting", + error("SELECT ST_X(shape) FROM test ORDER BY ST_Z(shape)")); + } + + public void testGeoShapeInSelect() { + accept("SELECT ST_X(shape) FROM test"); + } + } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java index 973d5b50fad00..50a3b185dba86 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlException; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.util.DateUtils; @@ -451,6 +452,125 @@ public void testObjectsForSourceValue() throws IOException { assertThat(ex.getMessage(), is("Objects (returned by [" + fieldName + "]) are not supported")); } + public void testGeoShapeExtraction() { + String fieldName = randomAlphaOfLength(5); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_SHAPE, UTC, false); + Map map = new HashMap<>(); + map.put(fieldName, "POINT (1 2)"); + assertEquals(new GeoShape(1, 2), fe.extractFromSource(map)); + + map = new HashMap<>(); + assertNull(fe.extractFromSource(map)); + } + + + public void testMultipleGeoShapeExtraction() { + String fieldName = randomAlphaOfLength(5); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_SHAPE, UTC, false); + Map map = new HashMap<>(); + map.put(fieldName, "POINT (1 2)"); + assertEquals(new GeoShape(1, 2), fe.extractFromSource(map)); + + map = new HashMap<>(); + assertNull(fe.extractFromSource(map)); + + Map map2 = new HashMap<>(); + map2.put(fieldName, Arrays.asList("POINT (1 2)", "POINT (3 4)")); + SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map2)); + assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); + + FieldHitExtractor lenientFe = new FieldHitExtractor(fieldName, DataType.GEO_SHAPE, UTC, false, true); + assertEquals(new GeoShape(1, 2), lenientFe.extractFromSource(map2)); + } + + public void testGeoPointExtractionFromSource() throws IOException { + int layers = 
randomIntBetween(1, 3); + String pathCombined = ""; + double lat = randomDoubleBetween(-90, 90, true); + double lon = randomDoubleBetween(-180, 180, true); + SearchHit hit = new SearchHit(1); + XContentBuilder source = JsonXContent.contentBuilder(); + boolean[] arrayWrap = new boolean[layers - 1]; + source.startObject(); { + for (int i = 0; i < layers - 1; i++) { + arrayWrap[i] = randomBoolean(); + String name = randomAlphaOfLength(10); + source.field(name); + if (arrayWrap[i]) { + source.startArray(); + } + source.startObject(); + pathCombined = pathCombined + name + "."; + } + String name = randomAlphaOfLength(10); + pathCombined = pathCombined + name; + source.field(name, randomPoint(lat, lon)); + for (int i = layers - 2; i >= 0; i--) { + source.endObject(); + if (arrayWrap[i]) { + source.endArray(); + } + } + } + source.endObject(); + BytesReference sourceRef = BytesReference.bytes(source); + hit.sourceRef(sourceRef); + + FieldHitExtractor fe = new FieldHitExtractor(pathCombined, DataType.GEO_POINT, UTC, false); + assertEquals(new GeoShape(lon, lat), fe.extract(hit)); + } + + public void testMultipleGeoPointExtractionFromSource() throws IOException { + double lat = randomDoubleBetween(-90, 90, true); + double lon = randomDoubleBetween(-180, 180, true); + SearchHit hit = new SearchHit(1); + String fieldName = randomAlphaOfLength(5); + int arraySize = randomIntBetween(2, 4); + XContentBuilder source = JsonXContent.contentBuilder(); + source.startObject(); { + source.startArray(fieldName); + source.value(randomPoint(lat, lon)); + for (int i = 1; i < arraySize; i++) { + source.value(randomPoint(lat, lon)); + } + source.endArray(); + } + source.endObject(); + BytesReference sourceRef = BytesReference.bytes(source); + hit.sourceRef(sourceRef); + + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, false); + SqlException ex = expectThrows(SqlException.class, () -> fe.extract(hit)); + assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); + + FieldHitExtractor lenientFe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, false, true); + assertEquals(new GeoShape(lon, lat), lenientFe.extract(hit)); + } + + public void testGeoPointExtractionFromDocValues() { + String fieldName = randomAlphaOfLength(5); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, true); + SearchHit hit = new SearchHit(1); + DocumentField field = new DocumentField(fieldName, singletonList("2, 1")); + hit.fields(singletonMap(fieldName, field)); + assertEquals(new GeoShape(1, 2), fe.extract(hit)); + hit = new SearchHit(1); + assertNull(fe.extract(hit)); + } + + public void testGeoPointExtractionFromMultipleDocValues() { + String fieldName = randomAlphaOfLength(5); + SearchHit hit = new SearchHit(1); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, true); + + hit.fields(singletonMap(fieldName, new DocumentField(fieldName, Arrays.asList("2,1", "3,4")))); + SqlException ex = expectThrows(SqlException.class, () -> fe.extract(hit)); + assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); + + FieldHitExtractor lenientFe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, true, true); + assertEquals(new GeoShape(1, 2), lenientFe.extract(hit)); + } + private FieldHitExtractor getFieldHitExtractor(String fieldName, boolean useDocValue) { return new FieldHitExtractor(fieldName, null, UTC, useDocValue); } @@ -471,4 +591,18 @@ private Object 
randomNonNullValue() { ESTestCase::randomDouble)); return value.get(); } + + private Object randomPoint(double lat, double lon) { + Supplier value = randomFrom(Arrays.asList( + () -> lat + "," + lon, + () -> Arrays.asList(lon, lat), + () -> { + Map map1 = new HashMap<>(); + map1.put("lat", lat); + map1.put("lon", lon); + return map1; + } + )); + return value.get(); + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessorTests.java new file mode 100644 index 0000000000000..07cc6171cf013 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessorTests.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; + +import java.io.IOException; + +public class GeoProcessorTests extends AbstractWireSerializingTestCase { + public static GeoProcessor randomGeoProcessor() { + return new GeoProcessor(randomFrom(GeoOperation.values())); + } + + @Override + protected GeoProcessor createTestInstance() { + return randomGeoProcessor(); + } + + @Override + protected Reader instanceReader() { + return GeoProcessor::new; + } + + @Override + protected GeoProcessor mutateInstance(GeoProcessor instance) throws IOException { + return new GeoProcessor(randomValueOtherThan(instance.processor(), () -> randomFrom(GeoOperation.values()))); + } + + public void testApplyAsWKT() throws Exception { + assertEquals("point (10.0 20.0)", new GeoProcessor(GeoOperation.ASWKT).process(new GeoShape(10, 20))); + assertEquals("point (10.0 20.0)", new GeoProcessor(GeoOperation.ASWKT).process(new GeoShape("POINT (10 20)"))); + } + + public void testApplyGeometryType() throws Exception { + assertEquals("POINT", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape(10, 20))); + assertEquals("POINT", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape("POINT (10 20)"))); + assertEquals("MULTIPOINT", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape("multipoint (2.0 1.0)"))); + assertEquals("LINESTRING", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape("LINESTRING (3.0 1.0, 4.0 2.0)"))); + assertEquals("POLYGON", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process( + new GeoShape("polygon ((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0))"))); + assertEquals("MULTILINESTRING", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process( + new GeoShape("multilinestring ((3.0 1.0, 4.0 2.0), (2.0 1.0, 5.0 6.0))"))); + assertEquals("MULTIPOLYGON", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process( + new GeoShape("multipolygon (((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0)))"))); + assertEquals("ENVELOPE", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape("bbox (10.0, 20.0, 40.0, 30.0)"))); + assertEquals("GEOMETRYCOLLECTION", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process( + new 
GeoShape("geometrycollection (point (20.0 10.0),point (1.0 2.0))"))); + } + + + public void testApplyGetXYZ() throws Exception { + assertEquals(10.0, new GeoProcessor(GeoOperation.X).process(new GeoShape(10, 20))); + assertEquals(20.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape(10, 20))); + assertNull(new GeoProcessor(GeoOperation.Z).process(new GeoShape(10, 20))); + assertEquals(10.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("POINT (10 20)"))); + assertEquals(20.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("POINT (10 20)"))); + assertEquals(10.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("POINT (10 20 30)"))); + assertEquals(20.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("POINT (10 20 30)"))); + assertEquals(30.0, new GeoProcessor(GeoOperation.Z).process(new GeoShape("POINT (10 20 30)"))); + assertEquals(2.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("multipoint (2.0 1.0)"))); + assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("multipoint (2.0 1.0)"))); + assertEquals(3.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("LINESTRING (3.0 1.0, 4.0 2.0)"))); + assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("LINESTRING (3.0 1.0, 4.0 2.0)"))); + assertEquals(3.0, new GeoProcessor(GeoOperation.X).process( + new GeoShape("multilinestring ((3.0 1.0, 4.0 2.0), (2.0 1.0, 5.0 6.0))"))); + assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process( + new GeoShape("multilinestring ((3.0 1.0, 4.0 2.0), (2.0 1.0, 5.0 6.0))"))); + // minX minX, maxX, maxY, minY + assertEquals(10.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("bbox (10.0, 20.0, 40.0, 30.0)"))); + // minY minX, maxX, maxY, minY + assertEquals(30.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("bbox (10.0, 20.0, 40.0, 30.0)"))); + assertEquals(20.0, new GeoProcessor(GeoOperation.X).process( + new GeoShape("geometrycollection (point (20.0 10.0),point (1.0 2.0))"))); + assertEquals(10.0, new GeoProcessor(GeoOperation.Y).process( + new GeoShape("geometrycollection (point (20.0 10.0),point (1.0 2.0))"))); + } + + public void testApplyGetXYZToPolygons() throws Exception { + assertEquals(3.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("polygon ((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0))"))); + assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("polygon ((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0))"))); + assertNull(new GeoProcessor(GeoOperation.Z).process(new GeoShape("polygon ((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0))"))); + assertEquals(5.0, new GeoProcessor(GeoOperation.Z).process( + new GeoShape("polygon ((3.0 1.0 5.0, 4.0 2.0 6.0, 4.0 3.0 7.0, 3.0 1.0 5.0))"))); + assertEquals(3.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("multipolygon (((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0)))"))); + assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("multipolygon (((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0)))"))); + } + + public void testApplyNull() { + for (GeoOperation op : GeoOperation.values()) { + GeoProcessor proc = new GeoProcessor(op); + assertNull(proc.process(null)); + } + } + + public void testTypeCheck() { + GeoProcessor proc = new GeoProcessor(GeoOperation.ASWKT); + SqlIllegalArgumentException siae = expectThrows(SqlIllegalArgumentException.class, () -> proc.process("string")); + assertEquals("A geo_point or geo_shape is required; received [string]", siae.getMessage()); + } +} diff --git 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessorTests.java new file mode 100644 index 0000000000000..9f78f8b3df43b --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessorTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; +import org.elasticsearch.xpack.sql.expression.gen.processor.ChainingProcessor; +import org.elasticsearch.xpack.sql.expression.gen.processor.ConstantProcessor; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; + +import static org.elasticsearch.xpack.sql.expression.function.scalar.FunctionTestUtils.l; +import static org.elasticsearch.xpack.sql.tree.Source.EMPTY; +import static org.hamcrest.Matchers.instanceOf; + +public class StDistanceProcessorTests extends AbstractWireSerializingTestCase { + + public StDistanceProcessor createTestInstance() { + return new StDistanceProcessor( + constantPoint(randomDoubleBetween(-180, 180, true), randomDoubleBetween(-90, 90, true)), + constantPoint(randomDoubleBetween(-180, 180, true), randomDoubleBetween(-90, 90, true)) + ); + } + + public static Processor constantPoint(double lon, double lat) { + return new ChainingProcessor(new ConstantProcessor("point (" + lon + " " + lat + ")"), StWkttosqlProcessor.INSTANCE); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Processors.getNamedWriteables()); + } + + public void testApply() { + StDistanceProcessor proc = new StDistanceProcessor(constantPoint(10, 20), constantPoint(30, 40)); + Object result = proc.process(null); + assertThat(result, instanceOf(Double.class)); + assertEquals(GeoUtils.arcDistance(20, 10, 40, 30), (double) result, 0.000001); + } + + public void testNullHandling() { + assertNull(new StDistance(EMPTY, l(new GeoShape(1, 2)), l(null)).makePipe().asProcessor().process(null)); + assertNull(new StDistance(EMPTY, l(null), l(new GeoShape(1, 2))).makePipe().asProcessor().process(null)); + } + + public void testTypeCheck() { + SqlIllegalArgumentException siae = expectThrows(SqlIllegalArgumentException.class, + () -> new StDistance(EMPTY, l("foo"), l(new GeoShape(1, 2))).makePipe().asProcessor().process(null)); + assertEquals("A geo_point or geo_shape with type point is required; received [foo]", siae.getMessage()); + + siae = expectThrows(SqlIllegalArgumentException.class, + () -> new StDistance(EMPTY, l(new GeoShape(1, 2)), l("bar")).makePipe().asProcessor().process(null)); + assertEquals("A geo_point or geo_shape with type point is required; received [bar]", siae.getMessage()); + } + + @Override + protected Writeable.Reader instanceReader() { + return StDistanceProcessor::new; + } 
+} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessorTests.java new file mode 100644 index 0000000000000..fc7b33ae905d7 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessorTests.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + +import static org.hamcrest.Matchers.instanceOf; + +public class StWkttosqlProcessorTests extends ESTestCase { + public static StWkttosqlProcessor randomStWkttosqlProcessor() { + return new StWkttosqlProcessor(); + } + + public void testApply() { + StWkttosqlProcessor proc = new StWkttosqlProcessor(); + assertNull(proc.process(null)); + Object result = proc.process("POINT (10 20)"); + assertThat(result, instanceOf(GeoShape.class)); + GeoShape geoShape = (GeoShape) result; + assertEquals("point (10.0 20.0)", geoShape.toString()); + } + + public void testTypeCheck() { + StWkttosqlProcessor procPoint = new StWkttosqlProcessor(); + SqlIllegalArgumentException siae = expectThrows(SqlIllegalArgumentException.class, () -> procPoint.process(42)); + assertEquals("A string is required; received [42]", siae.getMessage()); + + siae = expectThrows(SqlIllegalArgumentException.class, () -> procPoint.process("some random string")); + assertEquals("Cannot parse [some random string] as a geo_shape value", siae.getMessage()); + + siae = expectThrows(SqlIllegalArgumentException.class, () -> procPoint.process("point (foo bar)")); + assertEquals("Cannot parse [point (foo bar)] as a geo_shape value", siae.getMessage()); + + + siae = expectThrows(SqlIllegalArgumentException.class, () -> procPoint.process("point (10 10")); + assertEquals("Cannot parse [point (10 10] as a geo_shape value", siae.getMessage()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java index cf6530e2188ff..93f6515f71062 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.IsoWeekOfYear; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MonthOfYear; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Year; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistance; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ACos; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ASin; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ATan; @@ -764,6 +765,15 @@ public void testLiteralsOnTheRight() { assertEquals(FIVE, nullEquals.right()); } + public void testLiteralsOnTheRightInStDistance() { + Alias a = new Alias(EMPTY, "a", L(10)); + Expression result = new 
BooleanLiteralsOnTheRight().rule(new StDistance(EMPTY, FIVE, a)); + assertTrue(result instanceof StDistance); + StDistance sd = (StDistance) result; + assertEquals(a, sd.left()); + assertEquals(FIVE, sd.right()); + } + public void testBoolSimplifyNotIsNullAndNotIsNotNull() { BooleanSimplification simplification = new BooleanSimplification(); assertTrue(simplification.rule(new Not(EMPTY, new IsNull(EMPTY, ONE))) instanceof IsNotNull); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java index f3f2d9569c53f..9c8c32689b70e 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java @@ -57,7 +57,7 @@ public void testSysColumns() { SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, randomValueOtherThanMany(Mode::isDriver, () -> randomFrom(Mode.values()))); // nested fields are ignored - assertEquals(13, rows.size()); + assertEquals(15, rows.size()); assertEquals(24, rows.get(0).size()); List row = rows.get(0); @@ -144,7 +144,7 @@ public void testSysColumnsInOdbcMode() { List> rows = new ArrayList<>(); SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, Mode.ODBC); - assertEquals(13, rows.size()); + assertEquals(15, rows.size()); assertEquals(24, rows.get(0).size()); List row = rows.get(0); @@ -233,7 +233,7 @@ public void testSysColumnsInOdbcMode() { assertEquals(Short.class, nullable(row).getClass()); assertEquals(Short.class, sqlDataType(row).getClass()); assertEquals(Short.class, sqlDataTypeSub(row).getClass()); - + row = rows.get(9); assertEquals("some.ambiguous", name(row)); assertEquals((short) Types.VARCHAR, sqlType(row)); @@ -279,7 +279,7 @@ public void testSysColumnsInJdbcMode() { List> rows = new ArrayList<>(); SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, Mode.JDBC); - assertEquals(13, rows.size()); + assertEquals(15, rows.size()); assertEquals(24, rows.get(0).size()); List row = rows.get(0); @@ -463,7 +463,7 @@ public void testSysColumnsNoArg() throws Exception { public void testSysColumnsWithCatalogWildcard() throws Exception { executeCommand("SYS COLUMNS CATALOG 'cluster' TABLE LIKE 'test' LIKE '%'", emptyList(), r -> { - assertEquals(13, r.size()); + assertEquals(14, r.size()); assertEquals(CLUSTER_NAME, r.column(0)); assertEquals("test", r.column(2)); assertEquals("bool", r.column(3)); @@ -476,7 +476,7 @@ public void testSysColumnsWithCatalogWildcard() throws Exception { public void testSysColumnsWithMissingCatalog() throws Exception { executeCommand("SYS COLUMNS TABLE LIKE 'test' LIKE '%'", emptyList(), r -> { - assertEquals(13, r.size()); + assertEquals(14, r.size()); assertEquals(CLUSTER_NAME, r.column(0)); assertEquals("test", r.column(2)); assertEquals("bool", r.column(3)); @@ -489,7 +489,7 @@ public void testSysColumnsWithMissingCatalog() throws Exception { public void testSysColumnsWithNullCatalog() throws Exception { executeCommand("SYS COLUMNS CATALOG ? 
TABLE LIKE 'test' LIKE '%'", singletonList(new SqlTypedParamValue("keyword", null)), r -> { - assertEquals(13, r.size()); + assertEquals(14, r.size()); assertEquals(CLUSTER_NAME, r.column(0)); assertEquals("test", r.column(2)); assertEquals("bool", r.column(3)); @@ -529,4 +529,4 @@ private Tuple sql(String sql, List para SqlSession session = new SqlSession(TestUtils.TEST_CFG, null, null, resolver, null, null, null, null, null); return new Tuple<>(cmd, session); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java index 4a8da68a1d51e..805268dd5b687 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java @@ -48,7 +48,7 @@ public void testSysTypes() { "INTERVAL_YEAR", "INTERVAL_MONTH", "INTERVAL_DAY", "INTERVAL_HOUR", "INTERVAL_MINUTE", "INTERVAL_SECOND", "INTERVAL_YEAR_TO_MONTH", "INTERVAL_DAY_TO_HOUR", "INTERVAL_DAY_TO_MINUTE", "INTERVAL_DAY_TO_SECOND", "INTERVAL_HOUR_TO_MINUTE", "INTERVAL_HOUR_TO_SECOND", "INTERVAL_MINUTE_TO_SECOND", - "UNSUPPORTED", "OBJECT", "NESTED"); + "GEO_SHAPE", "GEO_POINT", "UNSUPPORTED", "OBJECT", "NESTED"); cmd.execute(null, wrap(r -> { assertEquals(19, r.columnCount()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index 0543e65d4ae46..693840bd65c34 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.xpack.sql.querydsl.agg.GroupByDateHistogram; import org.elasticsearch.xpack.sql.querydsl.query.BoolQuery; import org.elasticsearch.xpack.sql.querydsl.query.ExistsQuery; +import org.elasticsearch.xpack.sql.querydsl.query.GeoDistanceQuery; import org.elasticsearch.xpack.sql.querydsl.query.NotQuery; import org.elasticsearch.xpack.sql.querydsl.query.Query; import org.elasticsearch.xpack.sql.querydsl.query.RangeQuery; @@ -65,6 +66,7 @@ import static org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation.PI; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.startsWith; public class QueryTranslatorTests extends ESTestCase { @@ -496,7 +498,7 @@ public void testTranslateMathFunction_HavingClause_Painless() { assertNull(translation.query); AggFilter aggFilter = translation.aggFilter; assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.gt(InternalSqlScriptUtils." 
+ - operation.name().toLowerCase(Locale.ROOT) + "(params.a0),params.v0))", + operation.name().toLowerCase(Locale.ROOT) + "(params.a0),params.v0))", aggFilter.scriptTemplate().toString()); assertThat(aggFilter.scriptTemplate().params().toString(), startsWith("[{a=max(int){a->")); assertThat(aggFilter.scriptTemplate().params().toString(), endsWith(", {v=10}]")); @@ -561,6 +563,109 @@ public void testGroupByAndHavingWithFunctionOnTopOfAggregation() { assertThat(aggFilter.scriptTemplate().params().toString(), endsWith(", {v=10}]")); } + public void testTranslateStAsWktForPoints() { + LogicalPlan p = plan("SELECT ST_AsWKT(point) FROM test WHERE ST_AsWKT(point) = 'point (10 20)'"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, true); + assertNull(translation.query); + AggFilter aggFilter = translation.aggFilter; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.eq(" + + "InternalSqlScriptUtils.stAswkt(InternalSqlScriptUtils.geoDocValue(doc,params.v0))," + + "params.v1)" + + ")", + aggFilter.scriptTemplate().toString()); + assertEquals("[{v=point}, {v=point (10 20)}]", aggFilter.scriptTemplate().params().toString()); + } + + public void testTranslateStWktToSql() { + LogicalPlan p = plan("SELECT shape FROM test WHERE ST_WKTToSQL(keyword) = ST_WKTToSQL('point (10 20)')"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, true); + assertNull(translation.query); + AggFilter aggFilter = translation.aggFilter; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(" + + "InternalSqlScriptUtils.eq(InternalSqlScriptUtils.stWktToSql(" + + "InternalSqlScriptUtils.docValue(doc,params.v0)),InternalSqlScriptUtils.stWktToSql(params.v1)))", + aggFilter.scriptTemplate().toString()); + assertEquals("[{v=keyword}, {v=point (10.0 20.0)}]", aggFilter.scriptTemplate().params().toString()); + } + + public void testTranslateStDistanceToScript() { + String operator = randomFrom(">", ">="); + String operatorFunction = operator.equalsIgnoreCase(">") ? "gt" : "gte"; + LogicalPlan p = plan("SELECT shape FROM test WHERE ST_Distance(point, ST_WKTToSQL('point (10 20)')) " + operator + " 20"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertNull(translation.aggFilter); + assertTrue(translation.query instanceof ScriptQuery); + ScriptQuery sc = (ScriptQuery) translation.query; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(" + + "InternalSqlScriptUtils." 
+ operatorFunction + "(" + + "InternalSqlScriptUtils.stDistance(" + + "InternalSqlScriptUtils.geoDocValue(doc,params.v0),InternalSqlScriptUtils.stWktToSql(params.v1)),params.v2))", + sc.script().toString()); + assertEquals("[{v=point}, {v=point (10.0 20.0)}, {v=20}]", sc.script().params().toString()); + } + + public void testTranslateStDistanceToQuery() { + String operator = randomFrom("<", "<="); + LogicalPlan p = plan("SELECT shape FROM test WHERE ST_Distance(point, ST_WKTToSQL('point (10 20)')) " + operator + " 25"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertNull(translation.aggFilter); + assertTrue(translation.query instanceof GeoDistanceQuery); + GeoDistanceQuery gq = (GeoDistanceQuery) translation.query; + assertEquals("point", gq.field()); + assertEquals(20.0, gq.lat(), 0.00001); + assertEquals(10.0, gq.lon(), 0.00001); + assertEquals(25.0, gq.distance(), 0.00001); + } + + public void testTranslateStXY() { + String dim = randomFrom("X", "Y"); + LogicalPlan p = plan("SELECT ST_AsWKT(point) FROM test WHERE ST_" + dim + "(point) = 10"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertNull(translation.aggFilter); + assertThat(translation.query, instanceOf(ScriptQuery.class)); + ScriptQuery sc = (ScriptQuery) translation.query; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.eq(InternalSqlScriptUtils.st" + dim + "(" + + "InternalSqlScriptUtils.geoDocValue(doc,params.v0)),params.v1))", + sc.script().toString()); + assertEquals("[{v=point}, {v=10}]", sc.script().params().toString()); + } + + public void testTranslateStGeometryType() { + LogicalPlan p = plan("SELECT ST_AsWKT(point) FROM test WHERE ST_GEOMETRYTYPE(point) = 'POINT'"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertNull(translation.aggFilter); + assertThat(translation.query, instanceOf(ScriptQuery.class)); + ScriptQuery sc = (ScriptQuery) translation.query; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.eq(InternalSqlScriptUtils.stGeometryType(" + + "InternalSqlScriptUtils.geoDocValue(doc,params.v0)),params.v1))", + sc.script().toString()); + assertEquals("[{v=point}, {v=POINT}]", sc.script().params().toString()); + } + public void testTranslateCoalesce_GroupBy_Painless() { LogicalPlan p = plan("SELECT COALESCE(int, 10) FROM test GROUP BY 1"); assertTrue(p instanceof Aggregate); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java index 447c820c8e421..7ca4d0058325f 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java @@ -628,6 +628,10 @@ 
public void testCommonType() { assertEquals(FLOAT, commonType(FLOAT, INTEGER)); assertEquals(DOUBLE, commonType(DOUBLE, FLOAT)); + // numeric and intervals + assertEquals(INTERVAL_YEAR_TO_MONTH, commonType(INTERVAL_YEAR_TO_MONTH, LONG)); + assertEquals(INTERVAL_HOUR_TO_MINUTE, commonType(INTEGER, INTERVAL_HOUR_TO_MINUTE)); + // dates/datetimes and intervals assertEquals(DATETIME, commonType(DATE, DATETIME)); assertEquals(DATETIME, commonType(DATETIME, DATE)); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java index 65b491fe71a1d..997de6e2f5c53 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java @@ -170,8 +170,11 @@ public void testNestedDoc() { public void testGeoField() { Map mapping = loadMapping("mapping-geo.json"); - EsField dt = mapping.get("location"); - assertThat(dt.getDataType().typeName, is("unsupported")); + assertThat(mapping.size(), is(2)); + EsField gp = mapping.get("location"); + assertThat(gp.getDataType().typeName, is("geo_point")); + EsField gs = mapping.get("site"); + assertThat(gs.getDataType().typeName, is("geo_shape")); } public void testIpField() { diff --git a/x-pack/plugin/sql/src/test/resources/mapping-geo.json b/x-pack/plugin/sql/src/test/resources/mapping-geo.json index 3c958ff37edfc..e6e499ef82e83 100644 --- a/x-pack/plugin/sql/src/test/resources/mapping-geo.json +++ b/x-pack/plugin/sql/src/test/resources/mapping-geo.json @@ -2,6 +2,9 @@ "properties" : { "location" : { "type" : "geo_point" + }, + "site": { + "type" : "geo_shape" } } } diff --git a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json index d93633f7aced0..c75ecfdc845c0 100644 --- a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json +++ b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json @@ -44,6 +44,8 @@ } } }, - "foo_type" : { "type" : "foo" } + "foo_type" : { "type" : "foo" }, + "point": {"type" : "geo_point"}, + "shape": {"type" : "geo_shape"} } } diff --git a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json index 448c50e6a9f0a..e46d64a45e88f 100644 --- a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json +++ b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json @@ -6,6 +6,7 @@ "keyword" : { "type" : "keyword" }, "unsupported" : { "type" : "ip_range" }, "date" : { "type" : "date"}, + "shape": { "type" : "geo_shape" }, "some" : { "properties" : { "dotted" : { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index 1e9223b79f201..a475c3ceadca6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -100,7 +100,7 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-stop" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { 
transforms.0.state.task_state: "started" } - do: @@ -114,8 +114,8 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-stop" } - - match: { transforms.0.state.indexer_state: "stopped" } - - match: { transforms.0.state.task_state: "stopped" } +# - match: { transforms.0.state.indexer_state: "stopped" } +# - match: { transforms.0.state.task_state: "stopped" } - do: data_frame.start_data_frame_transform: @@ -127,7 +127,7 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-stop" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } --- @@ -168,7 +168,7 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-stop" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } - do: @@ -194,7 +194,7 @@ teardown: transform_id: "airline-transform-start-later" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-later" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } - do: @@ -206,47 +206,3 @@ teardown: - do: data_frame.delete_data_frame_transform: transform_id: "airline-transform-start-later" - ---- -"Test stop all": - - do: - data_frame.put_data_frame_transform: - transform_id: "airline-transform-stop-all" - body: > - { - "source": { "index": "airline-data" }, - "dest": { "index": "airline-data-start-later" }, - "pivot": { - "group_by": { "airline": {"terms": {"field": "airline"}}}, - "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} - } - } - - do: - data_frame.start_data_frame_transform: - transform_id: "airline-transform-stop-all" - - match: { started: true } - - - do: - data_frame.start_data_frame_transform: - transform_id: "airline-transform-start-stop" - - match: { started: true } - - - do: - data_frame.stop_data_frame_transform: - transform_id: "_all" - wait_for_completion: true - - - match: { stopped: true } - - - do: - data_frame.get_data_frame_transform_stats: - transform_id: "*" - - match: { count: 2 } - - match: { transforms.0.state.indexer_state: "stopped" } - - match: { transforms.0.state.task_state: "stopped" } - - match: { transforms.1.state.indexer_state: "stopped" } - - match: { transforms.1.state.task_state: "stopped" } - - - do: - data_frame.delete_data_frame_transform: - transform_id: "airline-transform-stop-all" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml index 33b0f40863a79..f552e4710c781 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml @@ -42,23 +42,26 @@ teardown: --- "Test get transform stats": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42309" - do: data_frame.get_data_frame_transform_stats: transform_id: "airline-transform-stats" - match: { count: 1 } - match: { transforms.0.id: 
"airline-transform-stats" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } - match: { transforms.0.state.checkpoint: 0 } - - match: { transforms.0.stats.pages_processed: 0 } + - lte: { transforms.0.stats.pages_processed: 1 } - match: { transforms.0.stats.documents_processed: 0 } - match: { transforms.0.stats.documents_indexed: 0 } - - match: { transforms.0.stats.trigger_count: 0 } + - match: { transforms.0.stats.trigger_count: 1 } - match: { transforms.0.stats.index_time_in_ms: 0 } - match: { transforms.0.stats.index_total: 0 } - match: { transforms.0.stats.index_failures: 0 } - - match: { transforms.0.stats.search_time_in_ms: 0 } - - match: { transforms.0.stats.search_total: 0 } + - gte: { transforms.0.stats.search_time_in_ms: 0 } + - lte: { transforms.0.stats.search_total: 1 } - match: { transforms.0.stats.search_failures: 0 } --- @@ -146,6 +149,9 @@ teardown: --- "Test get multiple transform stats where one does not have a task": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42309" - do: data_frame.put_data_frame_transform: transform_id: "airline-transform-stats-dos" @@ -172,7 +178,7 @@ teardown: transform_id: "_all" - match: { count: 2 } - match: { transforms.0.id: "airline-transform-stats" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.1.id: "airline-transform-stats-dos" } - match: { transforms.1.state.indexer_state: "stopped" } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java index a96c04ab7cd99..58a649de48c02 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; @@ -223,7 +224,8 @@ public void testThatTemplatesExist() { // otherwise a rolling upgrade would not work as expected, when the node has a .watches shard on it public void testThatTemplatesAreAppliedOnNewerNodes() { DiscoveryNode localNode = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); - DiscoveryNode masterNode = new DiscoveryNode("master", ESTestCase.buildNewFakeTransportAddress(), Version.V_6_0_0); + DiscoveryNode masterNode = new DiscoveryNode("master", ESTestCase.buildNewFakeTransportAddress(), + VersionUtils.randomPreviousCompatibleVersion(random(), Version.CURRENT)); DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("master").add(localNode).add(masterNode).build(); ClusterChangedEvent event = createClusterChangedEvent(Arrays.asList(WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME, diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java 
b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index f73496db0f875..f17aab309ba72 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -266,9 +266,7 @@ public void testRollupAfterRestart() throws Exception { final Request clusterHealthRequest = new Request("GET", "/_cluster/health"); clusterHealthRequest.addParameter("wait_for_status", "yellow"); clusterHealthRequest.addParameter("wait_for_no_relocating_shards", "true"); - if (getOldClusterVersion().onOrAfter(Version.V_6_2_0)) { - clusterHealthRequest.addParameter("wait_for_no_initializing_shards", "true"); - } + clusterHealthRequest.addParameter("wait_for_no_initializing_shards", "true"); Map clusterHealthResponse = entityAsMap(client().performRequest(clusterHealthRequest)); assertThat(clusterHealthResponse.get("timed_out"), equalTo(Boolean.FALSE)); @@ -384,9 +382,7 @@ private void waitForYellow(String indexName) throws IOException { request.addParameter("wait_for_status", "yellow"); request.addParameter("timeout", "30s"); request.addParameter("wait_for_no_relocating_shards", "true"); - if (getOldClusterVersion().onOrAfter(Version.V_6_2_0)) { - request.addParameter("wait_for_no_initializing_shards", "true"); - } + request.addParameter("wait_for_no_initializing_shards", "true"); Map response = entityAsMap(client().performRequest(request)); assertThat(response.get("timed_out"), equalTo(Boolean.FALSE)); } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java index 03c28c05e616b..035e29ccf771c 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java @@ -34,7 +34,7 @@ public class RollupDateHistoUpgradeIT extends AbstractUpgradeTestCase { Version.fromString(System.getProperty("tests.upgrade_from_version")); public void testDateHistoIntervalUpgrade() throws Exception { - assumeTrue("DateHisto interval changed in 7.1", UPGRADE_FROM_VERSION.before(Version.V_8_0_0)); // TODO change this after backport + assumeTrue("DateHisto interval changed in 7.1", UPGRADE_FROM_VERSION.before(Version.V_7_2_0)); switch (CLUSTER_TYPE) { case OLD: break; diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml index e453014258a24..4d732015d47f4 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -1,3 +1,8 @@ +setup: + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" + --- "Test old cluster datafeed without aggs": - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml index bce9c25c08c03..2a7b56adb9a16 100644 --- 
a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml @@ -1,3 +1,8 @@ +setup: + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" + --- "Put job and datafeed without aggs in old cluster": @@ -48,8 +53,8 @@ --- "Put job and datafeed with aggs in old cluster - pre-deprecated interval": - skip: - version: "8.0.0 - " #TODO change this after backport - reason: calendar_interval introduced in 7.1.0 + version: "all" #TODO change this after backport + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258; calendar_interval introduced in 7.2.0" - do: ml.put_job: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index 5dc71ecb0679e..4b742e10de61f 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -1,4 +1,8 @@ setup: + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" + - do: cluster.health: wait_for_status: green diff --git a/x-pack/qa/security-migrate-tests/build.gradle b/x-pack/qa/security-migrate-tests/build.gradle deleted file mode 100644 index 1851f0e21b027..0000000000000 --- a/x-pack/qa/security-migrate-tests/build.gradle +++ /dev/null @@ -1,43 +0,0 @@ -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' - -dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" - testCompile project(path: xpackModule('security'), configuration: 'runtime') - testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') -} - -integTestCluster { - setting 'xpack.security.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - extraConfigFile 'roles.yml', 'roles.yml' - [ - test_admin: 'superuser', - transport_user: 'superuser', - existing: 'superuser', - bob: 'actual_role' - ].each { String user, String role -> - setupCommand 'setupUser#' + user, - 'bin/elasticsearch-users', 'useradd', user, '-p', 'x-pack-test-password', '-r', role - } - waitCondition = { node, ant -> - File tmpFile = new File(node.cwd, 'wait.success') - ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", - dest: tmpFile.toString(), - username: 'test_admin', - password: 'x-pack-test-password', - ignoreerrors: true, - retries: 10) - return tmpFile.exists() - } - // TODO: systemProperty('tests.cluster', "${-> cluster.transportPortURI }") when migerating to testclusters -} - -testingConventions { - naming.clear() - naming { - IT { - baseClass 'org.elasticsearch.xpack.security.MigrateToolTestCase' - } - } -} diff --git a/x-pack/qa/security-migrate-tests/roles.yml b/x-pack/qa/security-migrate-tests/roles.yml deleted file mode 100644 index 6e997383f8a5a..0000000000000 --- a/x-pack/qa/security-migrate-tests/roles.yml +++ /dev/null @@ -1,22 +0,0 @@ -# A role that has all sorts of configuration: -# - it can monitor the cluster -# - for index1 and index2 it can do CRUD things and refresh -# - for other indices it has search-only privileges -actual_role: - run_as: [ "joe" ] 
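Looking back at the transforms_start_stop.yml and transforms_stats.yml hunks above: the exact-string matches on indexer_state were loosened to the regex form "/started|indexing/" because a running transform can legitimately report either state depending on when the stats API samples it. A rough Java-side equivalent of that assertion is sketched below; the class and method names are illustrative and not part of this change.

import java.util.regex.Pattern;

// Illustrative sketch of the "/started|indexing/" YAML matcher: accept either state,
// since the indexer may already be mid-search when the stats API is called.
public class IndexerStateMatcherSketch {

    private static final Pattern STARTED_OR_INDEXING = Pattern.compile("started|indexing");

    static void assertStartedOrIndexing(String indexerState) {
        if (STARTED_OR_INDEXING.matcher(indexerState).matches() == false) {
            throw new AssertionError("expected indexer_state of started or indexing but was [" + indexerState + "]");
        }
    }

    public static void main(String[] args) {
        assertStartedOrIndexing("started");
        assertStartedOrIndexing("indexing");
    }
}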
- cluster: - - monitor - indices: - - names: [ "index1", "index2" ] - privileges: [ "read", "write", "create_index", "indices:admin/refresh" ] - field_security: - grant: - - foo - - bar - query: - bool: - must_not: - match: - hidden: true - - names: "*" - privileges: [ "read" ] diff --git a/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java b/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java deleted file mode 100644 index 3581bf2fda7fd..0000000000000 --- a/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security; - -import joptsimple.OptionParser; -import joptsimple.OptionSet; - -import org.elasticsearch.cli.MockTerminal; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.Environment; -import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; -import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; -import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; -import org.elasticsearch.xpack.core.security.authc.support.Hasher; -import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; -import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; -import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; -import org.elasticsearch.xpack.core.security.client.SecurityClient; -import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool; -import org.junit.Before; - -import java.nio.file.Path; -import java.util.Arrays; -import java.util.Collections; - -import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; -import static org.hamcrest.Matchers.containsString; - -/** - * Integration tests for the {@code elasticsearch-migrate} shell command - */ -public class MigrateToolIT extends MigrateToolTestCase { - - @Before - public void setupUpTest() throws Exception { - Client client = getClient(); - SecurityClient c = new SecurityClient(client); - - // Add an existing user so the tool will skip it - PutUserResponse pur = c.preparePutUser("existing", "s3kirt".toCharArray(), Hasher.BCRYPT, "role1", "user").get(); - assertTrue(pur.created()); - } - - public void testRunMigrateTool() throws Exception { - final String testConfigDir = System.getProperty("tests.config.dir"); - logger.info("--> CONF: {}", testConfigDir); - final Path configPath = PathUtils.get(testConfigDir); - Settings settings = Settings.builder().put("path.home", configPath.getParent()).build(); - // Cluster should already be up - String url = "http://" + getHttpURL(); - logger.info("--> using URL: {}", url); - MockTerminal t = new MockTerminal(); - ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles(); - 
OptionParser parser = muor.getParser(); - - OptionSet options = parser.parse("-u", "test_admin", "-p", "x-pack-test-password", "-U", url); - muor.execute(t, options, new Environment(settings, configPath)); - - logger.info("--> output:\n{}", t.getOutput()); - - Client client = getClient(); - SecurityClient c = new SecurityClient(client); - - // Check that the migrated user can be retrieved - GetUsersResponse resp = c.prepareGetUsers("bob").get(); - assertTrue("user 'bob' should exist", resp.hasUsers()); - User bob = resp.users()[0]; - assertEquals(bob.principal(), "bob"); - assertArrayEquals(bob.roles(), new String[]{"actual_role"}); - - // Make sure the existing user did not change - resp = c.prepareGetUsers("existing").get(); - assertTrue("user should exist", resp.hasUsers()); - User existing = resp.users()[0]; - assertEquals(existing.principal(), "existing"); - assertArrayEquals(existing.roles(), new String[]{"role1", "user"}); - - // Make sure the "actual_role" made it in and is correct - GetRolesResponse roleResp = c.prepareGetRoles().names("actual_role").get(); - assertTrue("role should exist", roleResp.hasRoles()); - RoleDescriptor rd = roleResp.roles()[0]; - assertNotNull(rd); - assertEquals(rd.getName(), "actual_role"); - assertArrayEquals(rd.getClusterPrivileges(), new String[]{"monitor"}); - assertArrayEquals(rd.getRunAs(), new String[]{"joe"}); - RoleDescriptor.IndicesPrivileges[] ips = rd.getIndicesPrivileges(); - assertEquals(ips.length, 2); - for (RoleDescriptor.IndicesPrivileges ip : ips) { - final FieldPermissions fieldPermissions = new FieldPermissions( - new FieldPermissionsDefinition(ip.getGrantedFields(), ip.getDeniedFields())); - if (Arrays.equals(ip.getIndices(), new String[]{"index1", "index2"})) { - assertArrayEquals(ip.getPrivileges(), new String[]{"read", "write", "create_index", "indices:admin/refresh"}); - assertTrue(fieldPermissions.hasFieldLevelSecurity()); - assertTrue(fieldPermissions.grantsAccessTo("bar")); - assertTrue(fieldPermissions.grantsAccessTo("foo")); - assertNotNull(ip.getQuery()); - assertThat(ip.getQuery().iterator().next().utf8ToString(), - containsString("{\"bool\":{\"must_not\":{\"match\":{\"hidden\":true}}}}")); - } else { - assertArrayEquals(ip.getIndices(), new String[]{"*"}); - assertArrayEquals(ip.getPrivileges(), new String[]{"read"}); - assertFalse(fieldPermissions.hasFieldLevelSecurity()); - assertNull(ip.getQuery()); - } - } - - // Check that bob can access the things the "actual_role" says he can - String token = basicAuthHeaderValue("bob", new SecureString("x-pack-test-password".toCharArray())); - // Create "index1" index and try to search from it as "bob" - client.filterWithHeader(Collections.singletonMap("Authorization", token)).admin().indices().prepareCreate("index1").get(); - // Wait for the index to be ready so it doesn't fail if no shards are initialized - client.admin().cluster().health(Requests.clusterHealthRequest("index1") - .timeout(TimeValue.timeValueSeconds(30)) - .waitForYellowStatus() - .waitForEvents(Priority.LANGUID) - .waitForNoRelocatingShards(true)) - .actionGet(); - client.filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("index1").get(); - } -} diff --git a/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolTestCase.java b/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolTestCase.java deleted file mode 100644 index 0111aeff4cca2..0000000000000 --- 
a/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolTestCase.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -import org.elasticsearch.xpack.core.security.SecurityField; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; - -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.file.Path; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.hamcrest.Matchers.notNullValue; - -/** - * {@link MigrateToolTestCase} is an abstract base class to run integration - * tests against an external Elasticsearch Cluster. - *
<p> - * You can define a list of transport addresses from where you can reach your cluster - by setting "tests.cluster" system property. It defaults to "localhost:9300". - * <p> - * All tests can be run from maven using mvn install as maven will start an external cluster first. - * <p>
- * If you want to debug this module from your IDE, then start an external cluster by yourself - * then run JUnit. If you changed the default port, set "tests.cluster=localhost:PORT" when running - * your test. - */ -@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") -public abstract class MigrateToolTestCase extends LuceneTestCase { - - /** - * Key used to eventually switch to using an external cluster and provide its transport addresses - */ - public static final String TESTS_CLUSTER = "tests.cluster"; - - /** - * Key used to eventually switch to using an external cluster and provide its transport addresses - */ - public static final String TESTS_HTTP_CLUSTER = "tests.rest.cluster"; - - /** - * Defaults to localhost:9300 - */ - public static final String TESTS_CLUSTER_DEFAULT = "localhost:9300"; - - protected static final Logger logger = LogManager.getLogger(MigrateToolTestCase.class); - - private static final AtomicInteger counter = new AtomicInteger(); - private static Client client; - private static String clusterAddresses; - private static String clusterHttpAddresses; - - private static Client startClient(Path tempDir, TransportAddress... transportAddresses) { - logger.info("--> Starting Elasticsearch Java TransportClient {}, {}", transportAddresses, tempDir); - - Settings clientSettings = Settings.builder() - .put("cluster.name", "qa_migrate_tests_" + counter.getAndIncrement()) - .put("client.transport.ignore_cluster_name", true) - .put("path.home", tempDir) - .put(SecurityField.USER_SETTING.getKey(), "transport_user:x-pack-test-password") - .build(); - - TransportClient client = new PreBuiltXPackTransportClient(clientSettings).addTransportAddresses(transportAddresses); - Exception clientException = null; - try { - logger.info("--> Elasticsearch Java TransportClient started"); - ClusterHealthResponse health = client.admin().cluster().prepareHealth().get(); - logger.info("--> connected to [{}] cluster which is running [{}] node(s).", - health.getClusterName(), health.getNumberOfNodes()); - } catch (Exception e) { - clientException = e; - } - - assumeNoException("Sounds like your cluster is not running at " + clusterAddresses, clientException); - - return client; - } - - private static Client startClient() throws UnknownHostException { - String[] stringAddresses = clusterAddresses.split(","); - TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; - int i = 0; - for (String stringAddress : stringAddresses) { - int lastColon = stringAddress.lastIndexOf(":"); - if (lastColon == -1) { - throw new IllegalArgumentException("address [" + clusterAddresses + "] not valid"); - } - String ip = stringAddress.substring(0, lastColon); - String port = stringAddress.substring(lastColon + 1); - try { - transportAddresses[i++] = new TransportAddress(InetAddress.getByName(ip), Integer.valueOf(port)); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("port is not valid, expected number but was [" + port + "]"); - } - } - return startClient(createTempDir(), transportAddresses); - } - - public static Client getClient() { - if (client == null) { - try { - client = startClient(); - } catch (UnknownHostException e) { - logger.error("could not start the client", e); - } - assertThat(client, notNullValue()); - } - return client; - } - - public static String getHttpURL() { - return clusterHttpAddresses; - } - - @BeforeClass - public static void initializeSettings() throws UnknownHostException { - clusterAddresses = 
System.getProperty(TESTS_CLUSTER); - clusterHttpAddresses = System.getProperty(TESTS_HTTP_CLUSTER); - if (clusterAddresses == null || clusterAddresses.isEmpty()) { - throw new UnknownHostException("unable to get a cluster address"); - } - } - - @AfterClass - public static void stopTransportClient() { - if (client != null) { - client.close(); - client = null; - } - } - - @Before - public void defineIndexName() { - doClean(); - } - - @After - public void cleanIndex() { - doClean(); - } - - private void doClean() { - if (client != null) { - try { - client.admin().indices().prepareDelete("_all").get(); - } catch (Exception e) { - // We ignore this cleanup exception - } - } - } -} diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash b/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash index c267744194a1c..bafe7d9342f0e 100644 --- a/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash +++ b/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash @@ -17,7 +17,6 @@ verify_xpack_installation() { 'elasticsearch-certgen' 'elasticsearch-certutil' 'elasticsearch-croneval' - 'elasticsearch-migrate' 'elasticsearch-saml-metadata' 'elasticsearch-setup-passwords' 'elasticsearch-sql-cli'
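As a footnote to the FullClusterRestartIT hunks above, where the version guard around wait_for_no_initializing_shards was dropped: the cluster-health wait in those tests is built with the low-level REST client's Request/addParameter API. A standalone sketch of that pattern follows; the host, port, and timeout values are assumptions for illustration.

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

// Sketch of the cluster-health wait used in FullClusterRestartIT, now unconditional because
// every supported upgrade-from version understands wait_for_no_initializing_shards.
public class ClusterHealthWaitSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("GET", "/_cluster/health");
            request.addParameter("wait_for_status", "yellow");
            request.addParameter("wait_for_no_relocating_shards", "true");
            request.addParameter("wait_for_no_initializing_shards", "true");
            request.addParameter("timeout", "30s");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}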