diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 985c70a39a091..2617baadba013 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -162,7 +162,12 @@ cd elasticsearch/
 ./gradlew assemble
 ```
 
-You will find the newly built packages under: `./distribution/(deb|rpm|tar|zip)/build/distributions/`.
+The package distributions (Debian and RPM) can be found under:
+`./distribution/packages/(deb|rpm)/build/distributions/`
+
+The archive distributions (tar and zip) can be found under:
+`./distribution/archives/(tar|zip)/build/distributions/`
+
 Before submitting your changes, run the test suite to make sure that nothing is broken, with:
 
diff --git a/README.textile b/README.textile
index f17958262d8e4..c964e31655dc8 100644
--- a/README.textile
+++ b/README.textile
@@ -192,7 +192,7 @@ h3. Distributed, Highly Available
 
 Let's face it, things will fail....
 
-Elasticsearch is a highly available and distributed search engine. Each index is broken down into shards, and each shard can have one or more replica. By default, an index is created with 5 shards and 1 replica per shard (5/1). There are many topologies that can be used, including 1/10 (improve search performance), or 20/1 (improve indexing performance, with search executed in a map reduce fashion across shards).
+Elasticsearch is a highly available and distributed search engine. Each index is broken down into shards, and each shard can have one or more replicas. By default, an index is created with 5 shards and 1 replica per shard (5/1). There are many topologies that can be used, including 1/10 (improve search performance), or 20/1 (improve indexing performance, with search executed in a map reduce fashion across shards).
 
 In order to play with the distributed nature of Elasticsearch, simply bring more nodes up and shut down nodes. The system will continue to serve requests (make sure you use the correct http port) with the latest data indexed.
 
diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index 37f7962ff0c96..f1e444dbde6e3 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -264,6 +264,8 @@
 The REST layer is tested through specific tests that are shared between all
 the elasticsearch official clients and consist of YAML files that describe the
 operations to be executed and the obtained results that need to be tested.
 
+The YAML files support various operators defined in the link:/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc[rest-api-spec] and adhere to the link:/rest-api-spec/README.markdown[Elasticsearch REST API JSON specification].
+
 The REST tests are run automatically when executing the "./gradlew check" command.
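A minimal sketch of what such a YAML test could look like (file name and values hypothetical; the `do` and `match` operators are among those defined in the rest-api-spec README):

```yaml
---
"Basic get":
  - do:
      index:
        index: test_1
        type:  test
        id:    1
        body:  { foo: bar }

  - do:
      get:
        index: test_1
        type:  test
        id:    1

  - match: { _source.foo: bar }
```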
 To run only the REST tests use the following command:
diff --git a/build.gradle b/build.gradle
index 42d6f7e278709..94823e0ce5b1a 100644
--- a/build.gradle
+++ b/build.gradle
@@ -103,7 +103,7 @@ allprojects {
     isIdea = System.getProperty("idea.active") != null || gradle.startParameter.taskNames.contains('idea') || gradle.startParameter.taskNames.contains('cleanIdea')
 
     // for BWC testing
-    versionCollection = versions
+    bwcVersions = versions
 
     buildMetadata = buildMetadataMap
   }
@@ -122,13 +122,13 @@ task verifyVersions {
       Set knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d\.\d\.\d/ }.collect { Version.fromString(it) })
 
       // Limit the known versions to those that should be index compatible, and are not future versions
-      knownVersions = knownVersions.findAll { it.major >= versions.currentVersion.major - 1 && it.before(VersionProperties.elasticsearch) }
+      knownVersions = knownVersions.findAll { it.major >= bwcVersions.currentVersion.major - 1 && it.before(VersionProperties.elasticsearch) }
 
       /* Limit the listed versions to those that have been marked as released.
        * Versions not marked as released don't get the same testing and we want
        * to make sure that we flip all unreleased versions to released as soon
        * as possible after release. */
-      Set actualVersions = new TreeSet<>(versions.versionsIndexCompatibleWithCurrent.findAll { false == it.snapshot })
+      Set actualVersions = new TreeSet<>(bwcVersions.indexCompatible.findAll { false == it.snapshot })
 
       // Finally, compare!
       if (knownVersions.equals(actualVersions) == false) {
@@ -144,13 +144,24 @@ task verifyVersions {
  * the enabled state of every bwc task. It should be set back to true
  * after the backport of the backcompat code is complete. */
-allprojects {
-  ext.bwc_tests_enabled = true
+final boolean bwc_tests_enabled = true
+final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */
+if (bwc_tests_enabled == false) {
+  if (bwc_tests_disabled_issue.isEmpty()) {
+    throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false")
+  }
+  println "========================= WARNING ========================="
+  println "         Backwards compatibility tests are disabled!"
+  println "See ${bwc_tests_disabled_issue}"
+  println "==========================================================="
+}
+subprojects {
+  ext.bwc_tests_enabled = bwc_tests_enabled
 }
 
 task verifyBwcTestsEnabled {
   doLast {
-    if (project.bwc_tests_enabled == false) {
+    if (bwc_tests_enabled == false) {
       throw new GradleException('Bwc tests are disabled.
They must be re-enabled after completing backcompat behavior backporting.') } } @@ -192,11 +203,11 @@ subprojects { "org.elasticsearch.client:test:${version}": ':client:test', "org.elasticsearch.client:transport:${version}": ':client:transport', "org.elasticsearch.test:framework:${version}": ':test:framework', - "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip', - "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip', - "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar', - "org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm', - "org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb', + "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:archives:integ-test-zip', + "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:archives:zip', + "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:archives:tar', + "org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:packages:rpm', + "org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:packages:deb', "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage', // for transport client "org.elasticsearch.plugin:transport-netty4-client:${version}": ':modules:transport-netty4', @@ -208,13 +219,14 @@ subprojects { "org.elasticsearch.plugin:rank-eval-client:${version}": ':modules:rank-eval', ] - for (final Version version : versionCollection.versionsIndexCompatibleWithCurrent) { - if (version.branch != null) { - final String snapshotProject = ":distribution:bwc-snapshot-${version.branch}" - project(snapshotProject).ext.bwcVersion = version - ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${version}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${version}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${version}"] = snapshotProject + bwcVersions.snapshotProjectNames.each { snapshotName -> + Version snapshot = bwcVersions.getSnapshotForProject(snapshotName) + if (snapshot != null ) { + String snapshotProject = ":distribution:bwc:${snapshotName}" + project(snapshotProject).ext.bwcVersion = snapshot + ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${snapshot}"] = snapshotProject + ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${snapshot}"] = snapshotProject + ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${snapshot}"] = snapshotProject } } @@ -245,7 +257,7 @@ subprojects { } } project.configurations.compile.dependencies.findAll().toSorted(sortClosure).each(depJavadocClosure) - project.configurations.provided.dependencies.findAll().toSorted(sortClosure).each(depJavadocClosure) + project.configurations.compileOnly.dependencies.findAll().toSorted(sortClosure).each(depJavadocClosure) } } } diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy index da25afa938916..3b97cfb5f4881 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy +++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy @@ -21,9 +21,14 @@ package com.carrotsearch.gradle.junit4 
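+// The progress reporting below replaces the single ProgressLogger with a parent operation plus child
+// operations: one logger for suite progress, one for aggregate test counts, and one per forked JVM
+// ("slave"), so each JVM's current suite/test gets its own status line.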
import com.carrotsearch.ant.tasks.junit4.JUnit4 import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.eventbus.Subscribe +import com.carrotsearch.ant.tasks.junit4.events.TestStartedEvent +import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedQuitEvent import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedStartEvent import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteResultEvent +import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteStartedEvent import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultEvent +import com.carrotsearch.ant.tasks.junit4.events.aggregated.ChildBootstrap +import com.carrotsearch.ant.tasks.junit4.events.aggregated.HeartBeatEvent import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener import org.gradle.internal.logging.progress.ProgressLogger import org.gradle.internal.logging.progress.ProgressLoggerFactory @@ -34,7 +39,6 @@ import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.FAI import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.IGNORED import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.IGNORED_ASSUMPTION import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.OK -import static java.lang.Math.max /** * Adapts junit4's event listeners into gradle's ProgressLogger. Note that @@ -54,137 +58,118 @@ import static java.lang.Math.max class TestProgressLogger implements AggregatedEventListener { /** Factory to build a progress logger when testing starts */ ProgressLoggerFactory factory - ProgressLogger progressLogger + ProgressLogger parentProgressLogger + ProgressLogger suiteLogger + ProgressLogger testLogger + ProgressLogger[] slaveLoggers int totalSuites int totalSlaves - // sprintf formats used to align the integers we print - String suitesFormat - String slavesFormat - String testsFormat - // Counters incremented test completion. volatile int suitesCompleted = 0 volatile int testsCompleted = 0 volatile int testsFailed = 0 volatile int testsIgnored = 0 - // Information about the last, most interesting event. - volatile String eventDescription - volatile int eventSlave - volatile long eventExecutionTime - - /** Have we finished a whole suite yet? */ - volatile boolean suiteFinished = false - /* Note that we probably overuse volatile here but it isn't hurting us and - lets us move things around without worrying about breaking things. */ - @Subscribe void onStart(AggregatedStartEvent e) throws IOException { totalSuites = e.suiteCount totalSlaves = e.slaveCount - progressLogger = factory.newOperation(TestProgressLogger) - progressLogger.setDescription('Randomized test runner') - progressLogger.started() - progressLogger.progress( - "Starting JUnit4 for ${totalSuites} suites on ${totalSlaves} jvms") - - suitesFormat = "%0${widthForTotal(totalSuites)}d" - slavesFormat = "%-${widthForTotal(totalSlaves)}s" - /* Just guess the number of tests because we can't figure it out from - here and it isn't worth doing anything fancy to prevent the console - from jumping around a little. 200 is a pretty wild guess for the - minimum but it makes REST tests output sanely. 
*/ - int totalNumberOfTestsGuess = max(200, totalSuites * 10) - testsFormat = "%0${widthForTotal(totalNumberOfTestsGuess)}d" + parentProgressLogger = factory.newOperation(TestProgressLogger) + parentProgressLogger.setDescription('Randomized test runner') + parentProgressLogger.started() + + suiteLogger = factory.newOperation(TestProgressLogger, parentProgressLogger) + suiteLogger.setDescription('Suite logger') + suiteLogger.started("Suites: 0/" + totalSuites) + testLogger = factory.newOperation(TestProgressLogger, parentProgressLogger) + testLogger.setDescription('Test logger') + testLogger.started('Tests: completed: 0, failed: 0, ignored: 0') + slaveLoggers = new ProgressLogger[e.slaveCount] + for (int i = 0; i < e.slaveCount; ++i) { + slaveLoggers[i] = factory.newOperation(TestProgressLogger, parentProgressLogger) + slaveLoggers[i].setDescription("J${i} test logger") + slaveLoggers[i].started("J${i}: initializing...") + } + } + + @Subscribe + void onChildBootstrap(ChildBootstrap e) throws IOException { + slaveLoggers[e.getSlave().id].progress("J${e.slave.id}: starting (pid ${e.slave.pidString})") + } + + @Subscribe + void onQuit(AggregatedQuitEvent e) throws IOException { + suiteLogger.completed() + testLogger.completed() + for (ProgressLogger slaveLogger : slaveLoggers) { + slaveLogger.completed() + } + parentProgressLogger.completed() + } + + @Subscribe + void onSuiteStart(AggregatedSuiteStartedEvent e) throws IOException { + String suiteName = simpleName(e.suiteStartedEvent.description.className) + slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${suiteName} - initializing") + } + + @Subscribe + void onSuiteResult(AggregatedSuiteResultEvent e) throws IOException { + suitesCompleted++ + suiteLogger.progress("Suites: " + suitesCompleted + "/" + totalSuites) } @Subscribe void onTestResult(AggregatedTestResultEvent e) throws IOException { + final String statusMessage testsCompleted++ switch (e.status) { case ERROR: case FAILURE: testsFailed++ + statusMessage = "failed" break case IGNORED: case IGNORED_ASSUMPTION: testsIgnored++ + statusMessage = "ignored" break case OK: + String time = formatDurationInSeconds(e.executionTime) + statusMessage = "completed [${time}]" break default: - throw new IllegalArgumentException( - "Unknown test status: [${e.status}]") + throw new IllegalArgumentException("Unknown test status: [${e.status}]") } - if (!suiteFinished) { - updateEventInfo(e) - } - - log() + testLogger.progress("Tests: completed: ${testsCompleted}, failed: ${testsFailed}, ignored: ${testsIgnored}") + String testName = simpleName(e.description.className) + '.' + e.description.methodName + slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} ${statusMessage}") } @Subscribe - void onSuiteResult(AggregatedSuiteResultEvent e) throws IOException { - suitesCompleted++ - suiteFinished = true - updateEventInfo(e) - log() + void onTestStarted(TestStartedEvent e) throws IOException { + String testName = simpleName(e.description.className) + '.' + e.description.methodName + slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} ...") } - /** - * Update the suite information with a junit4 event. 
- */ - private void updateEventInfo(Object e) { - eventDescription = simpleName(e.description.className) - if (e.description.methodName != null) { - eventDescription += "#${e.description.methodName}" - } - eventSlave = e.slave.id - eventExecutionTime = e.executionTime + @Subscribe + void onHeartbeat(HeartBeatEvent e) throws IOException { + String testName = simpleName(e.description.className) + '.' + e.description.methodName + String time = formatDurationInSeconds(e.getNoEventDuration()) + slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} stalled for ${time}") } /** * Extract a Class#getSimpleName style name from Class#getName style * string. We can't just use Class#getSimpleName because junit descriptions - * don't alway s set the class field but they always set the className + * don't always set the class field but they always set the className * field. */ private static String simpleName(String className) { return className.substring(className.lastIndexOf('.') + 1) } - private void log() { - /* Remember that instances of this class are only ever active on one - thread at a time so there really aren't race conditions here. It'd be - OK if there were because they'd only display an overcount - temporarily. */ - String log = '' - if (totalSuites > 1) { - /* Skip printing the suites to save space when there is only a - single suite. This is nice because when there is only a single - suite we log the method name and those can be long. */ - log += sprintf("Suites [${suitesFormat}/${suitesFormat}], ", - [suitesCompleted, totalSuites]) - } - log += sprintf("Tests [${testsFormat}|%d|%d], ", - [testsCompleted, testsFailed, testsIgnored]) - log += "in ${formatDurationInSeconds(eventExecutionTime)} " - if (totalSlaves > 1) { - /* Skip printing the slaves if there is only one of them. This is - nice because when there is only a single slave there is often - only a single suite and we could use the extra space to log the - test method names. */ - log += "J${sprintf(slavesFormat, eventSlave)} " - } - log += "completed ${eventDescription}" - progressLogger.progress(log) - } - - private static int widthForTotal(int total) { - return ((total - 1) as String).length() - } - @Override void setOuter(JUnit4 junit) {} } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 07218af3da95f..4a8f657635805 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -76,7 +76,6 @@ class BuildPlugin implements Plugin { project.pluginManager.apply('nebula.info-java') project.pluginManager.apply('nebula.info-scm') project.pluginManager.apply('nebula.info-jar') - project.pluginManager.apply(ProvidedBasePlugin) globalBuildInfo(project) configureRepositories(project) @@ -261,6 +260,9 @@ class BuildPlugin implements Plugin { * to iterate the transitive dependencies and add excludes. */ static void configureConfigurations(Project project) { + // we want to test compileOnly deps! 
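+        // (testCompile extending compileOnly means, for example, that a plugin's compile-only dependency
+        // on the elasticsearch server jar is still visible on the unit test classpath)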
+ project.configurations.testCompile.extendsFrom(project.configurations.compileOnly) + // we are not shipping these jars, we act like dumb consumers of these things if (project.path.startsWith(':test:fixtures') || project.path == ':build-tools') { return @@ -297,7 +299,7 @@ class BuildPlugin implements Plugin { project.configurations.compile.dependencies.all(disableTransitiveDeps) project.configurations.testCompile.dependencies.all(disableTransitiveDeps) - project.configurations.provided.dependencies.all(disableTransitiveDeps) + project.configurations.compileOnly.dependencies.all(disableTransitiveDeps) } /** Adds repositories used by ES dependencies */ @@ -665,7 +667,7 @@ class BuildPlugin implements Plugin { // only require dependency licenses for non-elasticsearch deps project.dependencyLicenses.dependencies = project.configurations.runtime.fileCollection { it.group.startsWith('org.elasticsearch') == false - } - project.configurations.provided + } - project.configurations.compileOnly } private static configureDependenciesInfo(Project project) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/Version.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/Version.groovy index 7dbd943b62d6a..419d3792bb616 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/Version.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/Version.groovy @@ -34,24 +34,29 @@ public class Version { final int revision final int id final boolean snapshot - final String branch /** - * Suffix on the version name. Unlike Version.java the build does not - * consider alphas and betas different versions, it just preserves the - * suffix that the version was declared with in Version.java. + * Suffix on the version name. */ final String suffix public Version(int major, int minor, int revision, - String suffix, boolean snapshot, String branch) { + String suffix, boolean snapshot) { this.major = major this.minor = minor this.revision = revision this.snapshot = snapshot this.suffix = suffix - this.branch = branch - this.id = major * 100000 + minor * 1000 + revision * 10 + - (snapshot ? 1 : 0) + + int suffixOffset = 0 + if (suffix.contains("alpha")) { + suffixOffset += Integer.parseInt(suffix.substring(6)) + } else if (suffix.contains("beta")) { + suffixOffset += 25 + Integer.parseInt(suffix.substring(5)) + } else if (suffix.contains("rc")) { + suffixOffset += 50 + Integer.parseInt(suffix.substring(3)); + } + + this.id = major * 1000000 + minor * 10000 + revision * 100 + suffixOffset } public static Version fromString(String s) { @@ -60,7 +65,7 @@ public class Version { throw new InvalidUserDataException("Invalid version [${s}]") } return new Version(m.group(1) as int, m.group(2) as int, - m.group(3) as int, m.group(4) ?: '', m.group(5) != null, null) + m.group(3) as int, m.group(4) ?: '', m.group(5) != null) } @Override diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy index a6c151ee59275..7d5b793254fe4 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy @@ -20,164 +20,334 @@ package org.elasticsearch.gradle import org.gradle.api.GradleException +import org.gradle.api.InvalidUserDataException import java.util.regex.Matcher /** * The collection of version constants declared in Version.java, for use in BWC testing. 
+ *
+ * if major+1 released: released artifacts from $version down to major-1.highestMinor.highestPatch, none of these should be snapshots, period.
+ * if major+1 unreleased:
+ * - if released:
+ * -- caveat 0: snapshot for the major-1.highestMinor.highestPatch
+ * - if unreleased:
+ * -- caveat 0: snapshot for the major-1.highestMinor.highestPatch
+ * -- caveat 1: every lower minor branch of the same major should also be tested if it's released, and if not, it's a snapshot. There should only be max 2 of these.
+ * -- caveat 2: the largest released minor branch before the unreleased minor should also be a snapshot
+ * -- caveat 3: if the current version is a different major, then the previous rules apply to major - 1 of the current version
+ *
+ * Please note that the caveats also correspond with the 4 types of snapshots.
+ * - Caveat 0 - always maintenanceBugfixSnapshot.
+ * - Caveat 1 - This is tricky. If caveat 3 applies, the highest matching value is the nextMinorSnapshot; if there is another, it is the stagedMinorSnapshot.
+ * If caveat 3 does not apply, then the only possible value is the stagedMinorSnapshot.
+ * - Caveat 2 - always nextBugfixSnapshot
+ * - Caveat 3 - this only changes the applicability of Caveat 1
+ *
+ * Notes on terminology:
+ * - The case of major+1 being released is handled through the isReleasableBranch value. If this is false, then the branch is no longer
+ * releasable, meaning we do not test against any snapshots.
+ * - Released is defined as having > 1 suffix-free version in a major.minor series. For instance, only 6.2.0 means unreleased, but
+ * 6.2.0 and 6.2.1 together mean that 6.2.0 was released already.
 */
 class VersionCollection {
 
     private final List versions
-    private final boolean buildSnapshot = System.getProperty("build.snapshot", "true") == "true"
+    Version nextMinorSnapshot
+    Version stagedMinorSnapshot
+    Version nextBugfixSnapshot
+    Version maintenanceBugfixSnapshot
+    final Version currentVersion
+    private final TreeSet versionSet = new TreeSet<>()
+    final List snapshotProjectNames = ['next-minor-snapshot',
+                                       'staged-minor-snapshot',
+                                       'next-bugfix-snapshot',
+                                       'maintenance-bugfix-snapshot']
+
+    // When we roll 8.0 it's very likely these will need to be extracted from this class
+    private final boolean isReleasableBranch = true
 
     /**
-     * Construct a VersionCollection from the lines of the Version.java file.
+     * Construct a VersionCollection from the lines of the Version.java file. The basic logic for the following is pretty straightforward.
+     * @param versionLines The lines of the Version.java file.
      */
     VersionCollection(List versionLines) {
+        final boolean buildSnapshot = System.getProperty("build.snapshot", "true") == "true"
+
         List versions = []
+        // This class should be converted wholesale to use the treeset
 
         for (final String line : versionLines) {
             final Matcher match = line =~ /\W+public static final Version V_(\d+)_(\d+)_(\d+)(_alpha\d+|_beta\d+|_rc\d+)?
.*/ if (match.matches()) { final Version foundVersion = new Version( Integer.parseInt(match.group(1)), Integer.parseInt(match.group(2)), - Integer.parseInt(match.group(3)), (match.group(4) ?: '').replace('_', '-'), false, null) - - if (versions.size() > 0 && foundVersion.onOrBeforeIncludingSuffix(versions[-1])) { - throw new GradleException("Versions.java contains out of order version constants:" + - " ${foundVersion} should come before ${versions[-1]}") - } - - // Only keep the last alpha/beta/rc in the series - if (versions.size() > 0 && versions[-1].id == foundVersion.id) { - versions[-1] = foundVersion - } else { - versions.add(foundVersion) - } + Integer.parseInt(match.group(3)), (match.group(4) ?: '').replace('_', '-'), false) + safeAddToSet(foundVersion) } } - if (versions.empty) { - throw new GradleException("Unexpectedly found no version constants in Versions.java"); + if (versionSet.empty) { + throw new GradleException("Unexpectedly found no version constants in Versions.java") } - /* - * The tip of each minor series (>= 5.6) is unreleased, so they must be built from source (we set branch to non-null), and we set - * the snapshot flag if and only if build.snapshot is true. - */ - Version prevConsideredVersion = null - boolean found6xSnapshot = false - for (final int versionIndex = versions.size() - 1; versionIndex >= 0; versionIndex--) { - final Version currConsideredVersion = versions[versionIndex] - - if (prevConsideredVersion == null - || currConsideredVersion.major != prevConsideredVersion.major - || currConsideredVersion.minor != prevConsideredVersion.minor) { - - // This is a snapshot version. Work out its branch. NB this doesn't name the current branch correctly, but this doesn't - // matter as we don't BWC test against it. - String branch = "${currConsideredVersion.major}.${currConsideredVersion.minor}" - - if (false == found6xSnapshot && currConsideredVersion.major == 6) { - // TODO needs generalising to deal with when 7.x is cut, and when 6.x is deleted, and so on... 
-                branch = "6.x"
-                found6xSnapshot = true
-            }
+        // If the major version has been released, then remove all of the alpha/beta/rc versions that exist in the set
+        versionSet.removeAll { it.suffix.isEmpty() == false && isMajorReleased(it, versionSet) }
 
-                versions[versionIndex] = new Version(
-                    currConsideredVersion.major, currConsideredVersion.minor,
-                    currConsideredVersion.revision, currConsideredVersion.suffix, buildSnapshot, branch)
-            }
+        // set currentVersion
+        Version lastVersion = versionSet.last()
+        currentVersion = new Version(lastVersion.major, lastVersion.minor, lastVersion.revision, lastVersion.suffix, buildSnapshot)
 
-            if (currConsideredVersion.onOrBefore("5.6.0")) {
-                break
-            }
+        // remove all of the potential alpha/beta/rc from the currentVersion
+        versionSet.removeAll {
+            it.suffix.isEmpty() == false &&
+                it.major == currentVersion.major &&
+                it.minor == currentVersion.minor &&
+                it.revision == currentVersion.revision }
+
+        // re-add the currentVersion to the set
+        versionSet.add(currentVersion)
 
-            prevConsideredVersion = currConsideredVersion
+        if (isReleasableBranch) {
+            if (isReleased(currentVersion)) {
+                // caveat 0 - if the minor has been released then it only has a maintenance version
+                // go back 1 version to get the last supported snapshot version of the line, which is a maint bugfix
+                Version highestMinor = getHighestPreviousMinor(currentVersion.major)
+                maintenanceBugfixSnapshot = replaceAsSnapshot(highestMinor)
+            } else {
+                // caveat 3 - if our currentVersion is a X.0.0, we need to check X-1 minors to see if they are released
+                if (currentVersion.minor == 0) {
+                    for (Version version: getMinorTips(currentVersion.major - 1)) {
+                        if (isReleased(version) == false) {
+                            // caveat 1 - This should only ever contain 2 non-released branches in flight. An example: 6.x is frozen
+                            // and 6.2 is cut but not yet released. There is some simple logic to make sure that, in the case of more
+                            // than 2, it will bail. The order is that the next minor snapshot is fulfilled first, and then the staged
+                            // minor snapshot.
+                            if (nextMinorSnapshot == null) {
+                                // it has not been set yet
+                                nextMinorSnapshot = replaceAsSnapshot(version)
+                            } else if (stagedMinorSnapshot == null) {
+                                stagedMinorSnapshot = replaceAsSnapshot(version)
+                            } else {
+                                throw new GradleException("More than 2 snapshot versions existed for the next minor and staged (frozen) minors.")
+                            }
+                        } else {
+                            // caveat 2 - this is the last minor snap for this major, so replace the highest (last) one of these and break
+                            nextBugfixSnapshot = replaceAsSnapshot(version)
+                            // we only care about the largest minor here, so in the case of 6.1 and 6.0, it will only get 6.1
+                            break
+                        }
+                    }
+                    // caveat 0 - now dip back 2 versions to get the last supported snapshot version of the line
+                    Version highestMinor = getHighestPreviousMinor(currentVersion.major - 1)
+                    maintenanceBugfixSnapshot = replaceAsSnapshot(highestMinor)
+                } else {
+                    // caveat 3 did not apply. version is not a X.0.0, so we are somewhere on a X.Y line
+                    // only check till minor == 0 of the major
+                    for (Version version: getMinorTips(currentVersion.major)) {
+                        if (isReleased(version) == false) {
+                            // caveat 1 - This should only ever contain 0 or 1 branch in flight.
An example is 6.x is frozen, and 6.2 is cut + // but not yet released there is some simple logic to make sure that in the case of more than 1, it will bail + if (stagedMinorSnapshot == null) { + stagedMinorSnapshot = replaceAsSnapshot(version) + } else { + throw new GradleException("More than 1 snapshot version existed for the staged (frozen) minors.") + } + } else { + // caveat 2 - this is the last minor snap for this major, so replace the highest (last) one of these and break + nextBugfixSnapshot = replaceAsSnapshot(version) + // we only care about the largest minor here, so in the case of 6.1 and 6.0, it will only get 6.1 + break + } + } + // caveat 0 - now dip back 1 version to get the last supported snapshot version of the line + Version highestMinor = getHighestPreviousMinor(currentVersion.major) + maintenanceBugfixSnapshot = replaceAsSnapshot(highestMinor) + } + } } - this.versions = Collections.unmodifiableList(versions) + this.versions = Collections.unmodifiableList(versionSet.toList()) } /** * @return The list of versions read from the Version.java file */ List getVersions() { - return Collections.unmodifiableList(versions) + return versions } /** - * @return The latest version in the Version.java file, which must be the current version of the system. + * Index compat supports 1 previous entire major version. For instance, any 6.x test for this would test all of 5 up to that 6.x version + * + * @return All earlier versions that should be tested for index BWC with the current version. */ - Version getCurrentVersion() { - return versions[-1] + List getIndexCompatible() { + int actualMajor = (currentVersion.major == 5 ? 2 : currentVersion.major - 1) + return versionSet + .tailSet(Version.fromString("${actualMajor}.0.0")) + .headSet(currentVersion) + .asList() + } + + /** + * Ensures the types of snapshot are not null and are also in the index compat list + */ + List getSnapshotsIndexCompatible() { + List compatSnapshots = [] + List allCompatVersions = getIndexCompatible() + if (allCompatVersions.contains(nextMinorSnapshot)) { + compatSnapshots.add(nextMinorSnapshot) + } + if (allCompatVersions.contains(stagedMinorSnapshot)) { + compatSnapshots.add(stagedMinorSnapshot) + } + if (allCompatVersions.contains(nextBugfixSnapshot)) { + compatSnapshots.add(nextBugfixSnapshot) + } + if (allCompatVersions.contains(maintenanceBugfixSnapshot)) { + compatSnapshots.add(maintenanceBugfixSnapshot) + } + + return compatSnapshots } /** - * @return The snapshot at the end of the previous minor series in the current major series, or null if this is the first minor series. + * Wire compat supports the last minor of the previous major. For instance, any 6.x test would test 5.6 up to that 6.x version + * + * @return All earlier versions that should be tested for wire BWC with the current version. */ - Version getBWCSnapshotForCurrentMajor() { - return getLastSnapshotWithMajor(currentVersion.major) + List getWireCompatible() { + // Get the last minor of the previous major + Version lowerBound = getHighestPreviousMinor(currentVersion.major) + return versionSet + .tailSet(Version.fromString("${lowerBound.major}.${lowerBound.minor}.0")) + .headSet(currentVersion) + .toList() } /** - * @return The snapshot at the end of the previous major series, which must not be null. 
+     * Ensures the types of snapshot are not null and are also in the wire compat list
      */
-    Version getBWCSnapshotForPreviousMajor() {
-        Version version = getLastSnapshotWithMajor(currentVersion.major - 1)
-        assert version != null : "getBWCSnapshotForPreviousMajor(): found no versions in the previous major"
-        return version
+    List getSnapshotsWireCompatible() {
+        List compatSnapshots = []
+        List allCompatVersions = getWireCompatible()
+        if (allCompatVersions.contains(nextMinorSnapshot)) {
+            compatSnapshots.add(nextMinorSnapshot)
+        }
+        if (allCompatVersions.contains(stagedMinorSnapshot)) {
+            compatSnapshots.add(stagedMinorSnapshot)
+        }
+        if (allCompatVersions.contains(nextBugfixSnapshot)) {
+            compatSnapshots.add(nextBugfixSnapshot)
+        }
+        if (allCompatVersions.contains(maintenanceBugfixSnapshot)) {
+            compatSnapshots.add(maintenanceBugfixSnapshot)
+        }
+        // There was no wire compat for the 2.x line
+        compatSnapshots.removeAll {it.major == 2}
+
+        return compatSnapshots
     }
 
-    private Version getLastSnapshotWithMajor(int targetMajor) {
-        final String currentVersion = currentVersion.toString()
-        final int snapshotIndex = versions.findLastIndexOf {
-            it.major == targetMajor && it.before(currentVersion) && it.snapshot == buildSnapshot
+    /**
+     * Grabs the proper snapshot based on the name passed in. These names should correspond with gradle project names under bwc. If you
+     * are editing this if/else, it is only because you added another project under :distribution:bwc. Do not modify this method or its
+     * reasoning for throwing the exception unless you are sure that it will not harm :distribution:bwc.
+     */
+    Version getSnapshotForProject(String snapshotProjectName) {
+        if (snapshotProjectName == 'next-minor-snapshot') {
+            return nextMinorSnapshot
+        } else if (snapshotProjectName == 'staged-minor-snapshot') {
+            return stagedMinorSnapshot
+        } else if (snapshotProjectName == 'maintenance-bugfix-snapshot') {
+            return maintenanceBugfixSnapshot
+        } else if (snapshotProjectName == 'next-bugfix-snapshot') {
+            return nextBugfixSnapshot
+        } else {
+            throw new InvalidUserDataException("Unsupported project name ${snapshotProjectName}")
         }
-        return snapshotIndex == -1 ? null : versions[snapshotIndex]
     }
 
-    private List versionsOnOrAfterExceptCurrent(Version minVersion) {
-        final String minVersionString = minVersion.toString()
-        return Collections.unmodifiableList(versions.findAll {
-            it.onOrAfter(minVersionString) && it != currentVersion
-        })
+    /**
+     * Uses basic logic about our releases to determine if this version has been previously released
+     */
+    private boolean isReleased(Version version) {
+        return version.revision > 0
     }
 
     /**
-     * @return All earlier versions that should be tested for index BWC with the current version.
+     * Validates that the count of non-suffixed (alpha/beta/rc) versions in a given major to major+1 is greater than 1.
+     * This means that there is more than just a major.0.0 or major.0.0-alpha in a branch to signify it has been previously released.
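+     * For example, a set holding only 6.0.0 (or only 6.0.0-alpha1 and 6.0.0) has a single suffix-free 6.x version, so major 6 counts
+     * as unreleased; once 6.0.1 or 6.1.0 is also present the count exceeds 1 and major 6 counts as released.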
*/ - List getVersionsIndexCompatibleWithCurrent() { - final Version firstVersionOfCurrentMajor = versions.find { it.major >= currentVersion.major - 1 } - return versionsOnOrAfterExceptCurrent(firstVersionOfCurrentMajor) + private boolean isMajorReleased(Version version, TreeSet items) { + return items + .tailSet(Version.fromString("${version.major}.0.0")) + .headSet(Version.fromString("${version.major + 1}.0.0")) + .count { it.suffix.isEmpty() } // count only non suffix'd versions as actual versions that may be released + .intValue() > 1 } - private Version getMinimumWireCompatibilityVersion() { - final int firstIndexOfThisMajor = versions.findIndexOf { it.major == currentVersion.major } - if (firstIndexOfThisMajor == 0) { - return versions[0] + /** + * Gets the largest version previous major version based on the nextMajorVersion passed in. + * If you have a list [5.0.2, 5.1.2, 6.0.1, 6.1.1] and pass in 6 for the nextMajorVersion, it will return you 5.1.2 + */ + private Version getHighestPreviousMinor(Integer nextMajorVersion) { + return versionSet.headSet(Version.fromString("${nextMajorVersion}.0.0")).last() + } + + /** + * Helper function for turning a version into a snapshot version, removing and readding it to the tree + */ + private Version replaceAsSnapshot(Version version) { + versionSet.remove(version) + Version snapshotVersion = new Version(version.major, version.minor, version.revision, version.suffix, true) + safeAddToSet(snapshotVersion) + return snapshotVersion + } + + /** + * Safely adds a value to the treeset, or bails if the value already exists. + * @param version + */ + private void safeAddToSet(Version version) { + if (versionSet.add(version) == false) { + throw new GradleException("Versions.java contains duplicate entries for ${version}") } - final Version lastVersionOfEarlierMajor = versions[firstIndexOfThisMajor - 1] - return versions.find { it.major == lastVersionOfEarlierMajor.major && it.minor == lastVersionOfEarlierMajor.minor } } /** - * @return All earlier versions that should be tested for wire BWC with the current version. + * Gets the entire set of major.minor.* given those parameters. + */ + private SortedSet getMinorSetForMajor(Integer major, Integer minor) { + return versionSet + .tailSet(Version.fromString("${major}.${minor}.0")) + .headSet(Version.fromString("${major}.${minor + 1}.0")) + } + + /** + * Gets the entire set of major.* to the currentVersion */ - List getVersionsWireCompatibleWithCurrent() { - return versionsOnOrAfterExceptCurrent(minimumWireCompatibilityVersion) + private SortedSet getMajorSet(Integer major) { + return versionSet + .tailSet(Version.fromString("${major}.0.0")) + .headSet(currentVersion) } /** - * `gradle check` does not run all BWC tests. This defines which tests it does run. - * @return Versions to test for BWC during gradle check. + * Gets the tip of each minor set and puts it in a list. + * + * examples: + * [1.0.0, 1.1.0, 1.1.1, 1.2.0, 1.3.1] will return [1.0.0, 1.1.1, 1.2.0, 1.3.1] + * [1.0.0, 1.0.1, 1.0.2, 1.0.3, 1.0.4] will return [1.0.4] */ - List getBasicIntegrationTestVersions() { - // TODO these are the versions checked by `gradle check` for BWC tests. Their choice seems a litle arbitrary. 
- List result = [BWCSnapshotForPreviousMajor, BWCSnapshotForCurrentMajor] - return Collections.unmodifiableList(result.findAll { it != null }) + private List getMinorTips(Integer major) { + TreeSet majorSet = getMajorSet(major) + List minorList = new ArrayList<>() + for (int minor = majorSet.last().minor; minor >= 0; minor--) { + TreeSet minorSetInMajor = getMinorSetForMajor(major, minor) + minorList.add(minorSetInMajor.last()) + } + return minorList } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy index 3df9b604c1309..4dc355a48608a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy @@ -58,11 +58,9 @@ class MetaPluginBuildPlugin implements Plugin { // create the actual bundle task, which zips up all the files for the plugin Zip bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [buildProperties]) { - into('elasticsearch') { - from(buildProperties.descriptorOutput.parentFile) { - // plugin properties file - include(buildProperties.descriptorOutput.name) - } + from(buildProperties.descriptorOutput.parentFile) { + // plugin properties file + include(buildProperties.descriptorOutput.name) } // due to how the renames work for each bundled plugin, we must exclude empty dirs or every subdir // within bundled plugin zips will show up at the root as an empty dir @@ -85,10 +83,8 @@ class MetaPluginBuildPlugin implements Plugin { dependsOn bundledPluginProject.bundlePlugin from(project.zipTree(bundledPluginProject.bundlePlugin.outputs.files.singleFile)) { eachFile { FileCopyDetails details -> - // paths in the individual plugins begin with elasticsearch, and we want to add in the - // bundled plugin name between that and each filename - details.relativePath = new RelativePath(true, 'elasticsearch', bundledPluginProjectName, - details.relativePath.toString().replace('elasticsearch/', '')) + // we want each path to have the plugin name interjected + details.relativePath = new RelativePath(true, bundledPluginProjectName, details.relativePath.toString()) } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index 950acad9a5eb4..ff3b8f857b543 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -90,15 +90,15 @@ public class PluginBuildPlugin extends BuildPlugin { private static void configureDependencies(Project project) { project.dependencies { - provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}" + compileOnly "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}" testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}" // we "upgrade" these optional deps to provided for plugins, since they will run // with a full elasticsearch server that includes optional deps - provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}" - provided "com.vividsolutions:jts:${project.versions.jts}" - provided "org.apache.logging.log4j:log4j-api:${project.versions.log4j}" - provided "org.apache.logging.log4j:log4j-core:${project.versions.log4j}" - provided 
"org.elasticsearch:jna:${project.versions.jna}" + compileOnly "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}" + compileOnly "com.vividsolutions:jts:${project.versions.jts}" + compileOnly "org.apache.logging.log4j:log4j-api:${project.versions.log4j}" + compileOnly "org.apache.logging.log4j:log4j-core:${project.versions.log4j}" + compileOnly "org.elasticsearch:jna:${project.versions.jna}" } } @@ -133,16 +133,13 @@ public class PluginBuildPlugin extends BuildPlugin { } from pluginMetadata // metadata (eg custom security policy) from project.jar // this plugin's jar - from project.configurations.runtime - project.configurations.provided // the dep jars + from project.configurations.runtime - project.configurations.compileOnly // the dep jars // extra files for the plugin to go into the zip from('src/main/packaging') // TODO: move all config/bin/_size/etc into packaging from('src/main') { include 'config/**' include 'bin/**' } - if (project.path.startsWith(':modules:') == false) { - into('elasticsearch') - } } project.assemble.dependsOn(bundle) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy index 0feed8ccc4e04..6050d4e278dd6 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy @@ -74,10 +74,10 @@ public class NamingConventionsTask extends LoggedExec { "org.elasticsearch.gradle:build-tools:${VersionProperties.elasticsearch}") buildToolsDep.transitive = false // We don't need gradle in the classpath. It conflicts. } - FileCollection extraClasspath = project.configurations.namingConventions - dependsOn(extraClasspath) - - FileCollection classpath = project.sourceSets.test.runtimeClasspath + FileCollection classpath = project.files(project.configurations.namingConventions, + project.sourceSets.test.compileClasspath, + project.sourceSets.test.output) + dependsOn(classpath) inputs.files(classpath) description = "Tests that test classes aren't misnamed or misplaced" executable = new File(project.runtimeJavaHome, 'bin/java') @@ -95,7 +95,7 @@ public class NamingConventionsTask extends LoggedExec { project.afterEvaluate { doFirst { args('-Djna.nosys=true') - args('-cp', (classpath + extraClasspath).asPath, 'org.elasticsearch.test.NamingConventionsCheck') + args('-cp', classpath.asPath, 'org.elasticsearch.test.NamingConventionsCheck') args('--test-class', testClass) if (skipIntegTestInDisguise) { args('--skip-integ-tests-in-disguise') diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy index 33ca6dccfa32e..d6babbbfbb8b2 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy @@ -74,14 +74,19 @@ public class ThirdPartyAuditTask extends AntTask { description = "Checks third party JAR bytecode for missing classes, use of internal APIs, and other horrors'"; project.afterEvaluate { - Configuration configuration = project.configurations.findByName('runtime'); + Configuration configuration = project.configurations.findByName('runtime') + Configuration compileOnly = project.configurations.findByName('compileOnly') if (configuration == 
null) { // some projects apparently do not have 'runtime'? what a nice inconsistency, // basically only serves to waste time in build logic! - configuration = project.configurations.findByName('testCompile'); + configuration = project.configurations.findByName('testCompile') + } + assert configuration != null + if (compileOnly == null) { + classpath = configuration + } else { + classpath = project.files(configuration, compileOnly) } - assert configuration != null; - classpath = configuration // we only want third party dependencies. jars = configuration.fileCollection({ dependency -> @@ -90,9 +95,8 @@ public class ThirdPartyAuditTask extends AntTask { // we don't want provided dependencies, which we have already scanned. e.g. don't // scan ES core's dependencies for every single plugin - Configuration provided = project.configurations.findByName('provided') - if (provided != null) { - jars -= provided + if (compileOnly != null) { + jars -= compileOnly } inputs.files(jars) onlyIf { jars.isEmpty() == false } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 5c008c45b7218..4d6b54fa3bbee 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -601,11 +601,18 @@ class ClusterFormationTasks { } } } + if (ant.properties.containsKey("failed${name}".toString())) { + waitFailed(project, nodes, logger, "Failed to start elasticsearch: timed out after ${waitSeconds} seconds") + } + boolean anyNodeFailed = false for (NodeInfo node : nodes) { - anyNodeFailed |= node.failedMarker.exists() + if (node.failedMarker.exists()) { + logger.error("Failed to start elasticsearch: ${node.failedMarker.toString()} exists") + anyNodeFailed = true + } } - if (ant.properties.containsKey("failed${name}".toString()) || anyNodeFailed) { + if (anyNodeFailed) { waitFailed(project, nodes, logger, 'Failed to start elasticsearch') } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 82e4ac9b71cd0..1683a19fab1bc 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -108,13 +108,18 @@ class VagrantTestPlugin implements Plugin { if (upgradeFromVersion == null) { String firstPartOfSeed = project.rootProject.testSeed.tokenize(':').get(0) final long seed = Long.parseUnsignedLong(firstPartOfSeed, 16) - final def indexCompatVersions = project.versionCollection.versionsIndexCompatibleWithCurrent + final def indexCompatVersions = project.bwcVersions.indexCompatible upgradeFromVersion = indexCompatVersions[new Random(seed).nextInt(indexCompatVersions.size())] } DISTRIBUTION_ARCHIVES.each { // Adds a dependency for the current version - project.dependencies.add(BATS, project.dependencies.project(path: ":distribution:${it}", configuration: 'archives')) + if (it == 'tar') { + it = 'archives:tar' + } else { + it = "packages:${it}" + } + project.dependencies.add(BATS, project.dependencies.project(path: ":distribution:${it}", configuration: 'default')) } UPGRADE_FROM_ARCHIVES.each { diff --git a/buildSrc/src/main/resources/meta-plugin-descriptor.properties 
b/buildSrc/src/main/resources/meta-plugin-descriptor.properties index 16dbe4c38a55b..950cb03240083 100644 --- a/buildSrc/src/main/resources/meta-plugin-descriptor.properties +++ b/buildSrc/src/main/resources/meta-plugin-descriptor.properties @@ -1,13 +1,12 @@ # Elasticsearch meta plugin descriptor file -# This file must exist as 'meta-plugin-descriptor.properties' in a folder named `elasticsearch`. +# This file must exist as 'meta-plugin-descriptor.properties' inside a plugin. # ### example meta plugin for "meta-foo" # # meta-foo.zip <-- zip file for the meta plugin, with this structure: -#|____elasticsearch/ -#| |____ <-- The plugin files for bundled_plugin_1 (the content of the elastisearch directory) -#| |____ <-- The plugin files for bundled_plugin_2 -#| |____ meta-plugin-descriptor.properties <-- example contents below: +# |____ <-- The plugin files for bundled_plugin_1 +# |____ <-- The plugin files for bundled_plugin_2 +# |____ meta-plugin-descriptor.properties <-- example contents below: # # description=My meta plugin # name=meta-foo diff --git a/buildSrc/src/main/resources/plugin-descriptor.properties b/buildSrc/src/main/resources/plugin-descriptor.properties index d9c51b3a73507..dd1a267c0b38e 100644 --- a/buildSrc/src/main/resources/plugin-descriptor.properties +++ b/buildSrc/src/main/resources/plugin-descriptor.properties @@ -1,14 +1,12 @@ # Elasticsearch plugin descriptor file -# This file must exist as 'plugin-descriptor.properties' in a folder named `elasticsearch` -# inside all plugins. +# This file must exist as 'plugin-descriptor.properties' inside a plugin. # ### example plugin for "foo" # # foo.zip <-- zip file for the plugin, with this structure: -#|____elasticsearch/ -#| |____ .jar <-- classes, resources, dependencies -#| |____ .jar <-- any number of jars -#| |____ plugin-descriptor.properties <-- example contents below: +# |____ .jar <-- classes, resources, dependencies +# |____ .jar <-- any number of jars +# |____ plugin-descriptor.properties <-- example contents below: # # classname=foo.bar.BazPlugin # description=My cool plugin diff --git a/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTest.groovy b/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTest.groovy new file mode 100644 index 0000000000000..14f6d1b8523f7 --- /dev/null +++ b/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTest.groovy @@ -0,0 +1,226 @@ +package org.elasticsearch.gradle + +class VersionCollectionTest extends GroovyTestCase { + + String formatVersion(String version) { + return " public static final Version V_${version.replaceAll("\\.", "_")} " + } + def allVersions = [formatVersion('5.0.0'), formatVersion('5.0.0_alpha1'), formatVersion('5.0.0_alpha2'), formatVersion('5.0.0_beta1'), + formatVersion('5.0.0_rc1'),formatVersion('5.0.0_rc2'),formatVersion('5.0.1'), formatVersion('5.0.2'), + formatVersion('5.1.1'), formatVersion('5.1.2'), formatVersion('5.2.0'), formatVersion('5.2.1'), formatVersion('6.0.0'), + formatVersion('6.0.1'), formatVersion('6.1.0'), formatVersion('6.1.1'), formatVersion('6.2.0'), formatVersion('6.3.0'), + formatVersion('7.0.0_alpha1'), formatVersion('7.0.0_alpha2')] + + /** + * This validates the logic of being on a unreleased major branch with a staged major-1.minor sibling. This case happens when a version is + * branched from Major-1.x At the time of this writing 6.2 is unreleased and 6.3 is the 6.x branch. This test simulates the behavior + * from 7.0 perspective, or master at the time of this writing. 
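+     *
+     * Expected per the assertions below: nextMinorSnapshot 6.3.0, stagedMinorSnapshot 6.2.0, nextBugfixSnapshot 6.1.1 and
+     * maintenanceBugfixSnapshot 5.2.1, all as -SNAPSHOT versions.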
+ */ + void testAgainstMajorUnreleasedWithExistingStagedMinorRelease() { + VersionCollection vc = new VersionCollection(allVersions) + assertNotNull(vc) + assertEquals(vc.nextMinorSnapshot, Version.fromString("6.3.0-SNAPSHOT")) + assertEquals(vc.stagedMinorSnapshot, Version.fromString("6.2.0-SNAPSHOT")) + assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.1.1-SNAPSHOT")) + assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) + + vc.indexCompatible.containsAll(vc.versions) + + // This should contain the same list sans the current version + List indexCompatList = [Version.fromString("6.0.0"), Version.fromString("6.0.1"), + Version.fromString("6.1.0"), Version.fromString("6.1.1-SNAPSHOT"), + Version.fromString("6.2.0-SNAPSHOT"), Version.fromString("6.3.0-SNAPSHOT")] + assertTrue(indexCompatList.containsAll(vc.indexCompatible)) + assertTrue(vc.indexCompatible.containsAll(indexCompatList)) + + List wireCompatList = [Version.fromString("6.3.0-SNAPSHOT")] + assertTrue(wireCompatList.containsAll(vc.wireCompatible)) + assertTrue(vc.wireCompatible.containsAll(wireCompatList)) + + assertEquals(vc.snapshotsIndexCompatible.size(), 3) + assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.3.0-SNAPSHOT"))) + assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.2.0-SNAPSHOT"))) + assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.1.1-SNAPSHOT"))) + + assertEquals(vc.snapshotsWireCompatible.size(), 1) + assertEquals(vc.snapshotsWireCompatible.first(), Version.fromString("6.3.0-SNAPSHOT")) + } + + /** + * This validates the logic of being on a unreleased major branch without a staged major-1.minor sibling. This case happens once a staged, + * unreleased minor is released. At the time of this writing 6.2 is unreleased, so adding a 6.2.1 simulates a 6.2 release. This test + * simulates the behavior from 7.0 perspective, or master at the time of this writing. 
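+     *
+     * Expected per the assertions below: nextMinorSnapshot 6.3.0-SNAPSHOT, no stagedMinorSnapshot, nextBugfixSnapshot
+     * 6.2.1-SNAPSHOT and maintenanceBugfixSnapshot 5.2.1-SNAPSHOT.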
+ */ + void testAgainstMajorUnreleasedWithoutStagedMinorRelease() { + List localVersion = allVersions.clone() + localVersion.add(formatVersion('6.2.1')) // release 6.2 + + VersionCollection vc = new VersionCollection(localVersion) + assertNotNull(vc) + assertEquals(vc.nextMinorSnapshot, Version.fromString("6.3.0-SNAPSHOT")) + assertEquals(vc.stagedMinorSnapshot, null) + assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.2.1-SNAPSHOT")) + assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) + + vc.indexCompatible.containsAll(vc.versions) + + // This should contain the same list sans the current version + List indexCompatList = [Version.fromString("6.0.0"), Version.fromString("6.0.1"), + Version.fromString("6.1.0"), Version.fromString("6.1.1"), + Version.fromString("6.2.0"), Version.fromString("6.2.1-SNAPSHOT"), + Version.fromString("6.3.0-SNAPSHOT")] + assertTrue(indexCompatList.containsAll(vc.indexCompatible)) + assertTrue(vc.indexCompatible.containsAll(indexCompatList)) + + List wireCompatList = [Version.fromString("6.3.0-SNAPSHOT")] + assertTrue(wireCompatList.containsAll(vc.wireCompatible)) + assertTrue(vc.wireCompatible.containsAll(wireCompatList)) + + assertEquals(vc.snapshotsIndexCompatible.size(), 2) + assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.3.0-SNAPSHOT"))) + assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.2.1-SNAPSHOT"))) + + assertEquals(vc.snapshotsWireCompatible.size(), 1) + assertEquals(vc.snapshotsWireCompatible.first(), Version.fromString("6.3.0-SNAPSHOT")) + } + + /** + * This validates the logic of being on a unreleased minor branch with a staged minor sibling. This case happens when a version is + * branched from Major.x At the time of this writing 6.2 is unreleased and 6.3 is the 6.x branch. This test simulates the behavior + * from 6.3 perspective. 
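+     *
+     * Expected per the assertions below: no nextMinorSnapshot, stagedMinorSnapshot 6.2.0-SNAPSHOT, nextBugfixSnapshot
+     * 6.1.1-SNAPSHOT and maintenanceBugfixSnapshot 5.2.1-SNAPSHOT.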
+     */
+    void testAgainstMinorReleasedBranch() {
+        List localVersion = allVersions.clone()
+        localVersion.removeAll { it.toString().contains('7_0_0') } // remove all the 7.x so that the actual version is 6.3 (6.x)
+        VersionCollection vc = new VersionCollection(localVersion)
+        assertNotNull(vc)
+        assertEquals(vc.nextMinorSnapshot, null)
+        assertEquals(vc.stagedMinorSnapshot, Version.fromString("6.2.0-SNAPSHOT"))
+        assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.1.1-SNAPSHOT"))
+        assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT"))
+
+        // This should contain the same list sans the current version
+        List indexCompatList = vc.versions.subList(0, vc.versions.size() - 1)
+        assertTrue(indexCompatList.containsAll(vc.indexCompatible))
+        assertTrue(vc.indexCompatible.containsAll(indexCompatList))
+
+        List wireCompatList = [Version.fromString("5.2.0"), Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("6.0.0"),
+                Version.fromString("6.0.1"), Version.fromString("6.1.0"), Version.fromString("6.1.1-SNAPSHOT"),
+                Version.fromString("6.2.0-SNAPSHOT")]
+        assertTrue(wireCompatList.containsAll(vc.wireCompatible))
+        assertTrue(vc.wireCompatible.containsAll(wireCompatList))
+
+        assertEquals(vc.snapshotsIndexCompatible.size(), 3)
+        assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.2.0-SNAPSHOT")))
+        assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.1.1-SNAPSHOT")))
+        assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("5.2.1-SNAPSHOT")))
+
+        assertEquals(vc.snapshotsWireCompatible.size(), 3)
+        assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("6.2.0-SNAPSHOT")))
+        assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("6.1.1-SNAPSHOT")))
+        assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("5.2.1-SNAPSHOT")))
+    }
+
+    /**
+     * This validates the logic of being on an unreleased minor branch without a staged minor sibling. This case happens once a staged,
+     * unreleased minor is released. At the time of this writing 6.2 is unreleased, so adding a 6.2.1 simulates a 6.2 release. This test
+     * simulates the behavior from the 6.3 perspective.
+     */
+    void testAgainstMinorReleasedBranchNoStagedMinor() {
+        List localVersion = allVersions.clone()
+        // remove all the 7.x and add a 6.2.1 which means 6.2 was released
+        localVersion.removeAll { it.toString().contains('7_0_0') }
+        localVersion.add(formatVersion('6.2.1'))
+        VersionCollection vc = new VersionCollection(localVersion)
+        assertNotNull(vc)
+        assertEquals(vc.nextMinorSnapshot, null)
+        assertEquals(vc.stagedMinorSnapshot, null)
+        assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.2.1-SNAPSHOT"))
+        assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT"))
+
+        // This should contain the same list sans the current version
+        List indexCompatList = vc.versions.subList(0, vc.versions.size() - 1)
+        assertTrue(indexCompatList.containsAll(vc.indexCompatible))
+        assertTrue(vc.indexCompatible.containsAll(indexCompatList))
+
+        List wireCompatList = [Version.fromString("5.2.0"), Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("6.0.0"),
+                Version.fromString("6.0.1"), Version.fromString("6.1.0"), Version.fromString("6.1.1"),
+                Version.fromString("6.2.0"), Version.fromString("6.2.1-SNAPSHOT")]
+        assertTrue(wireCompatList.containsAll(vc.wireCompatible))
+        assertTrue(vc.wireCompatible.containsAll(wireCompatList))
+
+        assertEquals(vc.snapshotsIndexCompatible.size(), 2)
+        assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.2.1-SNAPSHOT")))
+        assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("5.2.1-SNAPSHOT")))
+
+        assertEquals(vc.snapshotsWireCompatible.size(), 2)
+        assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("6.2.1-SNAPSHOT")))
+        assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("5.2.1-SNAPSHOT")))
+    }
+
+    /**
+     * This validates the logic of being on a released minor branch. At the time of writing, 6.2 is unreleased, so this is equivalent
+     * to being on 6.1.
+     */
+    void testAgainstOldMinor() {
+        List localVersion = allVersions.clone()
+        // remove the 7 alphas and the ones greater than 6.1
+        localVersion.removeAll { it.toString().contains('7_0_0') || it.toString().contains('V_6_2') || it.toString().contains('V_6_3') }
+        VersionCollection vc = new VersionCollection(localVersion)
+        assertNotNull(vc)
+        assertEquals(vc.nextMinorSnapshot, null)
+        assertEquals(vc.stagedMinorSnapshot, null)
+        assertEquals(vc.nextBugfixSnapshot, null)
+        assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT"))
+
+        // This should contain the same list sans the current version
+        List indexCompatList = vc.versions.subList(0, vc.versions.size() - 1)
+        assertTrue(indexCompatList.containsAll(vc.indexCompatible))
+        assertTrue(vc.indexCompatible.containsAll(indexCompatList))
+
+        List wireCompatList = [Version.fromString("5.2.0"), Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("6.0.0"),
+                Version.fromString("6.0.1"), Version.fromString("6.1.0")]
+        assertTrue(wireCompatList.containsAll(vc.wireCompatible))
+        assertTrue(vc.wireCompatible.containsAll(wireCompatList))
+
+        assertEquals(vc.snapshotsIndexCompatible.size(), 1)
+        assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("5.2.1-SNAPSHOT")))
+
+        assertEquals(vc.snapshotsWireCompatible.size(), 1)
+        assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("5.2.1-SNAPSHOT")))
+    }
+
+    /**
+     * This validates the lower bound of wire compat, which is 5.0. It also validates the span from 2.x to 5.x, in case it is decided
+     * to port this fix all the way back to the 5.6 maintenance release.
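+     * In particular, no 2.x snapshot may appear in the snapshots-wire-compatible set, since 5.0 is the floor of bwc for wire compat.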
+ */ + void testFloorOfWireCompatVersions() { + List localVersion = [formatVersion('2.0.0'), formatVersion('2.0.1'), formatVersion('2.1.0'), formatVersion('2.1.1'), + formatVersion('5.0.0'), formatVersion('5.0.1'), formatVersion('5.1.0'), formatVersion('5.1.1'), + formatVersion('5.2.0'),formatVersion('5.2.1'),formatVersion('5.3.0'),formatVersion('5.3.1'), + formatVersion('5.3.2')] + VersionCollection vc = new VersionCollection(localVersion) + assertNotNull(vc) + assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("2.1.1-SNAPSHOT")) + + // This should contain the same list sans the current version + List indexCompatList = vc.versions.subList(0, vc.versions.size() - 1) + assertTrue(indexCompatList.containsAll(vc.indexCompatible)) + assertTrue(vc.indexCompatible.containsAll(indexCompatList)) + + List wireCompatList = [Version.fromString("2.1.0"), Version.fromString("2.1.1-SNAPSHOT"), Version.fromString("5.0.0"), + Version.fromString("5.0.1"), Version.fromString("5.1.0"), + Version.fromString("5.1.1"), Version.fromString("5.2.0"), Version.fromString("5.2.1"), + Version.fromString("5.3.0"), Version.fromString("5.3.1")] + + assertTrue(wireCompatList.containsAll(vc.wireCompatible)) + assertTrue(vc.wireCompatible.containsAll(wireCompatList)) + + assertEquals(vc.snapshotsIndexCompatible.size(), 1) + assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("2.1.1-SNAPSHOT"))) + + // ensure none of the 2.x snapshots appear here, as this is the floor of bwc for wire compat + assertEquals(vc.snapshotsWireCompatible.size(), 0) + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java new file mode 100644 index 0000000000000..177e33d727010 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.Header; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; + +import java.io.IOException; + +import static java.util.Collections.emptySet; + +/** + * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Cluster API. + *

+ * See Cluster API on elastic.co
+ */
+public final class ClusterClient {
+    private final RestHighLevelClient restHighLevelClient;
+
+    ClusterClient(RestHighLevelClient restHighLevelClient) {
+        this.restHighLevelClient = restHighLevelClient;
+    }
+
+    /**
+     * Updates cluster-wide settings using the Cluster Update Settings API
+     *
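+     * A minimal usage sketch (illustrative only; {@code client} stands for an existing {@code RestHighLevelClient}):
+     * <pre>
+     * ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
+     * request.transientSettings(Settings.builder().put("indices.recovery.max_bytes_per_sec", "20mb"));
+     * ClusterUpdateSettingsResponse response = client.cluster().putSettings(request);
+     * </pre>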

+     * See Cluster Update Settings
+     * API on elastic.co
+     */
+    public ClusterUpdateSettingsResponse putSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, Header... headers)
+            throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(clusterUpdateSettingsRequest, Request::clusterPutSettings,
+                ClusterUpdateSettingsResponse::fromXContent, emptySet(), headers);
+    }
+
+    /**
+     * Asynchronously updates cluster-wide settings using the Cluster Update Settings API
+     *
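+     * A minimal async sketch (illustrative only): pass an {@code ActionListener} whose {@code onResponse} and
+     * {@code onFailure} callbacks handle the outcome, e.g. {@code client.cluster().putSettingsAsync(request, listener);}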

+ * See Cluster Update Settings + * API on elastic.co + */ + public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, + ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, Request::clusterPutSettings, + ClusterUpdateSettingsResponse::fromXContent, listener, emptySet(), headers); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 98d6736f7864a..6968839d1ee42 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -35,6 +35,8 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; @@ -272,7 +274,7 @@ public void shrinkAsync(ResizeRequest resizeRequest, ActionListener * See - * Shrink Index API on elastic.co + * Split Index API on elastic.co */ public ResizeResponse split(ResizeRequest resizeRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(resizeRequest, Request::split, ResizeResponse::fromXContent, @@ -289,4 +291,26 @@ public void splitAsync(ResizeRequest resizeRequest, ActionListener + * See + * Rollover Index API on elastic.co + */ + public RolloverResponse rollover(RolloverRequest rolloverRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(rolloverRequest, Request::rollover, RolloverResponse::fromXContent, + emptySet(), headers); + } + + /** + * Asynchronously rolls over an index using the Rollover Index API + *
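+     * A minimal sketch (illustrative only): build the request as for the synchronous variant and hand it a listener, e.g.
+     * {@code client.indices().rolloverAsync(new RolloverRequest("alias", null), listener)}; passing a null new index name
+     * lets the server derive the name of the rollover target.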

+ * See + * Rollover Index API on elastic.co + */ + public void rolloverAsync(RolloverRequest rolloverRequest, ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, Request::rollover, RolloverResponse::fromXContent, + listener, emptySet(), headers); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index 4ec7315b73b58..fd849f5e47883 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -29,6 +29,7 @@ import org.apache.http.entity.ContentType; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; @@ -37,6 +38,7 @@ import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.bulk.BulkRequest; @@ -73,6 +75,8 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.nio.charset.Charset; import java.util.Collections; import java.util.HashMap; @@ -495,11 +499,10 @@ static Request existsAlias(GetAliasesRequest getAliasesRequest) { } static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException { - // TODO maybe indices should be propery of RankEvalRequest and not of the spec + // TODO maybe indices should be property of RankEvalRequest and not of the spec List indices = rankEvalRequest.getRankEvalSpec().getIndices(); String endpoint = endpoint(indices.toArray(new String[indices.size()]), Strings.EMPTY_ARRAY, "_rank_eval"); - HttpEntity entity = null; - entity = createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE); + HttpEntity entity = createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE); return new Request(HttpGet.METHOD_NAME, endpoint, Collections.emptyMap(), entity); } @@ -528,6 +531,30 @@ private static Request resize(ResizeRequest resizeRequest) throws IOException { return new Request(HttpPut.METHOD_NAME, endpoint, params.getParams(), entity); } + static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest) throws IOException { + Params parameters = Params.builder(); + parameters.withFlatSettings(clusterUpdateSettingsRequest.flatSettings()); + parameters.withTimeout(clusterUpdateSettingsRequest.timeout()); + parameters.withMasterTimeout(clusterUpdateSettingsRequest.masterNodeTimeout()); + + String endpoint = buildEndpoint("_cluster", "settings"); + HttpEntity entity = createEntity(clusterUpdateSettingsRequest, REQUEST_BODY_CONTENT_TYPE); + return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity); + } + + static Request rollover(RolloverRequest rolloverRequest) throws IOException { + 
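// Illustrative note: the rollover conditions and any new-index configuration travel in the request body,
+        // while dry_run, the timeouts and wait_for_active_shards are sent as URL parameters; the endpoint is
+        // /{alias}/_rollover, or /{alias}/_rollover/{newIndexName} when a new index name is supplied.
+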
Params params = Params.builder();
+        params.withTimeout(rolloverRequest.timeout());
+        params.withMasterTimeout(rolloverRequest.masterNodeTimeout());
+        params.withWaitForActiveShards(rolloverRequest.getCreateIndexRequest().waitForActiveShards());
+        if (rolloverRequest.isDryRun()) {
+            params.putParam("dry_run", Boolean.TRUE.toString());
+        }
+        String endpoint = buildEndpoint(rolloverRequest.getAlias(), "_rollover", rolloverRequest.getNewIndexName());
+        HttpEntity entity = createEntity(rolloverRequest, REQUEST_BODY_CONTENT_TYPE);
+        return new Request(HttpPost.METHOD_NAME, endpoint, params.getParams(), entity);
+    }
+
     private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
         BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef();
         return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType));
@@ -568,7 +595,16 @@ static String buildEndpoint(String... parts) {
         StringJoiner joiner = new StringJoiner("/", "/", "");
         for (String part : parts) {
             if (Strings.hasLength(part)) {
-                joiner.add(part);
+                try {
+                    //encode each part (e.g. index, type and id) separately before merging them into the path
+                    //we prepend "/" to the path part to make this path absolute, otherwise there can be issues with
+                    //paths that start with `-` or contain `:`
+                    URI uri = new URI(null, null, null, -1, "/" + part, null, null);
+                    //manually encode any slash that each part may contain
+                    joiner.add(uri.getRawPath().substring(1).replaceAll("/", "%2F"));
+                } catch (URISyntaxException e) {
+                    throw new IllegalArgumentException("Path part [" + part + "] couldn't be encoded", e);
+                }
             }
         }
         return joiner.toString();
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
index 5d5f0e145ef98..bf80aa7720741 100755
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
@@ -186,6 +186,7 @@ public class RestHighLevelClient implements Closeable {
     private final CheckedConsumer doClose;
     private final IndicesClient indicesClient = new IndicesClient(this);
+    private final ClusterClient clusterClient = new ClusterClient(this);
 
     /**
      * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the
@@ -222,7 +223,7 @@ protected RestHighLevelClient(RestClient restClient, CheckedConsumer
+    /**
+     * Provides a {@link ClusterClient} which can be used to access the Cluster API.
+     *
+     * See Cluster API on elastic.co
+     */
+    public final ClusterClient cluster() {
+        return clusterClient;
+    }
+
     /**
      * Executes a bulk request using the Bulk API
      *
@@ -306,7 +316,7 @@ public final MultiGetResponse multiGet(MultiGetRequest multiGetRequest, Header..
      *
      * See Multi Get API on elastic.co
      */
-    public void multiGetAsync(MultiGetRequest multiGetRequest, ActionListener listener, Header... headers) {
+    public final void multiGetAsync(MultiGetRequest multiGetRequest, ActionListener listener, Header...
headers) { performRequestAsyncAndParseEntity(multiGetRequest, Request::multiGet, MultiGetResponse::fromXContent, listener, singleton(404), headers); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java new file mode 100644 index 0000000000000..9314bb2e36cea --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class ClusterClientIT extends ESRestHighLevelClientTestCase { + + public void testClusterPutSettings() throws IOException { + final String transientSettingKey = RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(); + final int transientSettingValue = 10; + + final String persistentSettingKey = EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(); + final String persistentSettingValue = EnableAllocationDecider.Allocation.NONE.name(); + + Settings transientSettings = Settings.builder().put(transientSettingKey, transientSettingValue, ByteSizeUnit.BYTES).build(); + Map map = new HashMap<>(); + map.put(persistentSettingKey, persistentSettingValue); + + ClusterUpdateSettingsRequest setRequest = new ClusterUpdateSettingsRequest(); + setRequest.transientSettings(transientSettings); + setRequest.persistentSettings(map); + + ClusterUpdateSettingsResponse setResponse = execute(setRequest, highLevelClient().cluster()::putSettings, + highLevelClient().cluster()::putSettingsAsync); + + assertAcked(setResponse); + assertThat(setResponse.getTransientSettings().get(transientSettingKey), notNullValue()); + assertThat(setResponse.getTransientSettings().get(persistentSettingKey), nullValue()); + 
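// The transient and persistent scopes are reported separately: a key updated in one scope must not
+        // appear in the other, both in the response and in the GET /_cluster/settings output checked below.
+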
assertThat(setResponse.getTransientSettings().get(transientSettingKey), + equalTo(transientSettingValue + ByteSizeUnit.BYTES.getSuffix())); + assertThat(setResponse.getPersistentSettings().get(transientSettingKey), nullValue()); + assertThat(setResponse.getPersistentSettings().get(persistentSettingKey), notNullValue()); + assertThat(setResponse.getPersistentSettings().get(persistentSettingKey), equalTo(persistentSettingValue)); + + Map setMap = getAsMap("/_cluster/settings"); + String transientSetValue = (String) XContentMapValues.extractValue("transient." + transientSettingKey, setMap); + assertThat(transientSetValue, equalTo(transientSettingValue + ByteSizeUnit.BYTES.getSuffix())); + String persistentSetValue = (String) XContentMapValues.extractValue("persistent." + persistentSettingKey, setMap); + assertThat(persistentSetValue, equalTo(persistentSettingValue)); + + ClusterUpdateSettingsRequest resetRequest = new ClusterUpdateSettingsRequest(); + resetRequest.transientSettings(Settings.builder().putNull(transientSettingKey)); + resetRequest.persistentSettings("{\"" + persistentSettingKey + "\": null }", XContentType.JSON); + + ClusterUpdateSettingsResponse resetResponse = execute(resetRequest, highLevelClient().cluster()::putSettings, + highLevelClient().cluster()::putSettingsAsync); + + assertThat(resetResponse.getTransientSettings().get(transientSettingKey), equalTo(null)); + assertThat(resetResponse.getPersistentSettings().get(persistentSettingKey), equalTo(null)); + assertThat(resetResponse.getTransientSettings(), equalTo(Settings.EMPTY)); + assertThat(resetResponse.getPersistentSettings(), equalTo(Settings.EMPTY)); + + Map resetMap = getAsMap("/_cluster/settings"); + String transientResetValue = (String) XContentMapValues.extractValue("transient." + transientSettingKey, resetMap); + assertThat(transientResetValue, equalTo(null)); + String persistentResetValue = (String) XContentMapValues.extractValue("persistent." 
+                persistentSettingKey, resetMap);
+        assertThat(persistentResetValue, equalTo(null));
+    }
+
+    public void testClusterUpdateSettingNonExistent() {
+        String setting = "no_idea_what_you_are_talking_about";
+        int value = 10;
+        ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest();
+        clusterUpdateSettingsRequest.transientSettings(Settings.builder().put(setting, value).build());
+
+        ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(clusterUpdateSettingsRequest,
+                highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync));
+        assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST));
+        assertThat(exception.getMessage(), equalTo(
+                "Elasticsearch exception [type=illegal_argument_exception, reason=transient setting [" + setting + "], not recognized]"));
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java
index 7d3f67bfa8333..559dded4f4d7e 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java
@@ -26,6 +26,7 @@
 import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkProcessor;
 import org.elasticsearch.action.bulk.BulkRequest;
@@ -52,6 +53,9 @@
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptType;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.DateTimeFormat;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -648,7 +652,7 @@ public void testBulk() throws IOException {
         validateBulkResponses(nbItems, errors, bulkResponse, bulkRequest);
     }
 
-    public void testBulkProcessorIntegration() throws IOException, InterruptedException {
+    public void testBulkProcessorIntegration() throws IOException {
         int nbItems = randomIntBetween(10, 100);
 
         boolean[] errors = new boolean[nbItems];
@@ -762,4 +766,69 @@ private void validateBulkResponses(int nbItems, boolean[] errors, BulkResponse b
             }
         }
     }
+
+    public void testUrlEncode() throws IOException {
+        String indexPattern = "<logstash-{now/M}>";
+        String expectedIndex = "logstash-" +
+                DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(DateTimeZone.UTC).monthOfYear().roundFloorCopy());
+        {
+            IndexRequest indexRequest = new IndexRequest(indexPattern, "type", "id#1");
+            indexRequest.source("field", "value");
+            IndexResponse indexResponse = highLevelClient().index(indexRequest);
+            assertEquals(expectedIndex, indexResponse.getIndex());
+            assertEquals("type", indexResponse.getType());
+            assertEquals("id#1", indexResponse.getId());
+        }
+        {
+            GetRequest getRequest = new GetRequest(indexPattern, "type", "id#1");
+            GetResponse getResponse = highLevelClient().get(getRequest);
+            assertTrue(getResponse.isExists());
+            assertEquals(expectedIndex, getResponse.getIndex());
+            assertEquals("type", getResponse.getType());
+            assertEquals("id#1", getResponse.getId());
+        }
+
+        String docId = "this/is/the/id/中文";
+        {
+            IndexRequest indexRequest = new IndexRequest("index", "type", docId);
+            indexRequest.source("field", "value");
+            IndexResponse
indexResponse = highLevelClient().index(indexRequest); + assertEquals("index", indexResponse.getIndex()); + assertEquals("type", indexResponse.getType()); + assertEquals(docId, indexResponse.getId()); + } + { + GetRequest getRequest = new GetRequest("index", "type", docId); + GetResponse getResponse = highLevelClient().get(getRequest); + assertTrue(getResponse.isExists()); + assertEquals("index", getResponse.getIndex()); + assertEquals("type", getResponse.getType()); + assertEquals(docId, getResponse.getId()); + } + + assertTrue(highLevelClient().indices().exists(new GetIndexRequest().indices(indexPattern, "index"))); + } + + public void testParamsEncode() throws IOException { + //parameters are encoded by the low-level client but let's test that everything works the same when we use the high-level one + String routing = "routing/中文value#1?"; + { + IndexRequest indexRequest = new IndexRequest("index", "type", "id"); + indexRequest.source("field", "value"); + indexRequest.routing(routing); + IndexResponse indexResponse = highLevelClient().index(indexRequest); + assertEquals("index", indexResponse.getIndex()); + assertEquals("type", indexResponse.getType()); + assertEquals("id", indexResponse.getId()); + } + { + GetRequest getRequest = new GetRequest("index", "type", "id").routing(routing); + GetResponse getResponse = highLevelClient().get(getRequest); + assertTrue(getResponse.isExists()); + assertEquals("index", getResponse.getIndex()); + assertEquals("type", getResponse.getType()); + assertEquals("id", getResponse.getId()); + assertEquals(routing, getResponse.getField("_routing").getValue()); + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 5baef93c0dee1..fbc15b10a7488 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -39,11 +39,18 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeType; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -167,7 +174,6 @@ public void testCreateIndex() throws IOException { } } - @SuppressWarnings({"unchecked", "rawtypes"}) public void testPutMapping() throws IOException { { // Add mappings to index @@ -436,4 +442,57 @@ public void testSplit() throws IOException { Map aliasData = (Map)XContentMapValues.extractValue("target.aliases.alias", getIndexResponse); 
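        // extractValue walks the dotted path through the nested map returned by the get index API, so a
        // non-null result means the alias was carried over to the split target index.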
assertNotNull(aliasData); } + + public void testRollover() throws IOException { + highLevelClient().indices().create(new CreateIndexRequest("test").alias(new Alias("alias"))); + RolloverRequest rolloverRequest = new RolloverRequest("alias", "test_new"); + rolloverRequest.addMaxIndexDocsCondition(1); + + { + RolloverResponse rolloverResponse = execute(rolloverRequest, highLevelClient().indices()::rollover, + highLevelClient().indices()::rolloverAsync); + assertFalse(rolloverResponse.isRolledOver()); + assertFalse(rolloverResponse.isDryRun()); + Map conditionStatus = rolloverResponse.getConditionStatus(); + assertEquals(1, conditionStatus.size()); + assertFalse(conditionStatus.get("[max_docs: 1]")); + assertEquals("test", rolloverResponse.getOldIndex()); + assertEquals("test_new", rolloverResponse.getNewIndex()); + } + + highLevelClient().index(new IndexRequest("test", "type", "1").source("field", "value")); + highLevelClient().index(new IndexRequest("test", "type", "2").source("field", "value") + .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL)); + //without the refresh the rollover may not happen as the number of docs seen may be off + + { + rolloverRequest.addMaxIndexAgeCondition(new TimeValue(1)); + rolloverRequest.dryRun(true); + RolloverResponse rolloverResponse = execute(rolloverRequest, highLevelClient().indices()::rollover, + highLevelClient().indices()::rolloverAsync); + assertFalse(rolloverResponse.isRolledOver()); + assertTrue(rolloverResponse.isDryRun()); + Map conditionStatus = rolloverResponse.getConditionStatus(); + assertEquals(2, conditionStatus.size()); + assertTrue(conditionStatus.get("[max_docs: 1]")); + assertTrue(conditionStatus.get("[max_age: 1ms]")); + assertEquals("test", rolloverResponse.getOldIndex()); + assertEquals("test_new", rolloverResponse.getNewIndex()); + } + { + rolloverRequest.dryRun(false); + rolloverRequest.addMaxIndexSizeCondition(new ByteSizeValue(1, ByteSizeUnit.MB)); + RolloverResponse rolloverResponse = execute(rolloverRequest, highLevelClient().indices()::rollover, + highLevelClient().indices()::rolloverAsync); + assertTrue(rolloverResponse.isRolledOver()); + assertFalse(rolloverResponse.isDryRun()); + Map conditionStatus = rolloverResponse.getConditionStatus(); + assertEquals(3, conditionStatus.size()); + assertTrue(conditionStatus.get("[max_docs: 1]")); + assertTrue(conditionStatus.get("[max_age: 1ms]")); + assertFalse(conditionStatus.get("[max_size: 1mb]")); + assertEquals("test", rolloverResponse.getOldIndex()); + assertEquals("test_new", rolloverResponse.getNewIndex()); + } + } } \ No newline at end of file diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index 3e4add16707ff..71724cab82ec7 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -30,6 +30,7 @@ import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; @@ -39,6 +40,7 @@ import 
org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.bulk.BulkRequest; @@ -73,6 +75,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.rankeval.PrecisionAtK; @@ -272,7 +275,7 @@ public void testIndicesExist() { Map expectedParams = new HashMap<>(); setRandomIndicesOptions(getIndexRequest::indicesOptions, getIndexRequest::indicesOptions, expectedParams); setRandomLocal(getIndexRequest, expectedParams); - setRandomFlatSettings(getIndexRequest, expectedParams); + setRandomFlatSettings(getIndexRequest::flatSettings, expectedParams); setRandomHumanReadable(getIndexRequest, expectedParams); setRandomIncludeDefaults(getIndexRequest, expectedParams); @@ -1115,15 +1118,9 @@ private static void resizeTest(ResizeType resizeType, CheckedFunction expectedParams = new HashMap<>(); + setRandomFlatSettings(request::flatSettings, expectedParams); + setRandomMasterTimeout(request, expectedParams); + setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request expectedRequest = Request.clusterPutSettings(request); + assertEquals("/_cluster/settings", expectedRequest.getEndpoint()); + assertEquals(HttpPut.METHOD_NAME, expectedRequest.getMethod()); + assertEquals(expectedParams, expectedRequest.getParameters()); + } + + public void testRollover() throws IOException { + RolloverRequest rolloverRequest = new RolloverRequest(randomAlphaOfLengthBetween(3, 10), + randomBoolean() ? 
null : randomAlphaOfLengthBetween(3, 10));
+        Map expectedParams = new HashMap<>();
+        setRandomTimeout(rolloverRequest::timeout, rolloverRequest.timeout(), expectedParams);
+        setRandomMasterTimeout(rolloverRequest, expectedParams);
+        if (randomBoolean()) {
+            rolloverRequest.dryRun(randomBoolean());
+            if (rolloverRequest.isDryRun()) {
+                expectedParams.put("dry_run", "true");
+            }
+        }
+        if (randomBoolean()) {
+            rolloverRequest.addMaxIndexAgeCondition(new TimeValue(randomNonNegativeLong()));
+        }
+        if (randomBoolean()) {
+            String type = randomAlphaOfLengthBetween(3, 10);
+            rolloverRequest.getCreateIndexRequest().mapping(type, RandomCreateIndexGenerator.randomMapping(type));
+        }
+        if (randomBoolean()) {
+            RandomCreateIndexGenerator.randomAliases(rolloverRequest.getCreateIndexRequest());
+        }
+        if (randomBoolean()) {
+            rolloverRequest.getCreateIndexRequest().settings(RandomCreateIndexGenerator.randomIndexSettings());
+        }
+        setRandomWaitForActiveShards(rolloverRequest.getCreateIndexRequest()::waitForActiveShards, expectedParams);
+
+        Request request = Request.rollover(rolloverRequest);
+        if (rolloverRequest.getNewIndexName() == null) {
+            assertEquals("/" + rolloverRequest.getAlias() + "/_rollover", request.getEndpoint());
+        } else {
+            assertEquals("/" + rolloverRequest.getAlias() + "/_rollover/" + rolloverRequest.getNewIndexName(), request.getEndpoint());
+        }
+        assertEquals(HttpPost.METHOD_NAME, request.getMethod());
+        assertToXContentBody(rolloverRequest, request.getEntity());
+        assertEquals(expectedParams, request.getParameters());
+    }
 
     private static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException {
         BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false);
@@ -1178,6 +1226,22 @@ public void testBuildEndpoint() {
         assertEquals("/a/_create", Request.buildEndpoint("a", null, null, "_create"));
     }
 
+    public void testBuildEndPointEncodeParts() {
+        assertEquals("/-%23index1,index%232/type/id", Request.buildEndpoint("-#index1,index#2", "type", "id"));
+        assertEquals("/index/type%232/id", Request.buildEndpoint("index", "type#2", "id"));
+        assertEquals("/index/type/this%2Fis%2Fthe%2Fid", Request.buildEndpoint("index", "type", "this/is/the/id"));
+        assertEquals("/index/type/this%7Cis%7Cthe%7Cid", Request.buildEndpoint("index", "type", "this|is|the|id"));
+        assertEquals("/index/type/id%231", Request.buildEndpoint("index", "type", "id#1"));
+        assertEquals("/%3Clogstash-%7Bnow%2FM%7D%3E/_search", Request.buildEndpoint("<logstash-{now/M}>", "_search"));
+        assertEquals("/中文", Request.buildEndpoint("中文"));
+        assertEquals("/foo%20bar", Request.buildEndpoint("foo bar"));
+        assertEquals("/foo+bar", Request.buildEndpoint("foo+bar"));
+        assertEquals("/foo%2Fbar", Request.buildEndpoint("foo/bar"));
+        assertEquals("/foo%5Ebar", Request.buildEndpoint("foo^bar"));
+        assertEquals("/cluster1:index1,index2/_search", Request.buildEndpoint("cluster1:index1,index2", "_search"));
+        assertEquals("/*", Request.buildEndpoint("*"));
+    }
+
     public void testEndpoint() {
         assertEquals("/index/type/id", Request.endpoint("index", "type", "id"));
         assertEquals("/index/type/id/_endpoint", Request.endpoint("index", "type", "id", "_endpoint"));
@@ -1289,16 +1353,6 @@ private static void setRandomHumanReadable(GetIndexRequest request, Map
-    private static void setRandomFlatSettings(GetIndexRequest request, Map expectedParams) {
-        if (randomBoolean()) {
-            boolean flatSettings = randomBoolean();
-            request.flatSettings(flatSettings);
-            if (flatSettings) {
-                expectedParams.put("flat_settings", String.valueOf(flatSettings));
-            }
-        }
-    }
-
     private
static void setRandomLocal(MasterNodeReadRequest request, Map expectedParams) { if (randomBoolean()) { boolean local = randomBoolean(); @@ -1319,6 +1373,16 @@ private static void setRandomTimeout(Consumer setter, TimeValue defaultT } } + private static void setRandomFlatSettings(Consumer setter, Map expectedParams) { + if (randomBoolean()) { + boolean flatSettings = randomBoolean(); + setter.accept(flatSettings); + if (flatSettings) { + expectedParams.put("flat_settings", String.valueOf(flatSettings)); + } + } + } + private static void setRandomMasterTimeout(MasterNodeRequest request, Map expectedParams) { if (randomBoolean()) { String masterTimeout = randomTimeValue(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java new file mode 100644 index 0000000000000..e9fc4ec01ba99 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java @@ -0,0 +1,180 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.documentation; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.indices.recovery.RecoverySettings; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.equalTo; + +/** + * This class is used to generate the Java Cluster API documentation. + * You need to wrap your code between two tags like: + * // tag::example[] + * // end::example[] + * + * Where example is your tag name. 
+ * + * Then in the documentation, you can extract what is between tag and end tags with + * ["source","java",subs="attributes,callouts,macros"] + * -------------------------------------------------- + * include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[example] + * -------------------------------------------------- + */ +public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase { + + public void testClusterPutSettings() throws IOException { + RestHighLevelClient client = highLevelClient(); + + // tag::put-settings-request + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + // end::put-settings-request + + // tag::put-settings-create-settings + String transientSettingKey = + RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(); + int transientSettingValue = 10; + Settings transientSettings = + Settings.builder() + .put(transientSettingKey, transientSettingValue, ByteSizeUnit.BYTES) + .build(); // <1> + + String persistentSettingKey = + EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(); + String persistentSettingValue = + EnableAllocationDecider.Allocation.NONE.name(); + Settings persistentSettings = + Settings.builder() + .put(persistentSettingKey, persistentSettingValue) + .build(); // <2> + // end::put-settings-create-settings + + // tag::put-settings-request-cluster-settings + request.transientSettings(transientSettings); // <1> + request.persistentSettings(persistentSettings); // <2> + // end::put-settings-request-cluster-settings + + { + // tag::put-settings-settings-builder + Settings.Builder transientSettingsBuilder = + Settings.builder() + .put(transientSettingKey, transientSettingValue, ByteSizeUnit.BYTES); + request.transientSettings(transientSettingsBuilder); // <1> + // end::put-settings-settings-builder + } + { + // tag::put-settings-settings-map + Map map = new HashMap<>(); + map.put(transientSettingKey + , transientSettingValue + ByteSizeUnit.BYTES.getSuffix()); + request.transientSettings(map); // <1> + // end::put-settings-settings-map + } + { + // tag::put-settings-settings-source + request.transientSettings( + "{\"indices.recovery.max_bytes_per_sec\": \"10b\"}" + , XContentType.JSON); // <1> + // end::put-settings-settings-source + } + + // tag::put-settings-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::put-settings-request-timeout + // tag::put-settings-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::put-settings-request-masterTimeout + + // tag::put-settings-request-flat-settings + request.flatSettings(true); // <1> + // end::put-settings-request-flat-settings + + // tag::put-settings-execute + ClusterUpdateSettingsResponse response = client.cluster().putSettings(request); + // end::put-settings-execute + + // tag::put-settings-response + boolean acknowledged = response.isAcknowledged(); // <1> + Settings transientSettingsResponse = response.getTransientSettings(); // <2> + Settings persistentSettingsResponse = response.getPersistentSettings(); // <3> + // end::put-settings-response + assertTrue(acknowledged); + assertThat(transientSettingsResponse.get(transientSettingKey), equalTo(transientSettingValue + ByteSizeUnit.BYTES.getSuffix())); + assertThat(persistentSettingsResponse.get(persistentSettingKey), equalTo(persistentSettingValue)); + + // tag::put-settings-request-reset-transient + 
request.transientSettings(Settings.builder().putNull(transientSettingKey).build()); // <1>
+        // end::put-settings-request-reset-transient
+        request.persistentSettings(Settings.builder().putNull(persistentSettingKey));
+        ClusterUpdateSettingsResponse resetResponse = client.cluster().putSettings(request);
+
+        assertTrue(resetResponse.isAcknowledged());
+    }
+
+    public void testClusterUpdateSettingsAsync() throws Exception {
+        RestHighLevelClient client = highLevelClient();
+        {
+            ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
+
+            // tag::put-settings-execute-listener
+            ActionListener<ClusterUpdateSettingsResponse> listener =
+                    new ActionListener<ClusterUpdateSettingsResponse>() {
+                @Override
+                public void onResponse(ClusterUpdateSettingsResponse response) {
+                    // <1>
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // <2>
+                }
+            };
+            // end::put-settings-execute-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::put-settings-execute-async
+            client.cluster().putSettingsAsync(request, listener); // <1>
+            // end::put-settings-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java
index f43ec7b3e283a..843c5c42fffa1 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java
@@ -38,6 +38,8 @@
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
 import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
 import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
+import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
 import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
 import org.elasticsearch.action.admin.indices.shrink.ResizeResponse;
 import org.elasticsearch.action.admin.indices.shrink.ResizeType;
@@ -46,6 +48,8 @@
 import org.elasticsearch.client.ESRestHighLevelClientTestCase;
 import org.elasticsearch.client.RestHighLevelClient;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
@@ -70,7 +74,7 @@
  * Then in the documentation, you can extract what is between tag and end tags with
  * ["source","java",subs="attributes,callouts,macros"]
  * --------------------------------------------------
- * include-tagged::{doc-tests}/CRUDDocumentationIT.java[example]
+ * include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[example]
  * --------------------------------------------------
  */
 public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase {
@@ -105,7 +109,7 @@ public void testIndicesExist() throws IOException {
         }
     }
 
-    public void testIndicesExistAsync() throws IOException {
+    public void testIndicesExistAsync() throws Exception {
         RestHighLevelClient client = highLevelClient();
 
         {
@@ -138,6 +142,8 @@ public void
onFailure(Exception e) { // tag::indices-exists-async client.indices().existsAsync(request, listener); // <1> // end::indices-exists-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } public void testDeleteIndex() throws IOException { @@ -411,19 +417,21 @@ public void testPutMapping() throws IOException { request.type("tweet"); // <2> // end::put-mapping-request - // tag::put-mapping-request-source - request.source( - "{\n" + - " \"tweet\": {\n" + - " \"properties\": {\n" + - " \"message\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}", // <1> - XContentType.JSON); - // end::put-mapping-request-source + { + // tag::put-mapping-request-source + request.source( + "{\n" + + " \"properties\": {\n" + + " \"message\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + "}", // <1> + XContentType.JSON); + // end::put-mapping-request-source + PutMappingResponse putMappingResponse = client.indices().putMapping(request); + assertTrue(putMappingResponse.isAcknowledged()); + } { //tag::put-mapping-map @@ -432,9 +440,7 @@ public void testPutMapping() throws IOException { message.put("type", "text"); Map properties = new HashMap<>(); properties.put("message", message); - Map tweet = new HashMap<>(); - tweet.put("properties", properties); - jsonMap.put("tweet", tweet); + jsonMap.put("properties", properties); request.source(jsonMap); // <1> //end::put-mapping-map PutMappingResponse putMappingResponse = client.indices().putMapping(request); @@ -445,15 +451,11 @@ public void testPutMapping() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); { - builder.startObject("tweet"); + builder.startObject("properties"); { - builder.startObject("properties"); + builder.startObject("message"); { - builder.startObject("message"); - { - builder.field("type", "text"); - } - builder.endObject(); + builder.field("type", "text"); } builder.endObject(); } @@ -733,7 +735,6 @@ public void onFailure(Exception e) { } } - @SuppressWarnings({"unchecked", "rawtypes"}) public void testUpdateAliases() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -751,17 +752,31 @@ public void testUpdateAliases() throws Exception { { // tag::update-aliases-request IndicesAliasesRequest request = new IndicesAliasesRequest(); // <1> - AliasActions aliasAction = new AliasActions(AliasActions.Type.ADD).index("index1").alias("alias1"); // <2> + AliasActions aliasAction = + new AliasActions(AliasActions.Type.ADD) + .index("index1") + .alias("alias1"); // <2> request.addAliasAction(aliasAction); // <3> // end::update-aliases-request // tag::update-aliases-request2 - AliasActions addIndexAction = new AliasActions(AliasActions.Type.ADD).index("index1").alias("alias1") - .filter("{\"term\":{\"year\":2016}}"); // <1> - AliasActions addIndicesAction = new AliasActions(AliasActions.Type.ADD).indices("index1", "index2").alias("alias2") - .routing("1"); // <2> - AliasActions removeAction = new AliasActions(AliasActions.Type.REMOVE).index("index3").alias("alias3"); // <3> - AliasActions removeIndexAction = new AliasActions(AliasActions.Type.REMOVE_INDEX).index("index4"); // <4> + AliasActions addIndexAction = + new AliasActions(AliasActions.Type.ADD) + .index("index1") + .alias("alias1") + .filter("{\"term\":{\"year\":2016}}"); // <1> + AliasActions addIndicesAction = + new AliasActions(AliasActions.Type.ADD) + .indices("index1", "index2") + .alias("alias2") + .routing("1"); // <2> + AliasActions removeAction = + new 
AliasActions(AliasActions.Type.REMOVE) + .index("index3") + .alias("alias3"); // <3> + AliasActions removeIndexAction = + new AliasActions(AliasActions.Type.REMOVE_INDEX) + .index("index4"); // <4> // end::update-aliases-request2 // tag::update-aliases-request-timeout @@ -774,7 +789,8 @@ public void testUpdateAliases() throws Exception { // end::update-aliases-request-masterTimeout // tag::update-aliases-execute - IndicesAliasesResponse indicesAliasesResponse = client.indices().updateAliases(request); + IndicesAliasesResponse indicesAliasesResponse = + client.indices().updateAliases(request); // end::update-aliases-execute // tag::update-aliases-response @@ -782,13 +798,15 @@ public void testUpdateAliases() throws Exception { // end::update-aliases-response assertTrue(acknowledged); } + { - IndicesAliasesRequest request = new IndicesAliasesRequest(); // <1> - AliasActions aliasAction = new AliasActions(AliasActions.Type.ADD).index("index1").alias("async"); // <2> + IndicesAliasesRequest request = new IndicesAliasesRequest(); + AliasActions aliasAction = new AliasActions(AliasActions.Type.ADD).index("index1").alias("async"); request.addAliasAction(aliasAction); // tag::update-aliases-execute-listener - ActionListener listener = new ActionListener() { + ActionListener listener = + new ActionListener() { @Override public void onResponse(IndicesAliasesResponse indicesAliasesResponse) { // <1> @@ -813,12 +831,12 @@ public void onFailure(Exception e) { } } - @SuppressWarnings({"unchecked", "rawtypes"}) public void testShrinkIndex() throws Exception { RestHighLevelClient client = highLevelClient(); { Map nodes = getAsMap("_nodes"); + @SuppressWarnings("unchecked") String firstNode = ((Map) nodes.get("nodes")).keySet().iterator().next(); createIndex("source_index", Settings.builder().put("index.number_of_shards", 4).put("index.number_of_replicas", 0).build()); updateIndexSettings("source_index", Settings.builder().put("index.routing.allocation.require._name", firstNode) @@ -838,8 +856,8 @@ public void testShrinkIndex() throws Exception { request.masterNodeTimeout("1m"); // <2> // end::shrink-index-request-masterTimeout // tag::shrink-index-request-waitForActiveShards - request.getTargetIndexRequest().waitForActiveShards(2); // <1> - request.getTargetIndexRequest().waitForActiveShards(ActiveShardCount.DEFAULT); // <2> + request.setWaitForActiveShards(2); // <1> + request.setWaitForActiveShards(ActiveShardCount.DEFAULT); // <2> // end::shrink-index-request-waitForActiveShards // tag::shrink-index-request-settings request.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", 2)); // <1> @@ -884,7 +902,6 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } - @SuppressWarnings({"unchecked", "rawtypes"}) public void testSplitIndex() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -908,8 +925,8 @@ public void testSplitIndex() throws Exception { request.masterNodeTimeout("1m"); // <2> // end::split-index-request-masterTimeout // tag::split-index-request-waitForActiveShards - request.getTargetIndexRequest().waitForActiveShards(2); // <1> - request.getTargetIndexRequest().waitForActiveShards(ActiveShardCount.DEFAULT); // <2> + request.setWaitForActiveShards(2); // <1> + request.setWaitForActiveShards(ActiveShardCount.DEFAULT); // <2> // end::split-index-request-waitForActiveShards // tag::split-index-request-settings request.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", 4)); // <1> @@ 
-953,4 +970,89 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + + public void testRolloverIndex() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + client.indices().create(new CreateIndexRequest("index-1").alias(new Alias("alias"))); + } + + // tag::rollover-request + RolloverRequest request = new RolloverRequest("alias", "index-2"); // <1> + request.addMaxIndexAgeCondition(new TimeValue(7, TimeUnit.DAYS)); // <2> + request.addMaxIndexDocsCondition(1000); // <3> + request.addMaxIndexSizeCondition(new ByteSizeValue(5, ByteSizeUnit.GB)); // <4> + // end::rollover-request + + // tag::rollover-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::rollover-request-timeout + // tag::rollover-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::rollover-request-masterTimeout + // tag::rollover-request-dryRun + request.dryRun(true); // <1> + // end::rollover-request-dryRun + // tag::rollover-request-waitForActiveShards + request.getCreateIndexRequest().waitForActiveShards(2); // <1> + request.getCreateIndexRequest().waitForActiveShards(ActiveShardCount.DEFAULT); // <2> + // end::rollover-request-waitForActiveShards + // tag::rollover-request-settings + request.getCreateIndexRequest().settings(Settings.builder().put("index.number_of_shards", 4)); // <1> + // end::rollover-request-settings + // tag::rollover-request-mapping + request.getCreateIndexRequest().mapping("type", "field", "type=keyword"); // <1> + // end::rollover-request-mapping + // tag::rollover-request-alias + request.getCreateIndexRequest().alias(new Alias("another_alias")); // <1> + // end::rollover-request-alias + + // tag::rollover-execute + RolloverResponse rolloverResponse = client.indices().rollover(request); + // end::rollover-execute + + // tag::rollover-response + boolean acknowledged = rolloverResponse.isAcknowledged(); // <1> + boolean shardsAcked = rolloverResponse.isShardsAcknowledged(); // <2> + String oldIndex = rolloverResponse.getOldIndex(); // <3> + String newIndex = rolloverResponse.getNewIndex(); // <4> + boolean isRolledOver = rolloverResponse.isRolledOver(); // <5> + boolean isDryRun = rolloverResponse.isDryRun(); // <6> + Map<String, Boolean> conditionStatus = rolloverResponse.getConditionStatus(); // <7> + // end::rollover-response + assertFalse(acknowledged); + assertFalse(shardsAcked); + assertEquals("index-1", oldIndex); + assertEquals("index-2", newIndex); + assertFalse(isRolledOver); + assertTrue(isDryRun); + assertEquals(3, conditionStatus.size()); + + // tag::rollover-execute-listener + ActionListener<RolloverResponse> listener = new ActionListener<RolloverResponse>() { + @Override + public void onResponse(RolloverResponse rolloverResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::rollover-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::rollover-execute-async + client.indices().rolloverAsync(request, listener); // <1> + // end::rollover-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 6d4e3ba4bc861..3d282a642e0da 100644 
--- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -74,7 +74,7 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { @BeforeClass public static void startHttpServer() throws Exception { - pathPrefix = randomBoolean() ? "/testPathPrefix/" + randomAsciiOfLengthBetween(1, 5) : ""; + pathPrefix = randomBoolean() ? "/testPathPrefix/" + randomAsciiLettersOfLengthBetween(1, 5) : ""; httpServer = createHttpServer(); defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default"); restClient = createRestClient(false, true); @@ -101,6 +101,7 @@ private static class ResponseHandler implements HttpHandler { @Override public void handle(HttpExchange httpExchange) throws IOException { + //copy request body to response body so we can verify it was sent StringBuilder body = new StringBuilder(); try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), Consts.UTF_8)) { char[] buffer = new char[256]; @@ -109,6 +110,7 @@ public void handle(HttpExchange httpExchange) throws IOException { body.append(buffer, 0, read); } } + //copy request headers to response headers so we can verify they were sent Headers requestHeaders = httpExchange.getRequestHeaders(); Headers responseHeaders = httpExchange.getResponseHeaders(); for (Map.Entry<String, List<String>> header : requestHeaders.entrySet()) { @@ -214,6 +216,41 @@ public void testGetWithBody() throws IOException { bodyTest("GET"); } + public void testEncodeParams() throws IOException { + { + Response response = restClient.performRequest("PUT", "/200", Collections.singletonMap("routing", "this/is/the/routing")); + assertEquals(pathPrefix + "/200?routing=this%2Fis%2Fthe%2Frouting", response.getRequestLine().getUri()); + } + { + Response response = restClient.performRequest("PUT", "/200", Collections.singletonMap("routing", "this|is|the|routing")); + assertEquals(pathPrefix + "/200?routing=this%7Cis%7Cthe%7Crouting", response.getRequestLine().getUri()); + } + { + Response response = restClient.performRequest("PUT", "/200", Collections.singletonMap("routing", "routing#1")); + assertEquals(pathPrefix + "/200?routing=routing%231", response.getRequestLine().getUri()); + } + { + Response response = restClient.performRequest("PUT", "/200", Collections.singletonMap("routing", "中文")); + assertEquals(pathPrefix + "/200?routing=%E4%B8%AD%E6%96%87", response.getRequestLine().getUri()); + } + { + Response response = restClient.performRequest("PUT", "/200", Collections.singletonMap("routing", "foo bar")); + assertEquals(pathPrefix + "/200?routing=foo+bar", response.getRequestLine().getUri()); + } + { + Response response = restClient.performRequest("PUT", "/200", Collections.singletonMap("routing", "foo+bar")); + assertEquals(pathPrefix + "/200?routing=foo%2Bbar", response.getRequestLine().getUri()); + } + { + Response response = restClient.performRequest("PUT", "/200", Collections.singletonMap("routing", "foo/bar")); + assertEquals(pathPrefix + "/200?routing=foo%2Fbar", response.getRequestLine().getUri()); + } + { + Response response = restClient.performRequest("PUT", "/200", Collections.singletonMap("routing", "foo^bar")); + assertEquals(pathPrefix + "/200?routing=foo%5Ebar", response.getRequestLine().getUri()); + } + } + /** * Verify that credentials are sent on the first request with preemptive auth enabled (default when provided with credentials). 
*/ diff --git a/dev-tools/es_release_notes.pl b/dev-tools/es_release_notes.pl index 36b084ebfa5ce..4b5112859bedd 100755 --- a/dev-tools/es_release_notes.pl +++ b/dev-tools/es_release_notes.pl @@ -28,24 +28,22 @@ my $Issue_URL = "http://github.com/${User_Repo}issues/"; my @Groups = ( - "breaking", "breaking-java", "deprecation", "feature", - "enhancement", "bug", "regression", "upgrade", "non-issue", "build", - "docs", "test" + ">breaking", ">breaking-java", ">deprecation", ">feature", + ">enhancement", ">bug", ">regression", ">upgrade" ); +my %Ignore = map { $_ => 1 } + ( ">non-issue", ">refactoring", ">docs", ">test", ":Core/Build" ); + my %Group_Labels = ( - breaking => 'Breaking changes', - 'breaking-java' => 'Breaking Java changes', - build => 'Build', - deprecation => 'Deprecations', - docs => 'Docs', - feature => 'New features', - enhancement => 'Enhancements', - bug => 'Bug fixes', - regression => 'Regressions', - test => 'Tests', - upgrade => 'Upgrades', - "non-issue" => 'Non-issue', - other => 'NOT CLASSIFIED', + '>breaking' => 'Breaking changes', + '>breaking-java' => 'Breaking Java changes', + '>deprecation' => 'Deprecations', + '>feature' => 'New features', + '>enhancement' => 'Enhancements', + '>bug' => 'Bug fixes', + '>regression' => 'Regressions', + '>upgrade' => 'Upgrades', + 'other' => 'NOT CLASSIFIED', ); use JSON(); @@ -71,6 +69,9 @@ sub dump_issues { my $issues = shift; $version =~ s/v//; + my $branch = $version; + $branch =~ s/\.\d+$//; + my ( $day, $month, $year ) = (gmtime)[ 3 .. 5 ]; $month++; $year += 1900; @@ -82,11 +83,17 @@ sub dump_issues { [[release-notes-$version]] == $version Release Notes +coming[$version] + +Also see <<breaking-changes-$branch>>. + ASCIIDOC for my $group ( @Groups, 'other' ) { my $group_issues = $issues->{$group} or next; - print "[[$group-$version]]\n" + my $group_id = $group; + $group_id =~ s/^>//; + print "[[$group_id-$version]]\n" . "[float]\n" . "=== $Group_Labels{$group}\n\n"; @@ -161,10 +168,15 @@ sub fetch_issues { for my $issue (@issues) { next if $seen{ $issue->{number} } && !$issue->{pull_request}; + for ( @{ $issue->{labels} } ) { + next ISSUE if $Ignore{ $_->{name} }; + } + # uncomment for including/excluding PRs already issued in other versions # next if grep {$_->{name}=~/^v2/} @{$issue->{labels}}; my %labels = map { $_->{name} => 1 } @{ $issue->{labels} }; - my ($header) = map { substr( $_, 1 ) } grep {/^:/} sort keys %labels; + my ($header) = map { m{:[^/]+/(.+)} && $1 } + grep {/^:/} sort keys %labels; $header ||= 'NOT CLASSIFIED'; for (@Groups) { if ( $labels{$_} ) { diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle new file mode 100644 index 0000000000000..93960a3ac21b2 --- /dev/null +++ b/distribution/archives/build.gradle @@ -0,0 +1,211 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.apache.tools.ant.taskdefs.condition.Os +import org.apache.tools.ant.filters.FixCrLfFilter +import org.elasticsearch.gradle.BuildPlugin +import org.elasticsearch.gradle.EmptyDirTask +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.plugin.PluginBuildPlugin + +// need this so Zip/Tar tasks get basic defaults... +apply plugin: 'base' + +// CopySpec does not make it easy to create an empty directory so we +// create the directory that we want, and then point CopySpec to its +// parent to copy to the root of the distribution +ext.logsDir = new File(buildDir, 'logs-hack/logs') +task createLogsDir(type: EmptyDirTask) { + dir "${logsDir}" + dirMode 0755 +} +ext.pluginsDir= new File(buildDir, 'plugins-hack/plugins') +task createPluginsDir(type: EmptyDirTask) { + dir "${pluginsDir}" + dirMode 0755 +} + +CopySpec archiveFiles(CopySpec... innerFiles) { + return copySpec { + into("elasticsearch-${version}") { + with libFiles + into('config') { + dirMode 0750 + fileMode 0660 + with configFiles('def') + } + into('bin') { + with copySpec { + with binFiles('def') + from('../src/bin') { + include '*.bat' + filter(FixCrLfFilter, eol: FixCrLfFilter.CrLf.newInstance('crlf')) + } + MavenFilteringHack.filter(it, expansionsForDistribution('def')) + } + } + into('') { + from { + dirMode 0755 + logsDir.getParent() + } + } + into('') { + from { + dirMode 0755 + pluginsDir.getParent() + } + } + with commonFiles + with noticeFile + from('../src') { + include 'bin/*.exe' + } + for (CopySpec files : innerFiles) { + with files + } + } + } +} + +task buildIntegTestZip(type: Zip) { + dependsOn createLogsDir, createPluginsDir + destinationDir = file('integ-test-zip/build/distributions') + baseName = 'elasticsearch' + with archiveFiles(transportModulesFiles) +} + +task buildZip(type: Zip) { + dependsOn createLogsDir, createPluginsDir + destinationDir = file('zip/build/distributions') + baseName = 'elasticsearch' + with archiveFiles(modulesFiles) +} + +task buildTar(type: Tar) { + dependsOn createLogsDir, createPluginsDir + destinationDir = file('tar/build/distributions') + baseName = 'elasticsearch' + extension = 'tar.gz' + compression = Compression.GZIP + dirMode 0755 + fileMode 0644 + with archiveFiles(modulesFiles) +} + +// This configures the default artifact for the distribution specific +// subprojects. We have subprojects for two reasons: +// 1. Gradle project substitutions can only bind to the default +// configuration of a project +// 2. The integ-test-zip and zip distributions have the exact same +// filename, so they must be placed in different directories. 
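For context, here is a minimal sketch of the project-substitution mechanism that motivates reason 1 above. The consumer build is hypothetical; only the `org.elasticsearch.distribution.zip` coordinates (which mirror the Maven config further down) come from this change.

```
// Hypothetical consumer build script: swap the published zip distribution
// for the locally built one. Gradle resolves such a substitution through
// the target project's 'default' configuration, which is why each
// distribution subproject wires its build task into 'default' below.
configurations.all {
  resolutionStrategy.dependencySubstitution {
    substitute module('org.elasticsearch.distribution.zip:elasticsearch') with project(':distribution:archives:zip')
  }
}
```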
+subprojects { + apply plugin: 'distribution' + + archivesBaseName = 'elasticsearch' + + String buildTask = "build${it.name.replaceAll(/-[a-z]/) { it.substring(1).toUpperCase() }.capitalize()}" + ext.buildDist = parent.tasks.getByName(buildTask) + artifacts { + 'default' buildDist + } +} + +/***************************************************************************** + * Rest test config * + *****************************************************************************/ +subprojects { + apply plugin: 'elasticsearch.standalone-rest-test' + apply plugin: 'elasticsearch.rest-test' + + if (project.name == 'integ-test-zip') { + integTest { + includePackaged true + } + } + + integTestCluster { + dependsOn assemble + distribution = project.name + } + integTestRunner { + if (Os.isFamily(Os.FAMILY_WINDOWS) && System.getProperty('tests.timeoutSuite') == null) { + // override the suite timeout to 30 mins for windows, because it has the most inefficient filesystem known to man + systemProperty 'tests.timeoutSuite', '1800000!' + } + } + + processTestResources { + inputs.properties(project(':distribution').restTestExpansions) + MavenFilteringHack.filter(it, project(':distribution').restTestExpansions) + } +} + +/***************************************************************************** + * Maven config * + *****************************************************************************/ +configure(subprojects.findAll { it.name.contains('zip') }) { + // only zip distributions go to maven + BuildPlugin.configurePomGeneration(project) + apply plugin: 'nebula.info-scm' + apply plugin: 'nebula.maven-base-publish' + apply plugin: 'nebula.maven-scm' + + // note: the group must be correct before applying the nexus plugin, or + // it will capture the wrong value... + project.group = "org.elasticsearch.distribution.${project.name}" + + publishing { + publications { + nebula { + artifactId 'elasticsearch' + artifact buildDist + } + /* + * HUGE HACK: the underlying maven publication library refuses to + * deploy any attached artifacts when the packaging type is set to + * 'pom'. But Sonatype's OSS repositories require source files for + * artifacts that are of type 'zip'. We already publish the source + * and javadoc for Elasticsearch under the various other subprojects. + * So here we create another publication using the same name that + * has the "real" pom, and rely on the fact that gradle will execute + * the publish tasks in alphabetical order. This lets us publish the + * zip file even though the pom says the type is 'pom' instead of + * 'zip'. We cannot set up a dependency between the tasks because the + * publishing tasks are created *extremely* late in the configuration + * phase, so that we cannot get ahold of the actual task. Furthermore, + * this entire hack only exists so we can make publishing to maven + * local work, since we publish to maven central externally. 
+ */ + nebulaRealPom(MavenPublication) { + artifactId 'elasticsearch' + pom.packaging = 'pom' + pom.withXml { XmlProvider xml -> + Node root = xml.asNode() + root.appendNode('name', 'Elasticsearch') + root.appendNode('description', 'A Distributed RESTful Search Engine') + root.appendNode('url', PluginBuildPlugin.urlFromOrigin(project.scminfo.origin)) + Node scmNode = root.appendNode('scm') + scmNode.appendNode('url', project.scminfo.origin) + } + } + } + } +} + diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/archives/integ-test-zip/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java similarity index 100% rename from distribution/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java rename to distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java diff --git a/distribution/integ-test-zip/src/test/java/org/elasticsearch/test/rest/IntegTestZipClientYamlTestSuiteIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/IntegTestZipClientYamlTestSuiteIT.java similarity index 100% rename from distribution/integ-test-zip/src/test/java/org/elasticsearch/test/rest/IntegTestZipClientYamlTestSuiteIT.java rename to distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/IntegTestZipClientYamlTestSuiteIT.java diff --git a/distribution/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeRestUsageIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeRestUsageIT.java similarity index 100% rename from distribution/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeRestUsageIT.java rename to distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeRestUsageIT.java diff --git a/distribution/integ-test-zip/src/test/java/org/elasticsearch/test/rest/RequestsWithoutContentIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/RequestsWithoutContentIT.java similarity index 100% rename from distribution/integ-test-zip/src/test/java/org/elasticsearch/test/rest/RequestsWithoutContentIT.java rename to distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/RequestsWithoutContentIT.java diff --git a/distribution/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseTests.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseTests.java similarity index 100% rename from distribution/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseTests.java rename to distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseTests.java diff --git a/distribution/archives/tar/build.gradle b/distribution/archives/tar/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/archives/tar/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. 
All configuration of the +// distribution is done in the parent project. diff --git a/distribution/tar/src/test/java/org/elasticsearch/test/rest/TarClientYamlTestSuiteIT.java b/distribution/archives/tar/src/test/java/org/elasticsearch/test/rest/TarClientYamlTestSuiteIT.java similarity index 100% rename from distribution/tar/src/test/java/org/elasticsearch/test/rest/TarClientYamlTestSuiteIT.java rename to distribution/archives/tar/src/test/java/org/elasticsearch/test/rest/TarClientYamlTestSuiteIT.java diff --git a/distribution/tar/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_modules.yml b/distribution/archives/tar/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_modules.yml similarity index 100% rename from distribution/tar/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_modules.yml rename to distribution/archives/tar/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_modules.yml diff --git a/distribution/archives/zip/build.gradle b/distribution/archives/zip/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/archives/zip/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/zip/src/test/java/org/elasticsearch/test/rest/ZipClientYamlTestSuiteIT.java b/distribution/archives/zip/src/test/java/org/elasticsearch/test/rest/ZipClientYamlTestSuiteIT.java similarity index 100% rename from distribution/zip/src/test/java/org/elasticsearch/test/rest/ZipClientYamlTestSuiteIT.java rename to distribution/archives/zip/src/test/java/org/elasticsearch/test/rest/ZipClientYamlTestSuiteIT.java diff --git a/distribution/zip/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_modules.yml b/distribution/archives/zip/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_modules.yml similarity index 100% rename from distribution/zip/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_modules.yml rename to distribution/archives/zip/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_modules.yml diff --git a/distribution/build.gradle b/distribution/build.gradle index d322aa9c1ff12..5bb9944b4a362 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -21,7 +21,6 @@ import org.apache.tools.ant.filters.FixCrLfFilter import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.BuildPlugin -import org.elasticsearch.gradle.EmptyDirTask import org.elasticsearch.gradle.ConcatFilesTask import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.NoticeTask @@ -29,20 +28,7 @@ import org.elasticsearch.gradle.precommit.DependencyLicensesTask import org.elasticsearch.gradle.precommit.UpdateShasTask import org.elasticsearch.gradle.test.RunTask -// for deb/rpm -buildscript { - repositories { - maven { - url "https://plugins.gradle.org/m2/" - } - } - dependencies { - classpath 'com.netflix.nebula:gradle-ospackage-plugin:3.4.0' - } -} - -Collection distributions = project.subprojects.findAll { - it.path.contains(':tools') == false && it.path.contains(':bwc') == false } +Collection distributions = project('archives').subprojects + project('packages').subprojects /***************************************************************************** * Third party dependencies report * @@ -96,17 +82,9 @@ project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { from { 
zipTree(project(module.path).bundlePlugin.outputs.files.singleFile) } } } - // We would like to make sure integ tests for the distribution run after - // integ tests for the modules included in the distribution. - project.configure(distributions.findAll { it.name != 'integ-test-zip'}) { Project distribution -> - distribution.afterEvaluate({ - // some integTest tasks will have multiple finalizers - distribution.integTest.mustRunAfter module.tasks.find { t -> t.name.matches(".*integTest\$") } - }) - } - // also want to make sure the module's integration tests run after the integ-test-zip (ie rest tests) + // make sure the module's integration tests run after the integ-test-zip (ie rest tests) module.afterEvaluate({ - module.integTest.mustRunAfter(':distribution:integ-test-zip:integTest') + module.integTest.mustRunAfter(':distribution:archives:integ-test-zip:integTest') }) restTestExpansions['expected.modules.count'] += 1 } @@ -132,41 +110,12 @@ task clean(type: Delete) { delete 'build' } -configure(distributions) { - /***************************************************************************** - * Rest test config * - *****************************************************************************/ - apply plugin: 'elasticsearch.standalone-rest-test' - apply plugin: 'elasticsearch.rest-test' - project.integTest { - includePackaged project.name == 'integ-test-zip' - if (project.name != 'integ-test-zip') { - mustRunAfter ':distribution:integ-test-zip:integTest' - } - } - project.integTestCluster { - dependsOn project.assemble - distribution = project.name - } - - processTestResources { - inputs.properties(project(':distribution').restTestExpansions) - MavenFilteringHack.filter(it, project(':distribution').restTestExpansions) - } - - /***************************************************************************** - * Maven config * - *****************************************************************************/ - // note: the group must be correct before applying the nexus plugin, or it will capture the wrong value... - project.group = "org.elasticsearch.distribution.${project.name}" - project.archivesBaseName = 'elasticsearch' - +configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { // TODO: the map needs to be an input of the tasks, so that when it changes, the task will re-run... 
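One possible shape for that TODO, sketched under the assumption that the expansion map is fully resolved to serializable values before the task executes; the task name `processDefConfigFiles` and the destination path are illustrative, not part of this change:

```
// Hypothetical sketch: feed the expansion map into the task's input
// snapshot so that editing any expansion value re-runs the copy.
task processDefConfigFiles(type: Copy) {
  Map expansions = expansionsForDistribution('def')
  inputs.properties(expansions) // map entries join the up-to-date check
  from '../src/config'
  into "${buildDir}/config"
  MavenFilteringHack.filter(it, expansions)
}
```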
/***************************************************************************** * Properties to expand when copying packaging files * *****************************************************************************/ project.ext { - expansions = expansionsForDistribution(project.name) /***************************************************************************** * Common files in all distributions * @@ -191,18 +140,22 @@ configure(distributions) { from project(':distribution').buildTransportModules } - configFiles = copySpec { - from '../src/main/resources/config' - MavenFilteringHack.filter(it, expansions) + configFiles = { distributionType -> + copySpec { + from '../src/config' + MavenFilteringHack.filter(it, expansionsForDistribution(distributionType)) + } } - binFiles = copySpec { - // everything except windows files - from '../src/main/resources/bin' - exclude '*.bat' - exclude '*.exe' - eachFile { it.setMode(0755) } - MavenFilteringHack.filter(it, expansions) + binFiles = { distributionType -> + copySpec { + // everything except windows files + from '../src/bin' + exclude '*.bat' + exclude '*.exe' + eachFile { it.setMode(0755) } + MavenFilteringHack.filter(it, expansionsForDistribution(distributionType)) + } } commonFiles = copySpec { @@ -220,270 +173,6 @@ configure(distributions) { } } - /***************************************************************************** - * Publishing setup * - *****************************************************************************/ - if (['zip', 'integ-test-zip'].contains(it.name)) { - BuildPlugin.configurePomGeneration(project) - apply plugin: 'nebula.info-scm' - apply plugin: 'nebula.maven-base-publish' - apply plugin: 'nebula.maven-scm' - } -} - -/***************************************************************************** - * Zip and tgz configuration * - *****************************************************************************/ -configure(distributions.findAll { ['zip', 'tar', 'integ-test-zip'].contains(it.name) }) { - // CopySpec does not make it easy to create an empty director so we create the directory that we want, and then point CopySpec to its - // parent to copy to the root of the distribution - File logs = new File(buildDir, 'logs-hack/logs') - task createLogDir(type: EmptyDirTask) { - dir "${logs}" - dirMode 0755 - } - File plugins = new File(buildDir, 'plugins-hack/plugins') - task createPluginsDir(type: EmptyDirTask) { - dir "${plugins}" - dirMode 0755 - } - project.ext.archivesFiles = copySpec { - into("elasticsearch-${version}") { - with libFiles - into('config') { - dirMode 0750 - fileMode 0660 - with configFiles - } - into('bin') { - with copySpec { - with binFiles - from('../src/main/resources/bin') { - include '*.bat' - filter(FixCrLfFilter, eol: FixCrLfFilter.CrLf.newInstance('crlf')) - } - MavenFilteringHack.filter(it, expansions) - } - } - into('') { - from { - dirMode 0755 - logs.getParent() - } - } - into('') { - from { - dirMode 0755 - plugins.getParent() - } - } - with commonFiles - with noticeFile - from('../src/main/resources') { - include 'bin/*.exe' - } - if (project.name != 'integ-test-zip') { - with modulesFiles - } else { - with transportModulesFiles - } - } - } -} - -/***************************************************************************** - * Deb and rpm configuration * - ***************************************************************************** - * - * The general strategy here is to build a directory on disk, packagingFiles - * that contains stuff that needs to be copied into the distributions. 
This is - * important for two reasons: - * 1. ospackage wants to copy the directory permissions that it sees off of the - * filesystem. If you ask it to create a directory that doesn't already - * exist on disk it petulantly creates it with 0755 permissions, no matter - * how hard you try to convince it otherwise. - * 2. Convincing ospackage to pick up an empty directory as part of a set of - * directories on disk is reasonably easy. Convincing it to just create an - * empty directory requires more wits than I have. - * 3. ospackage really wants to suck up some of the debian control scripts - * directly from the filesystem. It doesn't want to process them through - * MavenFilteringHack or any other copy-style action. - * - * The following commands are useful when it comes to check the user/group - * and files permissions set within the RPM and DEB packages: - * - * rpm -qlp --dump path/to/elasticsearch.rpm - * dpkg -c path/to/elasticsearch.deb - */ -configure(distributions.findAll { ['deb', 'rpm'].contains(it.name) }) { - integTest.enabled = Os.isFamily(Os.FAMILY_WINDOWS) == false - File packagingFiles = new File(buildDir, 'packaging') - project.ext.packagingFiles = packagingFiles - task processPackagingFiles(type: Copy) { - from '../src/main/packaging' - from 'src/main/packaging' - - MavenFilteringHack.filter(it, expansions) - into packagingFiles - /* Explicitly declare the outputs so that gradle won't skip this task if - one of the other tasks like createEtc run first and create the packaging - directory as a side effect. */ - outputs.dir("${packagingFiles}/env") - outputs.dir("${packagingFiles}/systemd") - } - - task createEtc(type: EmptyDirTask) { - dir "${packagingFiles}/etc/elasticsearch" - dirMode 0750 - outputs.dir dir - } - - task fillEtc(type: Copy) { - dependsOn createEtc - with configFiles - into "${packagingFiles}/etc/elasticsearch" - /* Explicitly declare the output files so this task doesn't consider itself - up to date when the directory is created, which it would by default. And - that'll happen when createEtc runs. */ - outputs.file "${packagingFiles}/etc/elasticsearch/elasticsearch.yml" - outputs.file "${packagingFiles}/etc/elasticsearch/jvm.options" - outputs.file "${packagingFiles}/etc/elasticsearch/log4j2.properties" - } - - task createPidDir(type: EmptyDirTask) { - dir "${packagingFiles}/var/run/elasticsearch" - } - task createLogDir(type: EmptyDirTask) { - dir "${packagingFiles}/var/log/elasticsearch" - } - task createDataDir(type: EmptyDirTask) { - dir "${packagingFiles}/var/lib/elasticsearch" - } - task createPluginsDir(type: EmptyDirTask) { - dir "${packagingFiles}/usr/share/elasticsearch/plugins" - } - - /** - * Setup the build/packaging directory to be like the target filesystem - * because ospackage will use arbitrary permissions if you try to create a - * directory that doesn't exist on the filesystem. - */ - task preparePackagingFiles { - dependsOn processPackagingFiles, fillEtc, createPidDir, createLogDir, - createDataDir, createPluginsDir - } - - apply plugin: 'nebula.ospackage-base' - ospackage { - packageName 'elasticsearch' - maintainer 'Elasticsearch Team <info@elastic.co>' - summary ''' - Elasticsearch is a distributed RESTful search engine built for the cloud. 
- Reference documentation can be found at - https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html - and the 'Elasticsearch: The Definitive Guide' book can be found at - https://www.elastic.co/guide/en/elasticsearch/guide/current/index.html - '''.stripIndent().replace('\n', ' ').trim() - url 'https://www.elastic.co/' - - // signing setup - if (project.hasProperty('signing.password') && System.getProperty('build.snapshot', 'true') == 'false') { - signingKeyId = project.hasProperty('signing.keyId') ? project.property('signing.keyId') : 'D88E42B4' - signingKeyPassphrase = project.property('signing.password') - signingKeyRingFile = project.hasProperty('signing.secretKeyRingFile') ? - project.file(project.property('signing.secretKeyRingFile')) : - new File(new File(System.getProperty('user.home'), '.gnupg'), 'secring.gpg') - } - - String scripts = "${packagingFiles}/scripts" - preInstall file("${scripts}/preinst") - postInstall file("${scripts}/postinst") - preUninstall file("${scripts}/prerm") - postUninstall file("${scripts}/postrm") - - if (project.name == 'rpm') { - requires('/bin/bash') - } else if (project.name == 'deb') { - requires('bash') - } - requires('coreutils') - - into '/usr/share/elasticsearch' - fileMode 0644 - dirMode 0755 - user 'root' - permissionGroup 'root' - with libFiles - with modulesFiles - into('bin') { - with binFiles - } - with copySpec { - with commonFiles - if (project.name == 'deb') { - // Deb gets a copyright file instead. - exclude 'LICENSE.txt' - } - } - with noticeFile - - configurationFile '/etc/elasticsearch/elasticsearch.yml' - configurationFile '/etc/elasticsearch/jvm.options' - configurationFile '/etc/elasticsearch/log4j2.properties' - into('/etc/elasticsearch') { - dirMode 0750 - fileMode 0660 - permissionGroup 'elasticsearch' - includeEmptyDirs true - createDirectoryEntry true - fileType CONFIG | NOREPLACE - from "${packagingFiles}/etc/elasticsearch" - } - - into('/usr/lib/tmpfiles.d') { - from "${packagingFiles}/systemd/elasticsearch.conf" - } - configurationFile '/usr/lib/systemd/system/elasticsearch.service' - into('/usr/lib/systemd/system') { - fileType CONFIG | NOREPLACE - from "${packagingFiles}/systemd/elasticsearch.service" - } - into('/usr/lib/sysctl.d') { - fileType CONFIG | NOREPLACE - from "${packagingFiles}/systemd/sysctl/elasticsearch.conf" - } - configurationFile '/etc/init.d/elasticsearch' - into('/etc/init.d') { - fileMode 0750 - fileType CONFIG | NOREPLACE - from "${packagingFiles}/init.d/elasticsearch" - } - configurationFile project.expansions['path.env'] - into(new File(project.expansions['path.env']).getParent()) { - fileType CONFIG | NOREPLACE - fileMode 0660 - from "${project.packagingFiles}/env/elasticsearch" - } - - /** - * Suck up all the empty directories that we need to install into the path. - */ - Closure suckUpEmptyDirectories = { path, u, g, mode -> - into(path) { - from "${packagingFiles}/${path}" - includeEmptyDirs true - createDirectoryEntry true - user u - permissionGroup g - dirMode mode - fileMode mode - } - } - suckUpEmptyDirectories('/var/run', 'elasticsearch', 'elasticsearch', 0755) - suckUpEmptyDirectories('/var/log', 'elasticsearch', 'elasticsearch', 0750) - suckUpEmptyDirectories('/var/lib', 'elasticsearch', 'elasticsearch', 0750) - suckUpEmptyDirectories('/usr/share/elasticsearch', 'root', 'root', 0755) - } } task run(type: RunTask) { @@ -520,83 +209,84 @@ task run(type: RunTask) { * day. DEB retries forever. 
* */ -Map expansionsForDistribution(distributionType) { - final String defaultHeapSize = "1g" - final String packagingPathData = "path.data: /var/lib/elasticsearch" - final String pathLogs = "/var/log/elasticsearch" - final String packagingPathLogs = "path.logs: ${pathLogs}" - final String packagingLoggc = "${pathLogs}/gc.log" - - String footer = "# Built for ${project.name}-${project.version} " + - "(${distributionType})" - Map expansions = [ - 'project.name': project.name, - 'project.version': version, - - 'path.conf': [ - 'tar': '"$ES_HOME"/config', - 'zip': '"$ES_HOME"/config', - 'integ-test-zip': '"$ES_HOME"/config', - 'def': '/etc/elasticsearch', - ], - 'path.data': [ - 'deb': packagingPathData, - 'rpm': packagingPathData, - 'def': '#path.data: /path/to/data' - ], - 'path.env': [ - 'deb': '/etc/default/elasticsearch', - 'rpm': '/etc/sysconfig/elasticsearch', - /* There isn't one of these files for tar or zip but its important to - make an empty string here so the script can properly skip it. */ - 'def': 'if [ -z "$ES_PATH_CONF" ]; then ES_PATH_CONF="$ES_HOME"/config; done', - ], - 'source.path.env': [ - 'deb': 'source /etc/default/elasticsearch', - 'rpm': 'source /etc/sysconfig/elasticsearch', - 'def': 'if [ -z "$ES_PATH_CONF" ]; then ES_PATH_CONF="$ES_HOME"/config; fi', - ], - 'path.logs': [ - 'deb': packagingPathLogs, - 'rpm': packagingPathLogs, - 'def': '#path.logs: /path/to/logs' - ], - 'loggc': [ - 'deb': packagingLoggc, - 'rpm': packagingLoggc, - 'def': 'logs/gc.log' - ], - - 'heap.min': defaultHeapSize, - 'heap.max': defaultHeapSize, - - 'heap.dump.path': [ - 'deb': "-XX:HeapDumpPath=/var/lib/elasticsearch", - 'rpm': "-XX:HeapDumpPath=/var/lib/elasticsearch", - 'def': "#-XX:HeapDumpPath=/heap/dump/path" - ], - - 'stopping.timeout': [ - 'rpm': 86400, - ], - - 'scripts.footer': [ - /* Debian needs exit 0 on these scripts so we add it here and preserve - the pretty footer. */ - 'deb': "exit 0\n${footer}", - 'def': footer - ], - ] - Map result = [:] - expansions = expansions.each { key, value -> - if (value instanceof Map) { - // 'def' is for default but its three characters like 'rpm' and 'deb' - value = value[distributionType] ?: value['def'] - if (value == null) { - return +subprojects { + ext.expansionsForDistribution = { distributionType -> + final String defaultHeapSize = "1g" + final String packagingPathData = "path.data: /var/lib/elasticsearch" + final String pathLogs = "/var/log/elasticsearch" + final String packagingPathLogs = "path.logs: ${pathLogs}" + final String packagingLoggc = "${pathLogs}/gc.log" + + String footer = "# Built for ${project.name}-${project.version} " + + "(${distributionType})" + Map expansions = [ + 'project.name': project.name, + 'project.version': version, + + 'path.conf': [ + 'deb': '/etc/elasticsearch', + 'rpm': '/etc/elasticsearch', + 'def': '"$ES_HOME"/config' + ], + 'path.data': [ + 'deb': packagingPathData, + 'rpm': packagingPathData, + 'def': '#path.data: /path/to/data' + ], + 'path.env': [ + 'deb': '/etc/default/elasticsearch', + 'rpm': '/etc/sysconfig/elasticsearch', + /* There isn't one of these files for tar or zip but it's important to + make an empty string here so the script can properly skip it. 
*/ + 'def': 'if [ -z "$ES_PATH_CONF" ]; then ES_PATH_CONF="$ES_HOME"/config; fi', + ], + 'source.path.env': [ + 'deb': 'source /etc/default/elasticsearch', + 'rpm': 'source /etc/sysconfig/elasticsearch', + 'def': 'if [ -z "$ES_PATH_CONF" ]; then ES_PATH_CONF="$ES_HOME"/config; fi', + ], + 'path.logs': [ + 'deb': packagingPathLogs, + 'rpm': packagingPathLogs, + 'def': '#path.logs: /path/to/logs' + ], + 'loggc': [ + 'deb': packagingLoggc, + 'rpm': packagingLoggc, + 'def': 'logs/gc.log' + ], + + 'heap.min': defaultHeapSize, + 'heap.max': defaultHeapSize, + + 'heap.dump.path': [ + 'deb': "-XX:HeapDumpPath=/var/lib/elasticsearch", + 'rpm': "-XX:HeapDumpPath=/var/lib/elasticsearch", + 'def': "#-XX:HeapDumpPath=/heap/dump/path" + ], + + 'stopping.timeout': [ + 'rpm': 86400, + ], + + 'scripts.footer': [ + /* Debian needs exit 0 on these scripts so we add it here and preserve + the pretty footer. */ + 'deb': "exit 0\n${footer}", + 'def': footer + ], + ] + Map result = [:] + expansions = expansions.each { key, value -> + if (value instanceof Map) { + // 'def' is for default but its three characters like 'rpm' and 'deb' + value = value[distributionType] ?: value['def'] + if (value == null) { + return + } + } + result[key] = value + } + return result + } } diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index a9a7bd1e0a247..840c69742a0c7 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -29,14 +29,21 @@ import java.util.regex.Matcher * tests to test against the next unreleased version, closest to this version, * without relying on snapshots. */ -final Matcher match = project.name =~ /bwc-snapshot-(\d+\.(\d+|x))/ -if (!match.matches()) { - throw new InvalidUserDataException("Unsupport project name ${project.name}") -} -String bwcBranch = match.group(1) +subprojects { -if (project.hasProperty('bwcVersion')) { - Version bwcVersion = project.ext.bwcVersion + Version bwcVersion = bwcVersions.getSnapshotForProject(project.name) + if (bwcVersion == null) { + // this project won't do anything + return + } + + String bwcBranch + if (project.name == 'next-minor-snapshot') { + // this is always a .x series + bwcBranch = "${bwcVersion.major}.x" + } else { + bwcBranch = "${bwcVersion.major}.${bwcVersion.minor}" + } apply plugin: 'distribution' // Not published so no need to assemble @@ -61,7 +68,7 @@ if (project.hasProperty('bwcVersion')) { doLast { project.ext.remoteExists = false output.toString('UTF-8').eachLine { - if (it.contains("${remote}\thttps://github.com/${remote}/elasticsearch.git")) { + if (it.contains("${remote}\t")) { project.ext.remoteExists = true } } @@ -113,9 +120,17 @@ if (project.hasProperty('bwcVersion')) { } } - File bwcDeb = file("${checkoutDir}/distribution/deb/build/distributions/elasticsearch-${bwcVersion}.deb") - File bwcRpm = file("${checkoutDir}/distribution/rpm/build/distributions/elasticsearch-${bwcVersion}.rpm") - File bwcZip = file("${checkoutDir}/distribution/zip/build/distributions/elasticsearch-${bwcVersion}.zip") + String debDir = 'distribution/packages/deb' + String rpmDir = 'distribution/packages/rpm' + String zipDir = 'distribution/archives/zip' + if (bwcVersion.before('6.3.0')) { + debDir = 'distribution/deb' + rpmDir = 'distribution/rpm' + zipDir = 'distribution/zip' + } + File bwcDeb = file("${checkoutDir}/${debDir}/build/distributions/elasticsearch-${bwcVersion}.deb") + File bwcRpm = file("${checkoutDir}/${rpmDir}/build/distributions/elasticsearch-${bwcVersion}.rpm") + File 
bwcZip = file("${checkoutDir}/${zipDir}/build/distributions/elasticsearch-${bwcVersion}.zip") task buildBwcVersion(type: Exec) { dependsOn checkoutBwcBranch, writeBuildMetadata workingDir = checkoutDir @@ -132,7 +147,7 @@ if (project.hasProperty('bwcVersion')) { } else { executable new File(checkoutDir, 'gradlew').toString() } - args ":distribution:deb:assemble", ":distribution:rpm:assemble", ":distribution:zip:assemble", "-Dbuild.snapshot=${System.getProperty('build.snapshot') ?: 'true'}" + args ":${debDir.replace('/', ':')}:assemble", ":${rpmDir.replace('/', ':')}:assemble", ":${zipDir.replace('/', ':')}:assemble", "-Dbuild.snapshot=true" final LogLevel logLevel = gradle.startParameter.logLevel if ([LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG].contains(logLevel)) { args "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" diff --git a/distribution/bwc/maintenance-bugfix-snapshot/build.gradle b/distribution/bwc/maintenance-bugfix-snapshot/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/distribution/bwc/next-bugfix-snapshot/build.gradle b/distribution/bwc/next-bugfix-snapshot/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/distribution/bwc/next-minor-snapshot/build.gradle b/distribution/bwc/next-minor-snapshot/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/distribution/bwc/staged-minor-snapshot/build.gradle b/distribution/bwc/staged-minor-snapshot/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/distribution/deb/build.gradle b/distribution/deb/build.gradle deleted file mode 100644 index cfc9aa4d8efb1..0000000000000 --- a/distribution/deb/build.gradle +++ /dev/null @@ -1,61 +0,0 @@ -import org.elasticsearch.gradle.LoggedExec - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -task buildDeb(type: Deb) { - dependsOn preparePackagingFiles - baseName 'elasticsearch' // this is what pom generation uses for artifactId - // Follow elasticsearch's deb file naming convention - archiveName "${packageName}-${project.version}.deb" - version = project.version - - packageGroup 'web' - requires 'libc6' - requires 'adduser' - - into('/usr/share/lintian/overrides') { - from("${project.packagingFiles}/lintian/elasticsearch") - } - into('/usr/share/doc/elasticsearch') { - from "${project.packagingFiles}/copyright" - fileMode 0644 - } -} - -artifacts { - 'default' buildDeb - archives buildDeb -} - -integTest.enabled = false -licenseHeaders.enabled = false - -// task that sanity checks if the Deb archive can be extracted -task checkDeb(type: LoggedExec) { - onlyIf { new File('/usr/bin/dpkg-deb').exists() || new File('/usr/local/bin/dpkg-deb').exists() } - final File debExtracted = new File("${buildDir}", 'deb-extracted') - commandLine 'dpkg-deb', '-x', "${buildDir}/distributions/elasticsearch-${project.version}.deb", debExtracted - doFirst { - debExtracted.deleteDir() - } -} - -checkDeb.dependsOn buildDeb -check.dependsOn checkDeb diff --git a/distribution/integ-test-zip/build.gradle b/distribution/integ-test-zip/build.gradle deleted file mode 100644 index 89ad1ebee73da..0000000000000 --- a/distribution/integ-test-zip/build.gradle +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import org.elasticsearch.gradle.plugin.PluginBuildPlugin -import org.apache.tools.ant.taskdefs.condition.Os - -task buildZip(type: Zip) { - dependsOn createLogDir, createPluginsDir - baseName = 'elasticsearch' - with archivesFiles -} - -artifacts { - 'default' buildZip - archives buildZip -} - -publishing { - publications { - nebula { - artifactId 'elasticsearch' - artifact buildZip - } - /* HUGE HACK: the underlying maven publication library refuses to deploy any attached artifacts - * when the packaging type is set to 'pom'. But Sonatype's OSS repositories require source files - * for artifacts that are of type 'zip'. We already publish the source and javadoc for Elasticsearch - * under the various other subprojects. So here we create another publication using the same - * name that has the "real" pom, and rely on the fact that gradle will execute the publish tasks - * in alphabetical order. This lets us publish the zip file and even though the pom says the - * type is 'pom' instead of 'zip'. We cannot setup a dependency between the tasks because the - * publishing tasks are created *extremely* late in the configuration phase, so that we cannot get - * ahold of the actual task. Furthermore, this entire hack only exists so we can make publishing to - * maven local work, since we publish to maven central externally. 
*/ - nebulaRealPom(MavenPublication) { - artifactId 'elasticsearch' - pom.packaging = 'pom' - pom.withXml { XmlProvider xml -> - Node root = xml.asNode() - root.appendNode('name', 'Elasticsearch') - root.appendNode('description', 'A Distributed RESTful Search Engine') - root.appendNode('url', PluginBuildPlugin.urlFromOrigin(project.scminfo.origin)) - Node scmNode = root.appendNode('scm') - scmNode.appendNode('url', project.scminfo.origin) - } - } - } -} - -integTestRunner { - if (Os.isFamily(Os.FAMILY_WINDOWS) && System.getProperty('tests.timeoutSuite') == null) { - // override the suite timeout to 30 mins for windows, because it has the most inefficient filesystem known to man - systemProperty 'tests.timeoutSuite', '1800000!' - } -} - -integTest.dependsOn buildZip diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle new file mode 100644 index 0000000000000..72bd3a739e8b3 --- /dev/null +++ b/distribution/packages/build.gradle @@ -0,0 +1,316 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.elasticsearch.gradle.LoggedExec +import org.elasticsearch.gradle.MavenFilteringHack + +/***************************************************************************** + * Deb and rpm configuration * + ***************************************************************************** + * + * The general strategy here is to build a directory on disk that contains + * stuff that needs to be copied into the distributions. This is + * important for three reasons: + * 1. ospackage wants to copy the directory permissions that it sees off of the + * filesystem. If you ask it to create a directory that doesn't already + * exist on disk it petulantly creates it with 0755 permissions, no matter + * how hard you try to convince it otherwise. + * 2. Convincing ospackage to pick up an empty directory as part of a set of + * directories on disk is reasonably easy. Convincing it to just create an + * empty directory requires more wits than I have. + * 3. ospackage really wants to suck up some of the debian control scripts + * directly from the filesystem. It doesn't want to process them through + * MavenFilteringHack or any other copy-style action. 
+ * + * The following commands are useful when it comes to checking the user/group + * and file permissions set within the RPM and DEB packages: + * + * rpm -qlp --dump path/to/elasticsearch.rpm + * dpkg -c path/to/elasticsearch.deb + */ + +buildscript { + repositories { + maven { + url "https://plugins.gradle.org/m2/" + } + } + dependencies { + classpath 'com.netflix.nebula:gradle-ospackage-plugin:4.7.1' + } +} + +void addProcessFilesTask(String type) { + String packagingFiles = "build/packaging/${type}" + + task("process${type.capitalize()}Files", type: Copy) { + from 'src/common' + from "src/${type}" + into packagingFiles + + into('config') { + from '../src/config' + } + + MavenFilteringHack.filter(it, expansionsForDistribution(type)) + + doLast { + // create empty dirs; we set the permissions when configuring the packages + mkdir "${packagingFiles}/var/run/elasticsearch" + mkdir "${packagingFiles}/var/log/elasticsearch" + mkdir "${packagingFiles}/var/lib/elasticsearch" + mkdir "${packagingFiles}/usr/share/elasticsearch/plugins" + } + } +} +addProcessFilesTask('deb') +addProcessFilesTask('rpm') + +// Common configuration that is package dependent. This can't go in ospackage +// since we have different templated files that need to be consumed, but the structure +// is the same +Closure commonPackageConfig(String type) { + return { + // Follow elasticsearch's file naming convention + archiveName "elasticsearch-${project.version}.${type}" + + destinationDir = file("${type}/build/distributions") + String packagingFiles = "build/packaging/${type}" + + String scripts = "${packagingFiles}/scripts" + preInstall file("${scripts}/preinst") + postInstall file("${scripts}/postinst") + preUninstall file("${scripts}/prerm") + postUninstall file("${scripts}/postrm") + + // top level "into" directive is not inherited from ospackage for some reason, so we must + // specify it again explicitly for copying common files + into('/usr/share/elasticsearch') { + into('bin') { + with binFiles(type) + } + with copySpec { + with commonFiles + if (type == 'deb') { + // Deb gets a copyright file instead. 
+ exclude 'LICENSE.txt' + } + } + } + + // ========= config files ========= + configurationFile '/etc/elasticsearch/elasticsearch.yml' + configurationFile '/etc/elasticsearch/jvm.options' + configurationFile '/etc/elasticsearch/log4j2.properties' + into('/etc/elasticsearch') { + //dirMode 0750 + fileMode 0660 + permissionGroup 'elasticsearch' + includeEmptyDirs true + createDirectoryEntry true + fileType CONFIG | NOREPLACE + from "${packagingFiles}/config" + } + String envFile = expansionsForDistribution(type)['path.env'] + configurationFile envFile + into(new File(envFile).getParent()) { + fileType CONFIG | NOREPLACE + fileMode 0660 + from "${packagingFiles}/env/elasticsearch" + } + + // ========= systemd ========= + configurationFile '/usr/lib/systemd/system/elasticsearch.service' + into('/usr/lib/tmpfiles.d') { + from "${packagingFiles}/systemd/elasticsearch.conf" + } + into('/usr/lib/systemd/system') { + fileType CONFIG | NOREPLACE + from "${packagingFiles}/systemd/elasticsearch.service" + } + into('/usr/lib/sysctl.d') { + fileType CONFIG | NOREPLACE + from "${packagingFiles}/systemd/sysctl/elasticsearch.conf" + } + + // ========= sysV init ========= + configurationFile '/etc/init.d/elasticsearch' + into('/etc/init.d') { + fileMode 0750 + fileType CONFIG | NOREPLACE + from "${packagingFiles}/init.d/elasticsearch" + } + + // ========= empty dirs ========= + // NOTE: these are created under packagingFiles as empty, but the permissions are set here + Closure copyEmptyDir = { path, u, g, mode -> + File file = new File(path) + into(file.parent) { + from "${packagingFiles}/${path}" + include file.name + includeEmptyDirs true + createDirectoryEntry true + user u + permissionGroup g + dirMode mode + } + } + copyEmptyDir('/var/run/elasticsearch', 'elasticsearch', 'elasticsearch', 0755) + copyEmptyDir('/var/log/elasticsearch', 'elasticsearch', 'elasticsearch', 0750) + copyEmptyDir('/var/lib/elasticsearch', 'elasticsearch', 'elasticsearch', 0750) + copyEmptyDir('/usr/share/elasticsearch/plugins', 'root', 'root', 0755) + } +} + +apply plugin: 'nebula.ospackage-base' + +// this is package independent configuration +ospackage { + packageName 'elasticsearch' + maintainer 'Elasticsearch Team <info@elastic.co>' + summary ''' + Elasticsearch is a distributed RESTful search engine built for the cloud. + Reference documentation can be found at + https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html + and the 'Elasticsearch: The Definitive Guide' book can be found at + https://www.elastic.co/guide/en/elasticsearch/guide/current/index.html + '''.stripIndent().replace('\n', ' ').trim() + url 'https://www.elastic.co/' + + // signing setup + if (project.hasProperty('signing.password') && System.getProperty('build.snapshot', 'true') == 'false') { + signingKeyId = project.hasProperty('signing.keyId') ? project.property('signing.keyId') : 'D88E42B4' + signingKeyPassphrase = project.property('signing.password') + signingKeyRingFile = project.hasProperty('signing.secretKeyRingFile') ? 
+ project.file(project.property('signing.secretKeyRingFile')) : + new File(new File(System.getProperty('user.home'), '.gnupg'), 'secring.gpg') + } + + requires('coreutils') + + fileMode 0644 + dirMode 0755 + user 'root' + permissionGroup 'root' + + into '/usr/share/elasticsearch' + with libFiles + with modulesFiles + with noticeFile +} + +task buildDeb(type: Deb) { + dependsOn processDebFiles + configure(commonPackageConfig('deb')) + + version = project.version + packageGroup 'web' + requires 'bash' + requires 'libc6' + requires 'adduser' + + into('/usr/share/lintian/overrides') { + from('src/deb/lintian/elasticsearch') + } + into('/usr/share/doc/elasticsearch') { + from 'src/deb/copyright' + fileMode 0644 + } +} + +// task that sanity checks if the Deb archive can be extracted +task checkDeb(type: LoggedExec) { + dependsOn buildDeb + onlyIf { new File('/usr/bin/dpkg-deb').exists() || new File('/usr/local/bin/dpkg-deb').exists() } + final File debExtracted = new File("${buildDir}", 'deb-extracted') + commandLine 'dpkg-deb', '-x', "deb/build/distributions/elasticsearch-${project.version}.deb", debExtracted + doFirst { + debExtracted.deleteDir() + } +} + +task buildRpm(type: Rpm) { + dependsOn processRpmFiles + configure(commonPackageConfig('rpm')) + + packageGroup 'Application/Internet' + requires '/bin/bash' + + prefix '/usr' + packager 'Elasticsearch' + version = project.version.replace('-', '_') + release = '1' + arch 'NOARCH' + os 'LINUX' + license '2009' + distribution 'Elasticsearch' + vendor 'Elasticsearch' + // TODO ospackage doesn't support icon but we used to have one + + // without this the rpm will have parent dirs of any files we copy in, eg /etc/elasticsearch + addParentDirs false + + // Declare the folders so that the RPM package manager removes + // them when upgrading or removing the package + directory('/usr/share/elasticsearch/bin', 0755) + directory('/usr/share/elasticsearch/lib', 0755) + directory('/usr/share/elasticsearch/modules', 0755) + modulesFiles.eachFile { FileCopyDetails fcp -> + if (fcp.name == "plugin-descriptor.properties") { + directory('/usr/share/elasticsearch/modules/' + fcp.file.parentFile.name, 0755) + } + } +} + +// task that sanity checks if the RPM archive can be extracted +task checkRpm(type: LoggedExec) { + dependsOn buildRpm + onlyIf { new File('/bin/rpm').exists() || new File('/usr/bin/rpm').exists() || new File('/usr/local/bin/rpm').exists() } + final File rpmDatabase = new File("${buildDir}", 'rpm-database') + final File rpmExtracted = new File("${buildDir}", 'rpm-extracted') + commandLine 'rpm', + '--badreloc', + '--nodeps', + '--noscripts', + '--notriggers', + '--dbpath', + rpmDatabase, + '--relocate', + "/=${rpmExtracted}", + '-i', + "rpm/build/distributions/elasticsearch-${project.version}.rpm" + doFirst { + rpmDatabase.deleteDir() + rpmExtracted.deleteDir() + } +} + +// This configures the default artifact for the distribution specific +// subprojects. 
We have subprojects because Gradle project substitutions +// can only bind to the default configuration of a project +subprojects { + apply plugin: 'distribution' + + String buildTask = "build${it.name.replaceAll(/-[a-z]/) { it.substring(1).toUpperCase() }.capitalize()}" + ext.buildDist = parent.tasks.getByName(buildTask) + artifacts { + 'default' buildDist + } +} + +check.dependsOn checkDeb, checkRpm + diff --git a/distribution/packages/deb/build.gradle b/distribution/packages/deb/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/packages/deb/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/packages/rpm/build.gradle b/distribution/packages/rpm/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/packages/rpm/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/rpm/src/main/resources/logo/elastic.gif b/distribution/packages/rpm/src/main/resources/logo/elastic.gif similarity index 100% rename from distribution/rpm/src/main/resources/logo/elastic.gif rename to distribution/packages/rpm/src/main/resources/logo/elastic.gif diff --git a/distribution/src/main/packaging/env/elasticsearch b/distribution/packages/src/common/env/elasticsearch similarity index 100% rename from distribution/src/main/packaging/env/elasticsearch rename to distribution/packages/src/common/env/elasticsearch diff --git a/distribution/src/main/packaging/scripts/postinst b/distribution/packages/src/common/scripts/postinst similarity index 100% rename from distribution/src/main/packaging/scripts/postinst rename to distribution/packages/src/common/scripts/postinst diff --git a/distribution/src/main/packaging/scripts/postrm b/distribution/packages/src/common/scripts/postrm similarity index 100% rename from distribution/src/main/packaging/scripts/postrm rename to distribution/packages/src/common/scripts/postrm diff --git a/distribution/src/main/packaging/scripts/preinst b/distribution/packages/src/common/scripts/preinst similarity index 100% rename from distribution/src/main/packaging/scripts/preinst rename to distribution/packages/src/common/scripts/preinst diff --git a/distribution/src/main/packaging/scripts/prerm b/distribution/packages/src/common/scripts/prerm similarity index 100% rename from distribution/src/main/packaging/scripts/prerm rename to distribution/packages/src/common/scripts/prerm diff --git a/distribution/src/main/packaging/systemd/elasticsearch.conf b/distribution/packages/src/common/systemd/elasticsearch.conf similarity index 100% rename from distribution/src/main/packaging/systemd/elasticsearch.conf rename to distribution/packages/src/common/systemd/elasticsearch.conf diff --git a/distribution/src/main/packaging/systemd/elasticsearch.service b/distribution/packages/src/common/systemd/elasticsearch.service similarity index 100% rename from distribution/src/main/packaging/systemd/elasticsearch.service rename to distribution/packages/src/common/systemd/elasticsearch.service diff --git a/distribution/src/main/packaging/systemd/sysctl/elasticsearch.conf b/distribution/packages/src/common/systemd/sysctl/elasticsearch.conf similarity index 100% rename from distribution/src/main/packaging/systemd/sysctl/elasticsearch.conf rename to 
distribution/packages/src/common/systemd/sysctl/elasticsearch.conf diff --git a/distribution/deb/src/main/packaging/copyright b/distribution/packages/src/deb/copyright similarity index 100% rename from distribution/deb/src/main/packaging/copyright rename to distribution/packages/src/deb/copyright diff --git a/distribution/deb/src/main/packaging/init.d/elasticsearch b/distribution/packages/src/deb/init.d/elasticsearch similarity index 100% rename from distribution/deb/src/main/packaging/init.d/elasticsearch rename to distribution/packages/src/deb/init.d/elasticsearch diff --git a/distribution/deb/src/main/packaging/lintian/elasticsearch b/distribution/packages/src/deb/lintian/elasticsearch similarity index 100% rename from distribution/deb/src/main/packaging/lintian/elasticsearch rename to distribution/packages/src/deb/lintian/elasticsearch diff --git a/distribution/rpm/src/main/packaging/init.d/elasticsearch b/distribution/packages/src/rpm/init.d/elasticsearch similarity index 100% rename from distribution/rpm/src/main/packaging/init.d/elasticsearch rename to distribution/packages/src/rpm/init.d/elasticsearch diff --git a/distribution/rpm/build.gradle b/distribution/rpm/build.gradle deleted file mode 100644 index 4432198f234d7..0000000000000 --- a/distribution/rpm/build.gradle +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -import org.elasticsearch.gradle.LoggedExec - -task buildRpm(type: Rpm) { - dependsOn preparePackagingFiles - baseName 'elasticsearch' // this is what pom generation uses for artifactId - // Follow elasticsearch's rpm file naming convention - archiveName "${packageName}-${project.version}.rpm" - packageGroup 'Application/Internet' - prefix '/usr' - packager 'Elasticsearch' - version = project.version.replace('-', '_') - release = '1' - arch 'NOARCH' - os 'LINUX' - license '2009' - distribution 'Elasticsearch' - vendor 'Elasticsearch' - dirMode 0755 - fileMode 0644 - addParentDirs false - // TODO ospackage doesn't support icon but we used to have one - - // Declare the folders so that the RPM package manager removes - // them when upgrading or removing the package - directory('/usr/share/elasticsearch/bin', 0755) - directory('/usr/share/elasticsearch/lib', 0755) - directory('/usr/share/elasticsearch/modules', 0755) - modulesFiles.eachFile { FileCopyDetails fcp -> - if (fcp.name == "plugin-descriptor.properties") { - directory('/usr/share/elasticsearch/modules/' + fcp.file.parentFile.name, 0755) - } - } -} - -artifacts { - 'default' buildRpm - archives buildRpm -} - -integTest.enabled = false -licenseHeaders.enabled = false - -// task that sanity checks if the RPM archive can be extracted -task checkRpm(type: LoggedExec) { - onlyIf { new File('/bin/rpm').exists() || new File('/usr/bin/rpm').exists() || new File('/usr/local/bin/rpm').exists() } - final File rpmDatabase = new File("${buildDir}", 'rpm-database') - final File rpmExtracted = new File("${buildDir}", 'rpm-extracted') - commandLine 'rpm', - '--badreloc', - '--nodeps', - '--noscripts', - '--notriggers', - '--dbpath', - rpmDatabase, - '--relocate', - "/=${rpmExtracted}", - '-i', - "${buildDir}/distributions/elasticsearch-${project.version}.rpm" - doFirst { - rpmDatabase.deleteDir() - rpmExtracted.deleteDir() - } -} - -checkRpm.dependsOn buildRpm -check.dependsOn checkRpm diff --git a/distribution/src/main/resources/bin/elasticsearch b/distribution/src/bin/elasticsearch similarity index 100% rename from distribution/src/main/resources/bin/elasticsearch rename to distribution/src/bin/elasticsearch diff --git a/distribution/src/main/resources/bin/elasticsearch-env b/distribution/src/bin/elasticsearch-env similarity index 96% rename from distribution/src/main/resources/bin/elasticsearch-env rename to distribution/src/bin/elasticsearch-env index a5cf04da77d2c..cc86a10b184ae 100644 --- a/distribution/src/main/resources/bin/elasticsearch-env +++ b/distribution/src/bin/elasticsearch-env @@ -74,6 +74,9 @@ if [ -z "$ES_PATH_CONF" ]; then exit 1 fi +# now make ES_PATH_CONF absolute +ES_PATH_CONF=`cd "$ES_PATH_CONF"; pwd` + if [ -z "$ES_TMPDIR" ]; then set +e mktemp --version 2>&1 | grep coreutils > /dev/null diff --git a/distribution/src/main/resources/bin/elasticsearch-env.bat b/distribution/src/bin/elasticsearch-env.bat similarity index 94% rename from distribution/src/main/resources/bin/elasticsearch-env.bat rename to distribution/src/bin/elasticsearch-env.bat index 4d1ea24b38874..2499c0d99a4da 100644 --- a/distribution/src/main/resources/bin/elasticsearch-env.bat +++ b/distribution/src/bin/elasticsearch-env.bat @@ -50,6 +50,9 @@ if not defined ES_PATH_CONF ( set ES_PATH_CONF=!ES_HOME!\config ) +rem now make ES_PATH_CONF absolute +for %%I in ("%ES_PATH_CONF%..") do set ES_PATH_CONF=%%~dpfI + if not defined ES_TMPDIR ( set ES_TMPDIR=!TMP!\elasticsearch ) diff --git a/distribution/src/main/resources/bin/elasticsearch-keystore 
b/distribution/src/bin/elasticsearch-keystore similarity index 100% rename from distribution/src/main/resources/bin/elasticsearch-keystore rename to distribution/src/bin/elasticsearch-keystore diff --git a/distribution/src/main/resources/bin/elasticsearch-keystore.bat b/distribution/src/bin/elasticsearch-keystore.bat similarity index 100% rename from distribution/src/main/resources/bin/elasticsearch-keystore.bat rename to distribution/src/bin/elasticsearch-keystore.bat diff --git a/distribution/src/main/resources/bin/elasticsearch-plugin b/distribution/src/bin/elasticsearch-plugin similarity index 100% rename from distribution/src/main/resources/bin/elasticsearch-plugin rename to distribution/src/bin/elasticsearch-plugin diff --git a/distribution/src/main/resources/bin/elasticsearch-plugin.bat b/distribution/src/bin/elasticsearch-plugin.bat similarity index 100% rename from distribution/src/main/resources/bin/elasticsearch-plugin.bat rename to distribution/src/bin/elasticsearch-plugin.bat diff --git a/distribution/src/main/resources/bin/elasticsearch-service-mgr.exe b/distribution/src/bin/elasticsearch-service-mgr.exe similarity index 100% rename from distribution/src/main/resources/bin/elasticsearch-service-mgr.exe rename to distribution/src/bin/elasticsearch-service-mgr.exe diff --git a/distribution/src/main/resources/bin/elasticsearch-service-x64.exe b/distribution/src/bin/elasticsearch-service-x64.exe similarity index 100% rename from distribution/src/main/resources/bin/elasticsearch-service-x64.exe rename to distribution/src/bin/elasticsearch-service-x64.exe diff --git a/distribution/src/main/resources/bin/elasticsearch-service.bat b/distribution/src/bin/elasticsearch-service.bat similarity index 91% rename from distribution/src/main/resources/bin/elasticsearch-service.bat rename to distribution/src/bin/elasticsearch-service.bat index 72b5f9887943f..065725f8bdb72 100644 --- a/distribution/src/main/resources/bin/elasticsearch-service.bat +++ b/distribution/src/bin/elasticsearch-service.bat @@ -103,7 +103,7 @@ set ES_JVM_OPTIONS=%ES_PATH_CONF%\jvm.options if not "%ES_JAVA_OPTS%" == "" set ES_JAVA_OPTS=%ES_JAVA_OPTS: =;% @setlocal -for /F "usebackq delims=" %%a in (`"%JAVA% -cp "%ES_CLASSPATH%" "org.elasticsearch.tools.launchers.JvmOptionsParser" "%ES_JVM_OPTIONS%" || echo jvm_options_parser_failed"`) do set JVM_OPTIONS=%%a +for /F "usebackq delims=" %%a in (`"%JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" 
|| echo jvm_options_parser_failed"`) do set JVM_OPTIONS=%%a @endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%JVM_OPTIONS%" & set ES_JAVA_OPTS=%JVM_OPTIONS:${ES_TMPDIR}=!ES_TMPDIR!% %ES_JAVA_OPTS% if "%MAYBE_JVM_OPTIONS_PARSER_FAILED%" == "jvm_options_parser_failed" ( @@ -197,7 +197,7 @@ if not "%SERVICE_USERNAME%" == "" ( ) ) -"%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %ES_START_TYPE% --StopTimeout %ES_STOP_TIMEOUT% --StartClass org.elasticsearch.bootstrap.Elasticsearch --StopClass org.elasticsearch.bootstrap.Elasticsearch --StartMethod main --StopMethod close --Classpath "%ES_CLASSPATH%" --JvmMs %JVM_MS% --JvmMx %JVM_MX% --JvmSs %JVM_SS% --JvmOptions %ES_JAVA_OPTS% ++JvmOptions %ES_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "%SERVICE_DISPLAY_NAME%" --Description "%SERVICE_DESCRIPTION%" --Jvm "%%JAVA_HOME%%%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%ES_HOME%" %SERVICE_PARAMS% +"%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %ES_START_TYPE% --StopTimeout %ES_STOP_TIMEOUT% --StartClass org.elasticsearch.bootstrap.Elasticsearch --StartMethod main ++StartParams --quiet --StopClass org.elasticsearch.bootstrap.Elasticsearch --StopMethod close --Classpath "%ES_CLASSPATH%" --JvmMs %JVM_MS% --JvmMx %JVM_MX% --JvmSs %JVM_SS% --JvmOptions %ES_JAVA_OPTS% ++JvmOptions %ES_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "%SERVICE_DISPLAY_NAME%" --Description "%SERVICE_DESCRIPTION%" --Jvm "%%JAVA_HOME%%%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%ES_HOME%" %SERVICE_PARAMS% if not errorlevel 1 goto installed echo Failed installing '%SERVICE_ID%' service diff --git a/distribution/src/main/resources/bin/elasticsearch-translog b/distribution/src/bin/elasticsearch-translog similarity index 100% rename from distribution/src/main/resources/bin/elasticsearch-translog rename to distribution/src/bin/elasticsearch-translog diff --git a/distribution/src/main/resources/bin/elasticsearch-translog.bat b/distribution/src/bin/elasticsearch-translog.bat similarity index 100% rename from distribution/src/main/resources/bin/elasticsearch-translog.bat rename to distribution/src/bin/elasticsearch-translog.bat diff --git a/distribution/src/main/resources/bin/elasticsearch.bat b/distribution/src/bin/elasticsearch.bat similarity index 86% rename from distribution/src/main/resources/bin/elasticsearch.bat rename to distribution/src/bin/elasticsearch.bat index 4709942d0dc56..f9f668fc61538 100644 --- a/distribution/src/main/resources/bin/elasticsearch.bat +++ b/distribution/src/bin/elasticsearch.bat @@ -43,7 +43,7 @@ IF ERRORLEVEL 1 ( set "ES_JVM_OPTIONS=%ES_PATH_CONF%\jvm.options" @setlocal -for /F "usebackq delims=" %%a in (`"%JAVA% -cp "%ES_CLASSPATH%" "org.elasticsearch.tools.launchers.JvmOptionsParser" "%ES_JVM_OPTIONS%" || echo jvm_options_parser_failed"`) do set JVM_OPTIONS=%%a +for /F "usebackq delims=" %%a in (`"%JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" 
|| echo jvm_options_parser_failed"`) do set JVM_OPTIONS=%%a @endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%JVM_OPTIONS%" & set ES_JAVA_OPTS=%JVM_OPTIONS:${ES_TMPDIR}=!ES_TMPDIR!% %ES_JAVA_OPTS% if "%MAYBE_JVM_OPTIONS_PARSER_FAILED%" == "jvm_options_parser_failed" ( diff --git a/distribution/src/main/resources/config/elasticsearch.yml b/distribution/src/config/elasticsearch.yml similarity index 100% rename from distribution/src/main/resources/config/elasticsearch.yml rename to distribution/src/config/elasticsearch.yml diff --git a/distribution/src/main/resources/config/jvm.options b/distribution/src/config/jvm.options similarity index 100% rename from distribution/src/main/resources/config/jvm.options rename to distribution/src/config/jvm.options diff --git a/distribution/src/main/resources/config/log4j2.properties b/distribution/src/config/log4j2.properties similarity index 100% rename from distribution/src/main/resources/config/log4j2.properties rename to distribution/src/config/log4j2.properties diff --git a/distribution/tar/build.gradle b/distribution/tar/build.gradle deleted file mode 100644 index cbefc223847a2..0000000000000 --- a/distribution/tar/build.gradle +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -task buildTar(type: Tar) { - dependsOn createLogDir, createPluginsDir - baseName = 'elasticsearch' - extension = 'tar.gz' - with archivesFiles - compression = Compression.GZIP - dirMode 0755 - fileMode 0644 -} - -artifacts { - 'default' buildTar -} - diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 374690e65acd9..55ec44da25cb9 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -20,8 +20,8 @@ apply plugin: 'elasticsearch.build' dependencies { - provided "org.elasticsearch:elasticsearch:${version}" - provided "org.elasticsearch:elasticsearch-cli:${version}" + compileOnly "org.elasticsearch:elasticsearch:${version}" + compileOnly "org.elasticsearch:elasticsearch-cli:${version}" testCompile "org.elasticsearch.test:framework:${version}" testCompile 'com.google.jimfs:jimfs:1.1' testCompile 'com.google.guava:guava:18.0' diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index 255c2e991b714..155b3a5647ae7 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -462,17 +462,15 @@ private Path unzip(Path zip, Path pluginsDir) throws IOException, UserException final Path target = stagingDirectory(pluginsDir); pathsToDeleteOnShutdown.add(target); - boolean hasEsDir = false; try (ZipInputStream zipInput = new ZipInputStream(Files.newInputStream(zip))) { ZipEntry entry; byte[] buffer = new byte[8192]; while ((entry = zipInput.getNextEntry()) != null) { - if (entry.getName().startsWith("elasticsearch/") == false) { - // only extract the elasticsearch directory - continue; + if (entry.getName().startsWith("elasticsearch/")) { + throw new UserException(PLUGIN_MALFORMED, "This plugin was built with an older plugin structure." + + " Contact the plugin author to remove the intermediate \"elasticsearch\" directory within the plugin zip."); } - hasEsDir = true; - Path targetFile = target.resolve(entry.getName().substring("elasticsearch/".length())); + Path targetFile = target.resolve(entry.getName()); // Using the entry name as a path can result in an entry outside of the plugin dir, // either if the name starts with the root of the filesystem, or it is a relative @@ -499,13 +497,11 @@ private Path unzip(Path zip, Path pluginsDir) throws IOException, UserException } zipInput.closeEntry(); } - } - Files.delete(zip); - if (hasEsDir == false) { + } catch (UserException e) { IOUtils.rm(target); - throw new UserException(PLUGIN_MALFORMED, - "`elasticsearch` directory is missing in the plugin zip"); + throw e; } + Files.delete(zip); return target; } @@ -569,6 +565,7 @@ private void verifyPluginName(Path pluginPath, String pluginName, Path candidate /** Load information about the plugin, and verify it can be installed with no errors. 
*/ private PluginInfo loadPluginInfo(Terminal terminal, Path pluginRoot, boolean isBatch, Environment env) throws Exception { final PluginInfo info = PluginInfo.readFromProperties(pluginRoot); + PluginsService.verifyCompatibility(info); // checking for existing version of the plugin verifyPluginName(env.pluginsFile(), info.getName(), pluginRoot); @@ -653,6 +650,7 @@ private void installMetaPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, continue; } final PluginInfo info = PluginInfo.readFromProperties(plugin); + PluginsService.verifyCompatibility(info); verifyPluginName(env.pluginsFile(), info.getName(), plugin); pluginPaths.add(plugin); } diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java index 70acf62bd8e1c..fb73554c2b19e 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java @@ -20,6 +20,7 @@ package org.elasticsearch.plugins; import joptsimple.OptionSet; +import org.elasticsearch.Version; import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.common.Nullable; @@ -84,15 +85,11 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th private void printPlugin(Environment env, Terminal terminal, Path plugin, String prefix) throws IOException { terminal.println(Terminal.Verbosity.SILENT, prefix + plugin.getFileName().toString()); - try { - PluginInfo info = PluginInfo.readFromProperties(env.pluginsFile().resolve(plugin.toAbsolutePath())); - terminal.println(Terminal.Verbosity.VERBOSE, info.toString(prefix)); - } catch (IllegalArgumentException e) { - if (e.getMessage().contains("incompatible with version")) { - terminal.println("WARNING: " + e.getMessage()); - } else { - throw e; - } + PluginInfo info = PluginInfo.readFromProperties(env.pluginsFile().resolve(plugin.toAbsolutePath())); + terminal.println(Terminal.Verbosity.VERBOSE, info.toString(prefix)); + if (info.getElasticsearchVersion().equals(Version.CURRENT) == false) { + terminal.println("WARNING: plugin [" + info.getName() + "] was built for Elasticsearch version " + info.getVersion() + + " but version " + Version.CURRENT + " is required"); } } } diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java index ba85173f325a4..4cd83e329b158 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java @@ -86,7 +86,7 @@ void execute(Terminal terminal, Environment env, String pluginName, boolean purg // first make sure nothing extends this plugin List usedBy = new ArrayList<>(); - Set bundles = PluginsService.getPluginBundles(env.pluginsFile(), false); + Set bundles = PluginsService.getPluginBundles(env.pluginsFile()); for (PluginsService.Bundle bundle : bundles) { for (String extendedPlugin : bundle.plugin.getExtendedPlugins()) { if (extendedPlugin.equals(pluginName)) { diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java 
b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index 724c222af2ba9..4e0cecae12f31 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -259,12 +259,12 @@ static void writePluginSecurityPolicy(Path pluginDir, String... permissions) thr static Path createPlugin(String name, Path structure, String... additionalProps) throws IOException { writePlugin(name, structure, additionalProps); - return writeZip(structure, "elasticsearch"); + return writeZip(structure, null); } static Path createMetaPlugin(String name, Path structure) throws IOException { writeMetaPlugin(name, structure); - return writeZip(structure, "elasticsearch"); + return writeZip(structure, null); } void installPlugin(String pluginUrl, Path home) throws Exception { @@ -811,7 +811,7 @@ public void testMissingDescriptor() throws Exception { Path pluginDir = metaDir.resolve("fake"); Files.createDirectory(pluginDir); Files.createFile(pluginDir.resolve("fake.yml")); - String pluginZip = writeZip(pluginDir, "elasticsearch").toUri().toURL().toString(); + String pluginZip = writeZip(pluginDir, null).toUri().toURL().toString(); NoSuchFileException e = expectThrows(NoSuchFileException.class, () -> installPlugin(pluginZip, env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("plugin-descriptor.properties")); assertInstallCleaned(env.v2()); @@ -822,23 +822,23 @@ public void testMissingDescriptor() throws Exception { assertInstallCleaned(env.v2()); } - public void testMissingDirectory() throws Exception { + public void testContainsIntermediateDirectory() throws Exception { Tuple env = createEnv(fs, temp); Path pluginDir = createPluginDir(temp); Files.createFile(pluginDir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES)); - String pluginZip = writeZip(pluginDir, null).toUri().toURL().toString(); + String pluginZip = writeZip(pluginDir, "elasticsearch").toUri().toURL().toString(); UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); - assertTrue(e.getMessage(), e.getMessage().contains("`elasticsearch` directory is missing in the plugin zip")); + assertThat(e.getMessage(), containsString("This plugin was built with an older plugin structure")); assertInstallCleaned(env.v2()); } - public void testMissingDirectoryMeta() throws Exception { + public void testContainsIntermediateDirectoryMeta() throws Exception { Tuple env = createEnv(fs, temp); Path pluginDir = createPluginDir(temp); Files.createFile(pluginDir.resolve(MetaPluginInfo.ES_META_PLUGIN_PROPERTIES)); - String pluginZip = writeZip(pluginDir, null).toUri().toURL().toString(); + String pluginZip = writeZip(pluginDir, "elasticsearch").toUri().toURL().toString(); UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); - assertTrue(e.getMessage(), e.getMessage().contains("`elasticsearch` directory is missing in the plugin zip")); + assertThat(e.getMessage(), containsString("This plugin was built with an older plugin structure")); assertInstallCleaned(env.v2()); } @@ -846,11 +846,12 @@ public void testZipRelativeOutsideEntryName() throws Exception { Tuple env = createEnv(fs, temp); Path zip = createTempDir().resolve("broken.zip"); try (ZipOutputStream stream = new ZipOutputStream(Files.newOutputStream(zip))) { - stream.putNextEntry(new ZipEntry("elasticsearch/../blah")); + 
stream.putNextEntry(new ZipEntry("../blah")); } String pluginZip = zip.toUri().toURL().toString(); UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("resolving outside of plugin directory")); + assertInstallCleaned(env.v2()); } public void testOfficialPluginsHelpSorted() throws Exception { diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java index 372a4cae8f263..fe5176a34ed89 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java @@ -130,7 +130,7 @@ private static void buildFakePlugin( "name", name, "version", "1.0", "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), + "java.version", "1.8", "classname", classname, "has.native.controller", Boolean.toString(hasNativeController), "requires.keystore", Boolean.toString(requiresKeystore)); @@ -192,6 +192,8 @@ public void testPluginWithVerbose() throws Exception { "Name: fake_plugin", "Description: fake desc", "Version: 1.0", + "Elasticsearch Version: " + Version.CURRENT.toString(), + "Java Version: 1.8", "Native Controller: false", "Requires Keystore: false", "Extended Plugins: []", @@ -211,6 +213,8 @@ public void testPluginWithNativeController() throws Exception { "Name: fake_plugin1", "Description: fake desc 1", "Version: 1.0", + "Elasticsearch Version: " + Version.CURRENT.toString(), + "Java Version: 1.8", "Native Controller: true", "Requires Keystore: false", "Extended Plugins: []", @@ -230,6 +234,8 @@ public void testPluginWithRequiresKeystore() throws Exception { "Name: fake_plugin1", "Description: fake desc 1", "Version: 1.0", + "Elasticsearch Version: " + Version.CURRENT.toString(), + "Java Version: 1.8", "Native Controller: false", "Requires Keystore: true", "Extended Plugins: []", @@ -250,6 +256,8 @@ public void testPluginWithVerboseMultiplePlugins() throws Exception { "Name: fake_plugin1", "Description: fake desc 1", "Version: 1.0", + "Elasticsearch Version: " + Version.CURRENT.toString(), + "Java Version: 1.8", "Native Controller: false", "Requires Keystore: false", "Extended Plugins: []", @@ -259,6 +267,8 @@ public void testPluginWithVerboseMultiplePlugins() throws Exception { "Name: fake_plugin2", "Description: fake desc 2", "Version: 1.0", + "Elasticsearch Version: " + Version.CURRENT.toString(), + "Java Version: 1.8", "Native Controller: false", "Requires Keystore: false", "Extended Plugins: []", @@ -281,6 +291,8 @@ public void testPluginWithVerboseMetaPlugins() throws Exception { "\tName: fake_plugin1", "\tDescription: fake desc 1", "\tVersion: 1.0", + "\tElasticsearch Version: " + Version.CURRENT.toString(), + "\tJava Version: 1.8", "\tNative Controller: false", "\tRequires Keystore: false", "\tExtended Plugins: []", @@ -290,6 +302,8 @@ public void testPluginWithVerboseMetaPlugins() throws Exception { "\tName: fake_plugin2", "\tDescription: fake desc 2", "\tVersion: 1.0", + "\tElasticsearch Version: " + Version.CURRENT.toString(), + "\tJava Version: 1.8", "\tNative Controller: false", "\tRequires Keystore: false", "\tExtended Plugins: []", @@ -348,11 +362,7 @@ public void testExistingIncompatiblePlugin() throws Exception { buildFakePlugin(env, 
"fake desc 2", "fake_plugin2", "org.fake2"); MockTerminal terminal = listPlugins(home); - final String message = String.format(Locale.ROOT, - "plugin [%s] is incompatible with version [%s]; was designed for version [%s]", - "fake_plugin1", - Version.CURRENT.toString(), - "1.0.0"); + String message = "plugin [fake_plugin1] was built for Elasticsearch version 1.0 but version " + Version.CURRENT + " is required"; assertEquals( "fake_plugin1\n" + "WARNING: " + message + "\n" + "fake_plugin2\n", terminal.getOutput()); @@ -374,11 +384,7 @@ public void testExistingIncompatibleMetaPlugin() throws Exception { buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2"); MockTerminal terminal = listPlugins(home); - final String message = String.format(Locale.ROOT, - "plugin [%s] is incompatible with version [%s]; was designed for version [%s]", - "fake_plugin1", - Version.CURRENT.toString(), - "1.0.0"); + String message = "plugin [fake_plugin1] was built for Elasticsearch version 1.0 but version " + Version.CURRENT + " is required"; assertEquals( "fake_plugin2\nmeta_plugin\n\tfake_plugin1\n" + "WARNING: " + message + "\n", terminal.getOutput()); diff --git a/distribution/zip/build.gradle b/distribution/zip/build.gradle deleted file mode 100644 index 53dc98271ec17..0000000000000 --- a/distribution/zip/build.gradle +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import org.elasticsearch.gradle.plugin.PluginBuildPlugin - -task buildZip(type: Zip) { - dependsOn createLogDir, createPluginsDir - baseName = 'elasticsearch' - with archivesFiles -} - -artifacts { - 'default' buildZip - archives buildZip -} - -publishing { - publications { - nebula { - artifactId 'elasticsearch' - artifact buildZip - } - /* HUGE HACK: the underlying maven publication library refuses to deploy any attached artifacts - * when the packaging type is set to 'pom'. But Sonatype's OSS repositories require source files - * for artifacts that are of type 'zip'. We already publish the source and javadoc for Elasticsearch - * under the various other subprojects. So here we create another publication using the same - * name that has the "real" pom, and rely on the fact that gradle will execute the publish tasks - * in alphabetical order. This lets us publish the zip file and even though the pom says the - * type is 'pom' instead of 'zip'. We cannot setup a dependency between the tasks because the - * publishing tasks are created *extremely* late in the configuration phase, so that we cannot get - * ahold of the actual task. Furthermore, this entire hack only exists so we can make publishing to - * maven local work, since we publish to maven central externally. 
 */
-        nebulaRealPom(MavenPublication) {
-            artifactId 'elasticsearch'
-            pom.packaging = 'pom'
-            pom.withXml { XmlProvider xml ->
-                Node root = xml.asNode()
-                root.appendNode('name', 'Elasticsearch')
-                root.appendNode('description', 'A Distributed RESTful Search Engine')
-                root.appendNode('url', PluginBuildPlugin.urlFromOrigin(project.scminfo.origin))
-                Node scmNode = root.appendNode('scm')
-                scmNode.appendNode('url', project.scminfo.origin)
-            }
-        }
-    }
-}
-
-integTest.dependsOn buildZip
diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc
index 3008b1bb3e09a..1c55e3b8a4e55 100644
--- a/docs/Versions.asciidoc
+++ b/docs/Versions.asciidoc
@@ -23,7 +23,7 @@ release-state can be: released | prerelease | unreleased
Javadoc roots used to generate links from Painless's API reference
///////
:java8-javadoc: https://docs.oracle.com/javase/8/docs/api
-:java9-javadoc: http://download.java.net/java/jigsaw/docs/api
+:java9-javadoc: https://docs.oracle.com/javase/9/docs/api
:joda-time-javadoc: http://www.joda.org/joda-time/apidocs
:lucene-core-javadoc: http://lucene.apache.org/core/{lucene_version_path}/core
diff --git a/docs/java-rest/high-level/cluster/put_settings.asciidoc b/docs/java-rest/high-level/cluster/put_settings.asciidoc
new file mode 100644
index 0000000000000..2d9f55c1e9419
--- /dev/null
+++ b/docs/java-rest/high-level/cluster/put_settings.asciidoc
@@ -0,0 +1,129 @@
+[[java-rest-high-cluster-put-settings]]
+=== Cluster Update Settings API
+
+The Cluster Update Settings API allows updating cluster-wide settings.
+
+[[java-rest-high-cluster-put-settings-request]]
+==== Cluster Update Settings Request
+
+A `ClusterUpdateSettingsRequest`:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-request]
+--------------------------------------------------
+
+==== Cluster Settings
+At least one setting to be updated must be provided:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-request-cluster-settings]
+--------------------------------------------------
+<1> Sets the transient settings to be applied
+<2> Sets the persistent settings to be applied
+
+==== Providing the Settings
+The settings to be applied can be provided in different ways:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-create-settings]
+--------------------------------------------------
+<1> Creates a transient setting as `Settings`
+<2> Creates a persistent setting as `Settings`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-settings-builder]
+--------------------------------------------------
+<1> Settings provided as `Settings.Builder`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-settings-source]
+--------------------------------------------------
+<1> Settings provided as `String`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-settings-map]
+--------------------------------------------------
+<1> Settings provided as a `Map`
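+
+For instance, the request construction boils down to something like the
+following (a minimal sketch rather than the tested snippets above; the setting
+name is only an example of a dynamic cluster setting):
+
+["source","java"]
+--------------------------------------------------
+ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
+// apply a transient setting; persistentSettings(...) works the same way
+request.transientSettings(Settings.builder()
+        .put("indices.recovery.max_bytes_per_sec", "50mb")
+        .build());
+--------------------------------------------------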
+
+==== Optional Arguments
+The following arguments can optionally be provided:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-request-flat-settings]
+--------------------------------------------------
+<1> Whether the updated settings returned in the `ClusterUpdateSettingsResponse` should
+be in a flat format
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-request-timeout]
+--------------------------------------------------
+<1> Timeout to wait for all the nodes to acknowledge the settings were applied
+as a `TimeValue`
+<2> Timeout to wait for all the nodes to acknowledge the settings were applied
+as a `String`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-request-masterTimeout]
+--------------------------------------------------
+<1> Timeout to connect to the master node as a `TimeValue`
+<2> Timeout to connect to the master node as a `String`
+
+[[java-rest-high-cluster-put-settings-sync]]
+==== Synchronous Execution
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-execute]
+--------------------------------------------------
+
+[[java-rest-high-cluster-put-settings-async]]
+==== Asynchronous Execution
+
+The asynchronous execution of a cluster update settings request requires both the
+`ClusterUpdateSettingsRequest` instance and an `ActionListener` instance to be
+passed to the asynchronous method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-execute-async]
+--------------------------------------------------
+<1> The `ClusterUpdateSettingsRequest` to execute and the `ActionListener`
+to use when the execution completes
+
+The asynchronous method does not block and returns immediately. Once it is
+completed the `ActionListener` is called back using the `onResponse` method
+if the execution successfully completed or using the `onFailure` method if
+it failed.
+
+A typical listener for `ClusterUpdateSettingsResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-execute-listener]
+--------------------------------------------------
+<1> Called when the execution is successfully completed. The response is
+provided as an argument
+<2> Called in case of a failure.
The raised exception is provided as an argument
+
+[[java-rest-high-cluster-put-settings-response]]
+==== Cluster Update Settings Response
+
+The returned `ClusterUpdateSettingsResponse` allows retrieving information about the
+executed operation as follows:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-response]
+--------------------------------------------------
+<1> Indicates whether all of the nodes have acknowledged the request
+<2> Indicates which transient settings have been applied
+<3> Indicates which persistent settings have been applied
\ No newline at end of file
diff --git a/docs/java-rest/high-level/getting-started.asciidoc b/docs/java-rest/high-level/getting-started.asciidoc
index 86fe473fb29e0..ba5c7ba273eec 100644
--- a/docs/java-rest/high-level/getting-started.asciidoc
+++ b/docs/java-rest/high-level/getting-started.asciidoc
@@ -46,6 +46,9 @@ Central]. The minimum Java version required is `1.8`.
The High Level REST Client is subject to the same release cycle as
Elasticsearch. Replace the version with the desired client version.
+If you are looking for a SNAPSHOT version, the Elastic Maven Snapshot repository is available
+at https://snapshots.elastic.co/maven/.
+
[[java-rest-high-getting-started-maven-maven]]
==== Maven configuration
diff --git a/docs/java-rest/high-level/indices/create_index.asciidoc b/docs/java-rest/high-level/indices/create_index.asciidoc
index fa723d3158cf4..b68faa0dd7659 100644
--- a/docs/java-rest/high-level/indices/create_index.asciidoc
+++ b/docs/java-rest/high-level/indices/create_index.asciidoc
@@ -21,6 +21,7 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-reque
--------------------------------------------------
<1> Settings for this index
+[[java-rest-high-create-index-request-mappings]]
==== Index mappings
An index may be created with mappings for its document types
@@ -98,9 +99,9 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-reque
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-request-waitForActiveShards]
--------------------------------------------------
<1> The number of active shard copies to wait for before the create index API returns a
-response, as an `int`.
+response, as an `int`
<2> The number of active shard copies to wait for before the create index API returns a
-response, as an `ActiveShardCount`.
+response, as an `ActiveShardCount`
[[java-rest-high-create-index-sync]]
==== Synchronous Execution
diff --git a/docs/java-rest/high-level/indices/exists_alias.asciidoc b/docs/java-rest/high-level/indices/exists_alias.asciidoc
index 5042b8119a5b0..c92c526444f68 100644
--- a/docs/java-rest/high-level/indices/exists_alias.asciidoc
+++ b/docs/java-rest/high-level/indices/exists_alias.asciidoc
@@ -41,7 +41,7 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-reque
--------------------------------------------------
<1> The `local` flag (defaults to `false`) controls whether the aliases need
to be looked up in the local cluster state or in the cluster state held by
-the elected master node.
+the elected master node [[java-rest-high-exists-alias-sync]] ==== Synchronous Execution diff --git a/docs/java-rest/high-level/indices/open_index.asciidoc b/docs/java-rest/high-level/indices/open_index.asciidoc index 1123e62a7a228..1c690344577b8 100644 --- a/docs/java-rest/high-level/indices/open_index.asciidoc +++ b/docs/java-rest/high-level/indices/open_index.asciidoc @@ -36,9 +36,9 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-waitForActiveShards] -------------------------------------------------- <1> The number of active shard copies to wait for before the open index API -returns a response, as an `int`. +returns a response, as an `int` <2> The number of active shard copies to wait for before the open index API -returns a response, as an `ActiveShardCount`. +returns a response, as an `ActiveShardCount` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/java-rest/high-level/indices/put_mapping.asciidoc b/docs/java-rest/high-level/indices/put_mapping.asciidoc index 1f7d8c1d5e828..5fa985d02c16f 100644 --- a/docs/java-rest/high-level/indices/put_mapping.asciidoc +++ b/docs/java-rest/high-level/indices/put_mapping.asciidoc @@ -20,7 +20,7 @@ A description of the fields to create on the mapping; if not defined, the mappin -------------------------------------------------- include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-request-source] -------------------------------------------------- -<1> The mapping source provided as a `String` +<1> Mapping source provided as a `String` ==== Providing the mapping source The mapping source can be provided in different ways in addition to diff --git a/docs/java-rest/high-level/indices/rollover.asciidoc b/docs/java-rest/high-level/indices/rollover.asciidoc new file mode 100644 index 0000000000000..85a99eb002b6e --- /dev/null +++ b/docs/java-rest/high-level/indices/rollover.asciidoc @@ -0,0 +1,131 @@ +[[java-rest-high-rollover-index]] +=== Rollover Index API + +[[java-rest-high-rollover-request]] +==== Rollover Request + +The Rollover Index API requires a `RolloverRequest` instance. 
+A `RolloverRequest` requires two string arguments at construction time, and
+one or more conditions that determine when the index has to be rolled over:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[rollover-request]
+--------------------------------------------------
+<1> The alias (first argument) that points to the index to rollover, and
+optionally the name of the new index in case the rollover operation is performed
+<2> Condition on the age of the index
+<3> Condition on the number of documents in the index
+<4> Condition on the size of the index
+
+==== Optional arguments
+The following arguments can optionally be provided:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[rollover-request-dryRun]
+--------------------------------------------------
+<1> Whether the rollover should be performed (default) or only simulated
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[rollover-request-timeout]
+--------------------------------------------------
+<1> Timeout to wait for all the nodes to acknowledge the index is opened
+as a `TimeValue`
+<2> Timeout to wait for all the nodes to acknowledge the index is opened
+as a `String`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[rollover-request-masterTimeout]
+--------------------------------------------------
+<1> Timeout to connect to the master node as a `TimeValue`
+<2> Timeout to connect to the master node as a `String`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[rollover-request-waitForActiveShards]
+--------------------------------------------------
+<1> The number of active shard copies to wait for before the rollover index API
+returns a response, as an `int`
+<2> The number of active shard copies to wait for before the rollover index API
+returns a response, as an `ActiveShardCount`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[rollover-request-settings]
+--------------------------------------------------
+<1> Add the settings to apply to the new index, which include the number of
+shards to create for it
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[rollover-request-mapping]
+--------------------------------------------------
+<1> Add the mappings to associate the new index with. See <<java-rest-high-create-index-request-mappings>>
+for examples on the different ways to provide mappings
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[rollover-request-alias]
+--------------------------------------------------
+<1> Add the aliases to associate the new index with
+
+[[java-rest-high-rollover-sync]]
+==== Synchronous Execution
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[rollover-execute]
+--------------------------------------------------
+
+[[java-rest-high-rollover-async]]
+==== Asynchronous Execution
+
+The asynchronous execution of a rollover request requires both the `RolloverRequest`
+instance and an `ActionListener` instance to be passed to the asynchronous
+method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[rollover-execute-async]
+--------------------------------------------------
+<1> The `RolloverRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The asynchronous method does not block and returns immediately. Once it is
+completed the `ActionListener` is called back using the `onResponse` method
+if the execution successfully completed or using the `onFailure` method if
+it failed.
+
+A typical listener for `RolloverResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[rollover-execute-listener]
+--------------------------------------------------
+<1> Called when the execution is successfully completed. The response is
+provided as an argument
+<2> Called in case of failure. The raised exception is provided as an argument
+
+[[java-rest-high-rollover-response]]
+==== Rollover Response
+
+The returned `RolloverResponse` allows retrieving information about the
+executed operation as follows:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[rollover-response]
+--------------------------------------------------
+<1> Indicates whether all of the nodes have acknowledged the request
+<2> Indicates whether the requisite number of shard copies were started for
+each shard in the index before timing out
+<3> The name of the old index, which may have been rolled over
+<4> The name of the new index
+<5> Whether the index has been rolled over
+<6> Whether the operation was performed or it was a dry run
+<7> The different conditions and whether they were matched or not
+
+
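+For reference, constructing the request shown at the top of this page boils
+down to roughly the following (a sketch; the method names follow the snippets
+referenced above and may differ between versions, and the alias and index
+names are made up):
+
+["source","java"]
+--------------------------------------------------
+RolloverRequest request = new RolloverRequest("alias", "index-2"); // alias to roll over, optional new index name
+request.addMaxIndexAgeCondition(new TimeValue(7, TimeUnit.DAYS));  // roll over once the index is a week old...
+request.addMaxIndexDocsCondition(1000);                            // ...or contains 1000 documents
+--------------------------------------------------
+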
diff --git a/docs/java-rest/high-level/indices/split_index.asciidoc b/docs/java-rest/high-level/indices/split_index.asciidoc
index 55081a1da37df..cdf59fd416fc4 100644
--- a/docs/java-rest/high-level/indices/split_index.asciidoc
+++ b/docs/java-rest/high-level/indices/split_index.asciidoc
@@ -38,22 +38,22 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[split-index-reques
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[split-index-request-waitForActiveShards]
--------------------------------------------------
<1> The number of active shard copies to wait for before the split index API
-returns a response, as an `int`.
+returns a response, as an `int` <2> The number of active shard copies to wait for before the split index API -returns a response, as an `ActiveShardCount`. +returns a response, as an `ActiveShardCount` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[split-index-request-settings] -------------------------------------------------- <1> The settings to apply to the target index, which include the number of -shards to create for it. +shards to create for it ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[split-index-request-aliases] -------------------------------------------------- -<1> The aliases to associate the target index with. +<1> The aliases to associate the target index with [[java-rest-high-split-index-sync]] ==== Synchronous Execution diff --git a/docs/java-rest/high-level/indices/update_aliases.asciidoc b/docs/java-rest/high-level/indices/update_aliases.asciidoc index 304613a92850a..05c12e64911e3 100644 --- a/docs/java-rest/high-level/indices/update_aliases.asciidoc +++ b/docs/java-rest/high-level/indices/update_aliases.asciidoc @@ -1,10 +1,10 @@ [[java-rest-high-update-aliases]] -=== Update Aliases API +=== Index Aliases API [[java-rest-high-update-aliases-request]] ==== Indices Aliases Request -The Update Aliases API allows aliasing an index with a name, with all APIs +The Index Aliases API allows aliasing an index with a name, with all APIs automatically converting the alias name to the actual index name. An `IndicesAliasesRequest` must have at least one `AliasActions`: diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 14e4351eb29bd..9269da0923a56 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -52,6 +52,7 @@ Index Management:: * <> * <> * <> +* <> Mapping Management:: * <> @@ -67,7 +68,15 @@ include::indices/open_index.asciidoc[] include::indices/close_index.asciidoc[] include::indices/shrink_index.asciidoc[] include::indices/split_index.asciidoc[] +include::indices/rollover.asciidoc[] include::indices/put_mapping.asciidoc[] include::indices/update_aliases.asciidoc[] include::indices/exists_alias.asciidoc[] +== Cluster APIs + +The Java High Level REST Client supports the following Cluster APIs: + +* <> + +include::cluster/put_settings.asciidoc[] diff --git a/docs/java-rest/low-level/sniffer.asciidoc b/docs/java-rest/low-level/sniffer.asciidoc index df772643bf4dc..4f846847615ea 100644 --- a/docs/java-rest/low-level/sniffer.asciidoc +++ b/docs/java-rest/low-level/sniffer.asciidoc @@ -21,6 +21,8 @@ released with `5.0.0-alpha4`. There is no relation between the sniffer version and the Elasticsearch version that the client can communicate with. Sniffer supports fetching the nodes list from Elasticsearch 2.x and onwards. +If you are looking for a SNAPSHOT version, the Elastic Maven Snapshot repository is available +at https://snapshots.elastic.co/maven/. 
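+
+With Maven, for example, that repository might be declared like this (a
+sketch; the repository `id` is arbitrary):
+
+["source","xml"]
+--------------------------------------------------
+<repositories>
+    <repository>
+        <id>elastic-snapshots</id>
+        <url>https://snapshots.elastic.co/maven/</url>
+        <snapshots><enabled>true</enabled></snapshots>
+    </repository>
+</repositories>
+--------------------------------------------------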
==== Maven configuration
@@ -131,4 +133,4 @@ than from Elasticsearch:
--------------------------------------------------
include-tagged::{doc-tests}/SnifferDocumentation.java[custom-hosts-sniffer]
--------------------------------------------------
-<1> Fetch the hosts from the external source
\ No newline at end of file
+<1> Fetch the hosts from the external source
diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc
index 6e25c506acb7c..db27b886b4178 100644
--- a/docs/java-rest/low-level/usage.asciidoc
+++ b/docs/java-rest/low-level/usage.asciidoc
@@ -22,6 +22,9 @@ released with `5.0.0-alpha4`. There is no relation between the client version
and the Elasticsearch version that the client can communicate with. The
low-level REST client is compatible with all Elasticsearch versions.
+If you are looking for a SNAPSHOT version, the Elastic Maven Snapshot repository is available
+at https://snapshots.elastic.co/maven/.
+
[[java-rest-low-usage-maven-maven]]
==== Maven configuration
diff --git a/docs/plugins/authors.asciidoc b/docs/plugins/authors.asciidoc
index 059d9a5bf6817..b89ac903592ca 100644
--- a/docs/plugins/authors.asciidoc
+++ b/docs/plugins/authors.asciidoc
@@ -20,18 +20,13 @@ These examples provide the bare bones needed to get started. For more
information about how to write a plugin, we recommend looking at the plugins
listed in this documentation for inspiration.
-[float]
-=== Plugin Structure
-
-All plugin files must be contained in a directory called `elasticsearch`.
-
[float]
=== Plugin descriptor file
-All plugins must contain a file called `plugin-descriptor.properties` in the folder named `elasticsearch`.
+All plugins must contain a file called `plugin-descriptor.properties`.
The format for this file is described in detail in this example:
-["source","properties",subs="attributes"]
+["source","properties"]
--------------------------------------------------
include::{plugin-properties-files}/plugin-descriptor.properties[]
--------------------------------------------------
@@ -63,7 +58,7 @@ of nonnegative decimal integers separated by "."'s and may have leading zeros.
|=======================================================================
-Note that only jar files in the 'elasticsearch' directory are added to the classpath for the plugin!
+Note that only jar files at the root of the plugin are added to the classpath for the plugin!
If you need other resources, package them into a resources jar.
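+
+For example, a plugin zip following this structure might be laid out as
+(a hypothetical `my-plugin` shown for illustration):
+
+["source","txt"]
+--------------------------------------------------
+my-plugin.zip
+|-- plugin-descriptor.properties
+|-- my-plugin-1.0.jar
+`-- my-plugin-resources.jar
+--------------------------------------------------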
If we assume response times are in milliseconds, it is -immediately obvious that the webpage normally loads in 15-30ms, but occasionally -spikes to 60-150ms. +immediately obvious that the webpage normally loads in 10-723ms, but occasionally +spikes to 941-980ms. Often, administrators are only interested in outliers -- the extreme percentiles. We can specify just the percents we are interested in (requested percentiles @@ -129,15 +129,15 @@ Response: "values": [ { "key": 1.0, - "value": 9.9 + "value": 5.0 }, { "key": 5.0, - "value": 29.500000000000004 + "value": 25.0 }, { "key": 25.0, - "value": 167.5 + "value": 165.0 }, { "key": 50.0, @@ -145,15 +145,15 @@ Response: }, { "key": 75.0, - "value": 722.5 + "value": 725.0 }, { "key": 95.0, - "value": 940.5 + "value": 945.0 }, { "key": 99.0, - "value": 980.1000000000001 + "value": 985.0 } ] } diff --git a/docs/reference/analysis/normalizers.asciidoc b/docs/reference/analysis/normalizers.asciidoc index 4f2b08e6a6174..e4bd710900c62 100644 --- a/docs/reference/analysis/normalizers.asciidoc +++ b/docs/reference/analysis/normalizers.asciidoc @@ -8,7 +8,12 @@ token. As a consequence, they do not have a tokenizer and only accept a subset of the available char filters and token filters. Only the filters that work on a per-character basis are allowed. For instance a lowercasing filter would be allowed, but not a stemming filter, which needs to look at the keyword as a -whole. +whole. The current list of filters that can be used in a normalizer is +as follows: `arabic_normalization`, `asciifolding`, `bengali_normalization`, +`cjk_width`, `decimal_digit`, `elision`, `german_normalization`, +`hindi_normalization`, `indic_normalization`, `lowercase`, +`persian_normalization`, `scandinavian_folding`, `serbian_normalization`, +`sorani_normalization`, `uppercase`. [float] === Custom normalizers diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc index 31e0bf61707e3..3dff5abc52d9a 100644 --- a/docs/reference/cat.asciidoc +++ b/docs/reference/cat.asciidoc @@ -55,7 +55,7 @@ GET /_cat/master?help -------------------------------------------------- // CONSOLE -Might respond respond with: +Might respond with: [source,txt] -------------------------------------------------- @@ -66,6 +66,11 @@ node | n | node name -------------------------------------------------- // TESTRESPONSE[s/[|]/[|]/ _cat] +NOTE: `help` is not supported if any optional url parameter is used. +For example `GET _cat/shards/twitter?help` or `GET _cat/indices/twi*?help` +results in an error. Use `GET _cat/shards?help` or `GET _cat/indices?help` +instead. + [float] [[headers]] === Headers diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index 40c02cf35aa09..ec25d27d2535f 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -62,7 +62,11 @@ of `indices`, `os`, `process`, `jvm`, `transport`, `http`, Statistics about the discovery `ingest`:: - Statistics about ingest preprocessing + Statistics about ingest preprocessing + +`adaptive_selection`:: + Statistics about <>. See + <>.
[source,js] -------------------------------------------------- @@ -370,15 +374,43 @@ GET /_nodes/stats/indices?groups=foo,bar The `ingest` flag can be set to retrieve statistics that concern ingest: `ingest.total.count`:: - The total number of document ingested during the lifetime of this node + The total number of documents ingested during the lifetime of this node `ingest.total.time_in_millis`:: - The total time spent on ingest preprocessing documents during the lifetime of this node + The total time spent on ingest preprocessing documents during the lifetime of this node `ingest.total.current`:: - The total number of documents currently being ingested. + The total number of documents currently being ingested. `ingest.total.failed`:: - The total number ingest preprocessing operations failed during the lifetime of this node + The total number of ingest preprocessing operations that failed during the lifetime of this node On top of these overall ingest statistics, these statistics are also provided on a per pipeline basis. + +[float] +[[adaptive-selection-stats]] +=== Adaptive selection statistics + +The `adaptive_selection` flag can be set to retrieve statistics that concern +<>. These statistics are +keyed by node. For each node: + +`adaptive_selection.outgoing_searches`:: + The number of outstanding search requests to the keyed node from the node + these stats are for. + +`avg_queue_size`:: + The exponentially weighted moving average queue size of search requests on the + keyed node. + +`avg_service_time_ns`:: + The exponentially weighted moving average service time of search requests on + the keyed node. + +`avg_response_time_ns`:: + The exponentially weighted moving average response time of search requests on + the keyed node. + +`rank`:: + The rank of this node; used for shard selection when routing search requests. + diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc index 81c4bc306b2a0..7bdac42f869a7 100644 --- a/docs/reference/docs/get.asciidoc +++ b/docs/reference/docs/get.asciidoc @@ -3,7 +3,7 @@ The get API allows to get a typed JSON document from the index based on its id. The following example gets a JSON document from an index called -twitter, under a type called tweet, with id valued 0: +twitter, under a type called _doc, with id valued 0: [source,js] -------------------------------------------------- diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index 0c75fd011b418..e172b53f1a83c 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -98,7 +98,7 @@ which returns something similar to: "translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA", "history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ", "local_checkpoint" : "-1", - "translog_generation" : "1", + "translog_generation" : "2", "max_seq_no" : "-1", "sync_id" : "AVvFY-071siAOuFGEO9P", <1> "max_unsafe_auto_id_timestamp" : "-1" diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 60d2c8c46da21..7d92bb3b2e7c7 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -6,7 +6,7 @@ Fields of type `geo_point` accept latitude-longitude pairs, which can be used: * to find geo-points within a <>, within a certain <> of a central point, or within a <>. -* to aggregate documents by <> +* to aggregate documents <> or by <> from a central point. * to integrate distance into a document's <>. * to <> documents by distance.
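The geo-point use cases listed above map directly onto the Java query and sort builders. The following sketch is illustrative only: the `location` field name and the coordinates are hypothetical, while the `QueryBuilders` and `SortBuilders` calls are the standard builder API:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;

public class GeoPointExample {
    public static SearchSourceBuilder buildSearch() {
        return new SearchSourceBuilder()
            // Match documents whose geo_point lies within 200km of the
            // given central point.
            .query(QueryBuilders.geoDistanceQuery("location")
                .point(40.715, -74.011)
                .distance(200, DistanceUnit.KILOMETERS))
            // Sort the hits by distance from the same central point.
            .sort(SortBuilders.geoDistanceSort("location", 40.715, -74.011)
                .order(SortOrder.ASC));
    }
}
--------------------------------------------------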
diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index 0b8db376e861a..804fb1c65080a 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -184,7 +184,7 @@ The following parameters are accepted by `nested` fields: Because nested documents are indexed as separate documents, they can only be accessed within the scope of the `nested` query, the -`nested`/`reverse_nested`, or <>. +`nested`/`reverse_nested` aggregations, or <>. For instance, if a string field within a nested document has <> set to `offsets` to allow use of the postings diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index ee34707bf9b90..984bef0a3cc3c 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -120,7 +120,7 @@ The `auto_queue_frame_size` setting controls the number of operations during which measurement is taken before the queue is adjusted. It should be large enough that a single operation cannot unduly bias the calculation. -The `target_response_rate` is a time value setting that indicates the targeted +The `target_response_time` is a time value setting that indicates the targeted average response time for tasks in the thread pool queue. If tasks are routinely above this time, the thread pool queue will be adjusted down so that tasks are rejected. @@ -134,7 +134,7 @@ thread_pool: min_queue_size: 10 max_queue_size: 1000 auto_queue_frame_size: 2000 - target_response_rate: 1s + target_response_time: 1s -------------------------------------------------- [float] diff --git a/docs/reference/query-dsl/has-child-query.asciidoc b/docs/reference/query-dsl/has-child-query.asciidoc index 976948edbddce..ca23afb0fae26 100644 --- a/docs/reference/query-dsl/has-child-query.asciidoc +++ b/docs/reference/query-dsl/has-child-query.asciidoc @@ -27,9 +27,9 @@ Note that the `has_child` is a slow query compared to other queries in the query dsl due to the fact that it performs a join. The performance degrades as the number of matching child documents pointing to unique parent documents increases. If you care about query performance you should not use this query. -However if you do happen to use this query then use it as less as possible. Each -`has_child` query that gets added to a search request can increase query time -significantly. +However if you do happen to use this query then use it as little as possible. +Each `has_child` query that gets added to a search request can increase query +time significantly. [float] ==== Scoring capabilities diff --git a/docs/reference/query-dsl/terms-query.asciidoc b/docs/reference/query-dsl/terms-query.asciidoc index 48d80cb05b174..c0e94900d7d82 100644 --- a/docs/reference/query-dsl/terms-query.asciidoc +++ b/docs/reference/query-dsl/terms-query.asciidoc @@ -93,7 +93,7 @@ GET /tweets/_search "terms" : { "user" : { "index" : "users", - "type" : "user", + "type" : "_doc", "id" : "2", "path" : "followers" } diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc index 6e562731fac00..2a51d705d83ec 100644 --- a/docs/reference/search/request-body.asciidoc +++ b/docs/reference/search/request-body.asciidoc @@ -120,6 +120,12 @@ all clients support GET with body, POST is allowed as well. 
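Because not every client can send a GET with a body, a short sketch of the POST form with the low-level Java client follows; the index name, query, and endpoint are placeholders, and the `performRequest` overload shown is the client's documented signature from this era:

[source,java]
--------------------------------------------------
import java.util.Collections;

import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class SearchWithBody {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            // The search body is sent via POST rather than GET with a body.
            String body = "{\"query\":{\"match\":{\"message\":\"elasticsearch\"}}}";
            Response response = client.performRequest(
                    "POST", "/twitter/_search",
                    Collections.<String, String>emptyMap(),
                    new NStringEntity(body, ContentType.APPLICATION_JSON));
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------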
[float] === Fast check for any matching docs +NOTE: `terminate_after` is always applied **after** the `post_filter` and stops + the query as well as the aggregation executions when enough hits have been + collected on the shard. The doc count on aggregations, however, may not reflect + the `hits.total` in the response, since aggregations are applied **before** the + post filtering. + In case we only want to know if there are any documents matching a specific query, we can set the `size` to `0` to indicate that we are not interested in the search results. Also we can set `terminate_after` to `1` @@ -128,7 +134,7 @@ matching document was found (per shard). [source,js] -------------------------------------------------- -GET /_search?q=message:elasticsearch&size=0&terminate_after=1 +GET /_search?q=message:number&size=0&terminate_after=1 -------------------------------------------------- // CONSOLE // TEST[setup:twitter] diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index 0b6f5fb4d4046..57819d177a534 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -70,7 +70,7 @@ to reject search requests that hit too many shards. The search's `max_concurrent_shard_requests` request parameter can be used to control the maximum number of concurrent shard requests the search API will execute for this request. -This parameter should be used to protect a singe request from overloading a cluster ie. a default +This parameter should be used to protect a single request from overloading a cluster, i.e. a default request will hit all indices in a cluster which could cause shard request rejections if the number of shards per node is high. This default is based on the number of data nodes in -the cluster but at most `256`. \ No newline at end of file +the cluster but at most `256`. diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc index e5e7929d5b8ff..cbeb10c8c8571 100644 --- a/docs/reference/setup/bootstrap-checks.asciidoc +++ b/docs/reference/setup/bootstrap-checks.asciidoc @@ -166,8 +166,7 @@ the two VMs can be substantial. The client JVM check ensures that Elasticsearch is not running inside the client JVM. To pass the client JVM check, you must start Elasticsearch with the server VM. On modern systems and operating systems, the server VM is the -default. Additionally, Elasticsearch is configured by default to force -the server VM. +default. === Use serial collector check diff --git a/docs/reference/setup/install/zip-windows.asciidoc b/docs/reference/setup/install/zip-windows.asciidoc index 748d411f3a52c..0c186bbd80a41 100644 --- a/docs/reference/setup/install/zip-windows.asciidoc +++ b/docs/reference/setup/install/zip-windows.asciidoc @@ -196,6 +196,14 @@ command line, or when installing Elasticsearch as a service for the first time. To adjust the heap size for an already installed service, use the service manager: `bin\elasticsearch-service.bat manager`. +NOTE: The service automatically configures a private temporary directory for use +by Elasticsearch when it is running. This private temporary directory is +configured as a sub-directory of the private temporary directory for the user +running the installation. If the service will run under a different user, you +can configure the location of the temporary directory that the service should +use by setting the environment variable `ES_TMPDIR` to the preferred location +before you execute the service installation.
+ Using the Manager GUI:: It is also possible to configure the service after it's been installed using the manager GUI (`elasticsearch-service-mgr.exe`), which offers insight into the installed service, including its status, startup type, JVM, start and stop settings amongst other things. Simply invoking `elasticsearch-service.bat manager` from the command-line will open up the manager window: @@ -258,11 +266,6 @@ directory so that you do not delete important data later on. d| Not configured | path.repo -| script - | Location of script files. - | %ES_HOME%\scripts - | path.scripts - |======================================================================= include::next-steps.asciidoc[] \ No newline at end of file diff --git a/docs/reference/upgrade/cluster_restart.asciidoc b/docs/reference/upgrade/cluster_restart.asciidoc index 7989f14eaada3..a8f2f51dad2aa 100644 --- a/docs/reference/upgrade/cluster_restart.asciidoc +++ b/docs/reference/upgrade/cluster_restart.asciidoc @@ -94,13 +94,19 @@ reenable allocation. ------------------------------------------------------ PUT _cluster/settings { - "persistent": { + "transient": { "cluster.routing.allocation.enable": "all" } } ------------------------------------------------------ // CONSOLE +NOTE: Because <<_precedence_of_settings, transient +settings take precedence over persistent settings>>, this overrides the +persistent setting used to disable shard allocation in the first step. If you +don't explicitly reenable shard allocation after a full cluster restart, the +persistent setting is used and shard allocation remains disabled. + Once allocation is reenabled, the cluster starts allocating replica shards to the data nodes. At this point it is safe to resume indexing and searching, but your cluster will recover more quickly if you can wait until all primary diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc index f52f354b80b5c..2b46b65f2617f 100644 --- a/docs/reference/upgrade/rolling_upgrade.asciidoc +++ b/docs/reference/upgrade/rolling_upgrade.asciidoc @@ -69,6 +69,12 @@ GET _cat/nodes + -- +NOTE: Because <<_precedence_of_settings, transient +settings take precedence over persistent settings>>, this overrides the +persistent setting used to disable shard allocation in the first step. If you +don't explicitly reenable shard allocation after a full cluster restart, the +persistent setting is used and shard allocation remains disabled. + Once the node has joined the cluster, reenable shard allocation to start using the node: @@ -156,4 +162,4 @@ In the unlikely case of a network malfunction during the upgrade process that isolates all remaining old nodes from the cluster, you must take the old nodes offline and upgrade them to enable them to join the cluster. 
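For readers driving the same step from the Java High Level REST Client (whose cluster put settings API is listed among the supported APIs earlier in this change), reenabling allocation with a transient setting might look like the sketch below. The exact `putSettings` signature varies slightly across client versions, so treat this as an assumption-laden illustration rather than the definitive call sequence:

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.Settings;

public class ReenableAllocation {
    // Re-enable shard allocation with a transient setting, mirroring the
    // PUT _cluster/settings request shown in the upgrade instructions.
    public static void reenable(RestHighLevelClient client) throws Exception {
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
        request.transientSettings(Settings.builder()
            .put("cluster.routing.allocation.enable", "all")
            .build());
        ClusterUpdateSettingsResponse response = client.cluster().putSettings(request);
        System.out.println("acknowledged: " + response.isAcknowledged());
    }
}
--------------------------------------------------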
-==================================================== \ No newline at end of file +==================================================== diff --git a/libs/elasticsearch-core/build.gradle b/libs/elasticsearch-core/build.gradle index 4cbee03649bb7..dea5664a14fd1 100644 --- a/libs/elasticsearch-core/build.gradle +++ b/libs/elasticsearch-core/build.gradle @@ -35,8 +35,6 @@ publishing { } dependencies { - compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" - testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" @@ -78,4 +76,4 @@ thirdPartyAudit.excludes = [ 'org/osgi/framework/SynchronousBundleListener', 'org/osgi/framework/wiring/BundleWire', 'org/osgi/framework/wiring/BundleWiring' -] \ No newline at end of file +] diff --git a/libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 b/libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 deleted file mode 100644 index e1a89fadfed95..0000000000000 --- a/libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7a2999229464e7a324aa503c0a52ec0f05efe7bd \ No newline at end of file diff --git a/libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt b/libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt deleted file mode 100644 index 6279e5206de13..0000000000000 --- a/libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 1999-2005 The Apache Software Foundation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt b/libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt deleted file mode 100644 index 0375732360047..0000000000000 --- a/libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt +++ /dev/null @@ -1,5 +0,0 @@ -Apache log4j -Copyright 2007 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). 
\ No newline at end of file diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index 0e5c9597b7ec8..e171daeb79b85 100644 --- a/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -19,10 +19,8 @@ package org.elasticsearch.bootstrap; -import org.apache.logging.log4j.Logger; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.logging.Loggers; import java.io.IOException; import java.net.MalformedURLException; @@ -43,6 +41,7 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.function.Consumer; import java.util.jar.JarEntry; import java.util.jar.JarFile; import java.util.jar.Manifest; @@ -68,25 +67,23 @@ private JarHell() {} @SuppressForbidden(reason = "command line tool") public static void main(String args[]) throws Exception { System.out.println("checking for jar hell..."); - checkJarHell(); + checkJarHell(System.out::println); System.out.println("no jar hell found"); } /** * Checks the current classpath for duplicate classes + * @param output A {@link String} {@link Consumer} to which debug output will be sent * @throws IllegalStateException if jar hell was found */ - public static void checkJarHell() throws IOException, URISyntaxException { + public static void checkJarHell(Consumer output) throws IOException, URISyntaxException { ClassLoader loader = JarHell.class.getClassLoader(); - Logger logger = Loggers.getLogger(JarHell.class); - if (logger.isDebugEnabled()) { - logger.debug("java.class.path: {}", System.getProperty("java.class.path")); - logger.debug("sun.boot.class.path: {}", System.getProperty("sun.boot.class.path")); - if (loader instanceof URLClassLoader ) { - logger.debug("classloader urls: {}", Arrays.toString(((URLClassLoader)loader).getURLs())); - } + output.accept("java.class.path: " + System.getProperty("java.class.path")); + output.accept("sun.boot.class.path: " + System.getProperty("sun.boot.class.path")); + if (loader instanceof URLClassLoader) { + output.accept("classloader urls: " + Arrays.toString(((URLClassLoader)loader).getURLs())); } - checkJarHell(parseClassPath()); + checkJarHell(parseClassPath(), output); } /** @@ -152,23 +149,24 @@ static Set parseClassPath(String classPath) { /** * Checks the set of URLs for duplicate classes + * @param urls A set of URLs from the classpath to be checked for conflicting jars + * @param output A {@link String} {@link Consumer} to which debug output will be sent * @throws IllegalStateException if jar hell was found */ @SuppressForbidden(reason = "needs JarFile for speed, just reading entries") - public static void checkJarHell(Set urls) throws URISyntaxException, IOException { - Logger logger = Loggers.getLogger(JarHell.class); + public static void checkJarHell(Set urls, Consumer output) throws URISyntaxException, IOException { // we don't try to be sneaky and use deprecated/internal/not portable stuff // like sun.boot.class.path, and with jigsaw we don't yet have a way to get // a "list" at all. 
So just exclude any elements underneath the java home String javaHome = System.getProperty("java.home"); - logger.debug("java.home: {}", javaHome); + output.accept("java.home: " + javaHome); final Map clazzes = new HashMap<>(32768); Set seenJars = new HashSet<>(); for (final URL url : urls) { final Path path = PathUtils.get(url.toURI()); // exclude system resources if (path.startsWith(javaHome)) { - logger.debug("excluding system resource: {}", path); + output.accept("excluding system resource: " + path); continue; } if (path.toString().endsWith(".jar")) { @@ -176,7 +174,7 @@ public static void checkJarHell(Set urls) throws URISyntaxException, IOExce throw new IllegalStateException("jar hell!" + System.lineSeparator() + "duplicate jar on classpath: " + path); } - logger.debug("examining jar: {}", path); + output.accept("examining jar: " + path); try (JarFile file = new JarFile(path.toString())) { Manifest manifest = file.getManifest(); if (manifest != null) { @@ -194,7 +192,7 @@ public static void checkJarHell(Set urls) throws URISyntaxException, IOExce } } } else { - logger.debug("examining directory: {}", path); + output.accept("examining directory: " + path); // case for tests: where we have class files in the classpath final Path root = PathUtils.get(url.toURI()); final String sep = root.getFileSystem().getSeparator(); diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java deleted file mode 100644 index 89073bdce54c4..0000000000000 --- a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.logging; - -import org.apache.logging.log4j.Logger; - -public class Loggers { - - public static final String SPACE = " "; - - public static Logger getLogger(Logger parentLogger, String s) { - assert parentLogger instanceof PrefixLogger; - return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s); - } - - public static Logger getLogger(String s) { - return ESLoggerFactory.getLogger(s); - } - - public static Logger getLogger(Class clazz) { - return ESLoggerFactory.getLogger(clazz); - } - - public static Logger getLogger(Class clazz, String... prefixes) { - return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz); - } - - public static Logger getLogger(String name, String... prefixes) { - return ESLoggerFactory.getLogger(formatPrefix(prefixes), name); - } - - private static String formatPrefix(String... 
prefixes) { - String prefix = null; - if (prefixes != null && prefixes.length > 0) { - StringBuilder sb = new StringBuilder(); - for (String prefixX : prefixes) { - if (prefixX != null) { - if (prefixX.equals(SPACE)) { - sb.append(" "); - } else { - sb.append("[").append(prefixX).append("]"); - } - } - } - if (sb.length() > 0) { - sb.append(" "); - prefix = sb.toString(); - } - } - return prefix; - } -} diff --git a/libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java index b3dee0b004584..e58268ef19251 100644 --- a/libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java +++ b/libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java @@ -66,7 +66,7 @@ public void testDifferentJars() throws Exception { Set jars = asSet(makeJar(dir, "foo.jar", null, "DuplicateClass.class"), makeJar(dir, "bar.jar", null, "DuplicateClass.class")); try { - JarHell.checkJarHell(jars); + JarHell.checkJarHell(jars, logger::debug); fail("did not get expected exception"); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("jar hell!")); @@ -82,7 +82,7 @@ public void testDirsOnClasspath() throws Exception { Set dirs = asSet(makeFile(dir1, "DuplicateClass.class"), makeFile(dir2, "DuplicateClass.class")); try { - JarHell.checkJarHell(dirs); + JarHell.checkJarHell(dirs, logger::debug); fail("did not get expected exception"); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("jar hell!")); @@ -98,7 +98,7 @@ public void testDirAndJar() throws Exception { Set dirs = asSet(makeJar(dir1, "foo.jar", null, "DuplicateClass.class"), makeFile(dir2, "DuplicateClass.class")); try { - JarHell.checkJarHell(dirs); + JarHell.checkJarHell(dirs, logger::debug); fail("did not get expected exception"); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("jar hell!")); @@ -113,7 +113,7 @@ public void testWithinSingleJar() throws Exception { // this bogus jar had to be with https://github.com/jasontedor/duplicate-classes Set jars = Collections.singleton(JarHellTests.class.getResource("duplicate-classes.jar")); try { - JarHell.checkJarHell(jars); + JarHell.checkJarHell(jars, logger::debug); fail("did not get expected exception"); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("jar hell!")); @@ -125,7 +125,7 @@ public void testWithinSingleJar() throws Exception { public void testXmlBeansLeniency() throws Exception { Set jars = Collections.singleton(JarHellTests.class.getResource("duplicate-xmlbeans-classes.jar")); - JarHell.checkJarHell(jars); + JarHell.checkJarHell(jars, logger::debug); } public void testRequiredJDKVersionTooOld() throws Exception { @@ -144,7 +144,7 @@ public void testRequiredJDKVersionTooOld() throws Exception { attributes.put(new Attributes.Name("X-Compile-Target-JDK"), targetVersion.toString()); Set jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class")); try { - JarHell.checkJarHell(jars); + JarHell.checkJarHell(jars, logger::debug); fail("did not get expected exception"); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("requires Java " + targetVersion.toString())); @@ -160,7 +160,7 @@ public void testBadJDKVersionInJar() throws Exception { attributes.put(new Attributes.Name("X-Compile-Target-JDK"), "bogus"); Set jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class")); try { - JarHell.checkJarHell(jars); + 
JarHell.checkJarHell(jars, logger::debug); fail("did not get expected exception"); } catch (IllegalStateException e) { assertTrue(e.getMessage().equals("version string must be a sequence of nonnegative decimal integers separated " + @@ -175,7 +175,7 @@ public void testRequiredJDKVersionIsOK() throws Exception { attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); attributes.put(new Attributes.Name("X-Compile-Target-JDK"), "1.7"); Set jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class")); - JarHell.checkJarHell(jars); + JarHell.checkJarHell(jars, logger::debug); } public void testValidVersions() { diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java index 2de48fb8899e2..acfec6ca04e3e 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java @@ -106,7 +106,7 @@ public void queueWrite(WriteOperation writeOperation) { if (isOpen() == false) { boolean wasRemoved = queuedWrites.remove(writeOperation); if (wasRemoved) { - executeFailedListener(writeOperation.getListener(), new ClosedSelectorException()); + writeOperation.getListener().accept(null, new ClosedSelectorException()); } } else { wakeup(); diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle new file mode 100644 index 0000000000000..c91312cf9e8af --- /dev/null +++ b/libs/grok/build.gradle @@ -0,0 +1,60 @@ +import org.elasticsearch.gradle.precommit.PrecommitTasks + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +apply plugin: 'elasticsearch.build' + +archivesBaseName = 'elasticsearch-grok' + +dependencies { + compile 'org.jruby.joni:joni:2.1.6' + // joni dependencies: + compile 'org.jruby.jcodings:jcodings:1.0.12' + + if (isEclipse == false || project.path == ":libs:grok-tests") { + testCompile("org.elasticsearch.test:framework:${version}") { + exclude group: 'org.elasticsearch', module: 'grok' + } + } +} + +forbiddenApisMain { + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +if (isEclipse) { + // in eclipse the project is under a fake root, we need to change around the source sets + sourceSets { + if (project.path == ":libs:grok") { + main.java.srcDirs = ['java'] + main.resources.srcDirs = ['resources'] + } else { + test.java.srcDirs = ['java'] + test.resources.srcDirs = ['resources'] + } + } +} + +thirdPartyAudit.excludes = [ + // joni has AsmCompilerSupport, but that isn't being used: + 'org.objectweb.asm.ClassWriter', + 'org.objectweb.asm.MethodVisitor', + 'org.objectweb.asm.Opcodes', +] diff --git a/modules/ingest-common/licenses/jcodings-1.0.12.jar.sha1 b/libs/grok/licenses/jcodings-1.0.12.jar.sha1 similarity index 100% rename from modules/ingest-common/licenses/jcodings-1.0.12.jar.sha1 rename to libs/grok/licenses/jcodings-1.0.12.jar.sha1 diff --git a/modules/ingest-common/licenses/jcodings-LICENSE.txt b/libs/grok/licenses/jcodings-LICENSE.txt similarity index 100% rename from modules/ingest-common/licenses/jcodings-LICENSE.txt rename to libs/grok/licenses/jcodings-LICENSE.txt diff --git a/modules/ingest-common/licenses/jcodings-NOTICE.txt b/libs/grok/licenses/jcodings-NOTICE.txt similarity index 100% rename from modules/ingest-common/licenses/jcodings-NOTICE.txt rename to libs/grok/licenses/jcodings-NOTICE.txt diff --git a/modules/ingest-common/licenses/joni-2.1.6.jar.sha1 b/libs/grok/licenses/joni-2.1.6.jar.sha1 similarity index 100% rename from modules/ingest-common/licenses/joni-2.1.6.jar.sha1 rename to libs/grok/licenses/joni-2.1.6.jar.sha1 diff --git a/modules/ingest-common/licenses/joni-LICENSE.txt b/libs/grok/licenses/joni-LICENSE.txt similarity index 100% rename from modules/ingest-common/licenses/joni-LICENSE.txt rename to libs/grok/licenses/joni-LICENSE.txt diff --git a/modules/ingest-common/licenses/joni-NOTICE.txt b/libs/grok/licenses/joni-NOTICE.txt similarity index 100% rename from modules/ingest-common/licenses/joni-NOTICE.txt rename to libs/grok/licenses/joni-NOTICE.txt diff --git a/libs/grok/src/main/eclipse-build.gradle b/libs/grok/src/main/eclipse-build.gradle new file mode 100644 index 0000000000000..3188c7aff01f7 --- /dev/null +++ b/libs/grok/src/main/eclipse-build.gradle @@ -0,0 +1,3 @@ + +// this is just shell gradle file for eclipse to have separate projects for grok src and tests +apply from: '../../build.gradle' diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Grok.java b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java similarity index 77% rename from modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Grok.java rename to libs/grok/src/main/java/org/elasticsearch/grok/Grok.java index 576a3b85eb31c..4cbeb84806089 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Grok.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.ingest.common; +package org.elasticsearch.grok; import org.jcodings.specific.UTF8Encoding; import org.joni.Matcher; @@ -28,13 +28,19 @@ import org.joni.Syntax; import org.joni.exception.ValueException; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.UncheckedIOException; import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Iterator; import java.util.Locale; import java.util.Map; +import java.util.Collections; -final class Grok { +public final class Grok { private static final String NAME_GROUP = "name"; private static final String SUBNAME_GROUP = "subname"; @@ -54,13 +60,24 @@ final class Grok { ")?" + "\\}"; private static final Regex GROK_PATTERN_REGEX = new Regex(GROK_PATTERN.getBytes(StandardCharsets.UTF_8), 0, GROK_PATTERN.getBytes(StandardCharsets.UTF_8).length, Option.NONE, UTF8Encoding.INSTANCE, Syntax.DEFAULT); + + private static final Map builtinPatterns; + + static { + try { + builtinPatterns = loadBuiltinPatterns(); + } catch (IOException e) { + throw new UncheckedIOException("unable to load built-in grok patterns", e); + } + } + private final Map patternBank; private final boolean namedCaptures; private final Regex compiledExpression; private final String expression; - Grok(Map patternBank, String grokPattern) { + public Grok(Map patternBank, String grokPattern) { this(patternBank, grokPattern, true); } @@ -176,5 +193,42 @@ public Map captures(String text) { } return null; } + + public static Map getBuiltinPatterns() { + return builtinPatterns; + } + + private static Map loadBuiltinPatterns() throws IOException { + // Code for loading built-in grok patterns packaged with the jar file: + String[] PATTERN_NAMES = new String[] { + "aws", "bacula", "bro", "exim", "firewalls", "grok-patterns", "haproxy", + "java", "junos", "linux-syslog", "mcollective-patterns", "mongodb", "nagios", + "postgresql", "rails", "redis", "ruby" + }; + Map builtinPatterns = new HashMap<>(); + for (String pattern : PATTERN_NAMES) { + try(InputStream is = Grok.class.getResourceAsStream("/patterns/" + pattern)) { + loadPatterns(builtinPatterns, is); + } + } + return Collections.unmodifiableMap(builtinPatterns); + } + + private static void loadPatterns(Map patternBank, InputStream inputStream) throws IOException { + String line; + BufferedReader br = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); + while ((line = br.readLine()) != null) { + String trimmedLine = line.replaceAll("^\\s+", ""); + if (trimmedLine.startsWith("#") || trimmedLine.length() == 0) { + continue; + } + + String[] parts = trimmedLine.split("\\s+", 2); + if (parts.length == 2) { + patternBank.put(parts[0], parts[1]); + } + } + } + } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokMatchGroup.java b/libs/grok/src/main/java/org/elasticsearch/grok/GrokMatchGroup.java similarity index 98% rename from modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokMatchGroup.java rename to libs/grok/src/main/java/org/elasticsearch/grok/GrokMatchGroup.java index 6ddb8d07e7658..43bf16a18b76e 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokMatchGroup.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/GrokMatchGroup.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.ingest.common; +package org.elasticsearch.grok; final class GrokMatchGroup { private static final String DEFAULT_TYPE = "string"; diff --git a/modules/ingest-common/src/main/resources/patterns/aws b/libs/grok/src/main/resources/patterns/aws similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/aws rename to libs/grok/src/main/resources/patterns/aws diff --git a/modules/ingest-common/src/main/resources/patterns/bacula b/libs/grok/src/main/resources/patterns/bacula similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/bacula rename to libs/grok/src/main/resources/patterns/bacula diff --git a/modules/ingest-common/src/main/resources/patterns/bro b/libs/grok/src/main/resources/patterns/bro similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/bro rename to libs/grok/src/main/resources/patterns/bro diff --git a/modules/ingest-common/src/main/resources/patterns/exim b/libs/grok/src/main/resources/patterns/exim similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/exim rename to libs/grok/src/main/resources/patterns/exim diff --git a/modules/ingest-common/src/main/resources/patterns/firewalls b/libs/grok/src/main/resources/patterns/firewalls similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/firewalls rename to libs/grok/src/main/resources/patterns/firewalls diff --git a/modules/ingest-common/src/main/resources/patterns/grok-patterns b/libs/grok/src/main/resources/patterns/grok-patterns similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/grok-patterns rename to libs/grok/src/main/resources/patterns/grok-patterns diff --git a/modules/ingest-common/src/main/resources/patterns/haproxy b/libs/grok/src/main/resources/patterns/haproxy similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/haproxy rename to libs/grok/src/main/resources/patterns/haproxy diff --git a/modules/ingest-common/src/main/resources/patterns/java b/libs/grok/src/main/resources/patterns/java similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/java rename to libs/grok/src/main/resources/patterns/java diff --git a/modules/ingest-common/src/main/resources/patterns/junos b/libs/grok/src/main/resources/patterns/junos similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/junos rename to libs/grok/src/main/resources/patterns/junos diff --git a/modules/ingest-common/src/main/resources/patterns/linux-syslog b/libs/grok/src/main/resources/patterns/linux-syslog similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/linux-syslog rename to libs/grok/src/main/resources/patterns/linux-syslog diff --git a/modules/ingest-common/src/main/resources/patterns/mcollective-patterns b/libs/grok/src/main/resources/patterns/mcollective-patterns similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/mcollective-patterns rename to libs/grok/src/main/resources/patterns/mcollective-patterns diff --git a/modules/ingest-common/src/main/resources/patterns/mongodb b/libs/grok/src/main/resources/patterns/mongodb similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/mongodb rename to libs/grok/src/main/resources/patterns/mongodb diff --git a/modules/ingest-common/src/main/resources/patterns/nagios b/libs/grok/src/main/resources/patterns/nagios similarity index 100% rename from 
modules/ingest-common/src/main/resources/patterns/nagios rename to libs/grok/src/main/resources/patterns/nagios diff --git a/modules/ingest-common/src/main/resources/patterns/postgresql b/libs/grok/src/main/resources/patterns/postgresql similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/postgresql rename to libs/grok/src/main/resources/patterns/postgresql diff --git a/modules/ingest-common/src/main/resources/patterns/rails b/libs/grok/src/main/resources/patterns/rails similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/rails rename to libs/grok/src/main/resources/patterns/rails diff --git a/modules/ingest-common/src/main/resources/patterns/redis b/libs/grok/src/main/resources/patterns/redis similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/redis rename to libs/grok/src/main/resources/patterns/redis diff --git a/modules/ingest-common/src/main/resources/patterns/ruby b/libs/grok/src/main/resources/patterns/ruby similarity index 100% rename from modules/ingest-common/src/main/resources/patterns/ruby rename to libs/grok/src/main/resources/patterns/ruby diff --git a/libs/grok/src/test/eclipse-build.gradle b/libs/grok/src/test/eclipse-build.gradle new file mode 100644 index 0000000000000..c5d791c166324 --- /dev/null +++ b/libs/grok/src/test/eclipse-build.gradle @@ -0,0 +1,7 @@ + +// this is just shell gradle file for eclipse to have separate projects for grok src and tests +apply from: '../../build.gradle' + +dependencies { + testCompile project(':libs:grok') +} diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java similarity index 99% rename from modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokTests.java rename to libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java index 2c2c0f29f83d0..931842d9f247f 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokTests.java +++ b/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.ingest.common; +package org.elasticsearch.grok; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -38,8 +38,8 @@ public class GrokTests extends ESTestCase { private Map basePatterns; @Before - public void setup() throws IOException { - basePatterns = IngestCommonPlugin.loadBuiltinPatterns(); + public void setup() { + basePatterns = Grok.getBuiltinPatterns(); } public void testMatchWithoutCaptures() { diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index 6d93f663116b4..424c1197da3e5 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -23,17 +23,8 @@ esplugin { } dependencies { - compile 'org.jruby.joni:joni:2.1.6' - // joni dependencies: - compile 'org.jruby.jcodings:jcodings:1.0.12' + compile project(':libs:grok') } compileJava.options.compilerArgs << "-Xlint:-unchecked,-rawtypes" compileTestJava.options.compilerArgs << "-Xlint:-unchecked,-rawtypes" - -thirdPartyAudit.excludes = [ - // joni has AsmCompilerSupport, but that isn't being used: - 'org.objectweb.asm.ClassWriter', - 'org.objectweb.asm.MethodVisitor', - 'org.objectweb.asm.Opcodes', -] \ No newline at end of file diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java index f8eb49b923989..8d1d2127e7213 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.common; +import org.elasticsearch.grok.Grok; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; @@ -27,7 +28,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import static org.elasticsearch.ingest.ConfigurationUtils.newConfigurationException; @@ -42,7 +42,7 @@ public final class GrokProcessor extends AbstractProcessor { private final boolean traceMatch; private final boolean ignoreMissing; - public GrokProcessor(String tag, Map patternBank, List matchPatterns, String matchField, + GrokProcessor(String tag, Map patternBank, List matchPatterns, String matchField, boolean traceMatch, boolean ignoreMissing) { super(tag); this.matchField = matchField; diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java index 45ffcc53a4d3d..c68f498c0eaf1 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java @@ -23,9 +23,9 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -85,7 
+85,7 @@ public RequestBuilder(ElasticsearchClient client) { } } - public static class Response extends AcknowledgedResponse implements ToXContentObject { + public static class Response extends ActionResponse implements ToXContentObject { private Map grokPatterns; public Response(Map grokPatterns) { diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java index 0182e290d72b4..a29c994f10d37 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java @@ -19,19 +19,6 @@ package org.elasticsearch.ingest.common; -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.UncheckedIOException; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.function.Supplier; - import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -40,6 +27,7 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.grok.Grok; import org.elasticsearch.ingest.Processor; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.IngestPlugin; @@ -47,24 +35,18 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + public class IngestCommonPlugin extends Plugin implements ActionPlugin, IngestPlugin { - // Code for loading built-in grok patterns packaged with the jar file: - private static final String[] PATTERN_NAMES = new String[] { - "aws", "bacula", "bro", "exim", "firewalls", "grok-patterns", "haproxy", - "java", "junos", "linux-syslog", "mcollective-patterns", "mongodb", "nagios", - "postgresql", "rails", "redis", "ruby" - }; - static final Map GROK_PATTERNS; - static { - try { - GROK_PATTERNS = loadBuiltinPatterns(); - } catch (IOException e) { - throw new UncheckedIOException("unable to load built-in grok patterns", e); - } - } + static final Map GROK_PATTERNS = Grok.getBuiltinPatterns(); - public IngestCommonPlugin() throws IOException { + public IngestCommonPlugin() { } @Override @@ -108,30 +90,4 @@ public List getRestHandlers(Settings settings, RestController restC return Arrays.asList(new GrokProcessorGetAction.RestAction(settings, restController)); } - - public static Map loadBuiltinPatterns() throws IOException { - Map builtinPatterns = new HashMap<>(); - for (String pattern : PATTERN_NAMES) { - try(InputStream is = IngestCommonPlugin.class.getResourceAsStream("/patterns/" + pattern)) { - loadPatterns(builtinPatterns, is); - } - } - return Collections.unmodifiableMap(builtinPatterns); - } - - private static void loadPatterns(Map patternBank, InputStream inputStream) throws IOException { - String line; - BufferedReader br = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); - while ((line = br.readLine()) != null) { - 
String trimmedLine = line.replaceAll("^\\s+", ""); - if (trimmedLine.startsWith("#") || trimmedLine.length() == 0) { - continue; - } - - String[] parts = trimmedLine.split("\\s+", 2); - if (parts.length == 2) { - patternBank.put(parts[0], parts[1]); - } - } - } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java index 8ca31787b5aaa..de726ddb7fbbb 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java @@ -24,6 +24,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; @@ -76,7 +77,8 @@ boolean isAddToRoot() { public void execute(IngestDocument document) throws Exception { Object fieldValue = document.getFieldValue(field, Object.class); BytesReference bytesRef = (fieldValue == null) ? new BytesArray("null") : new BytesArray(fieldValue.toString()); - try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, bytesRef)) { + try (XContentParser parser = JsonXContent.jsonXContent + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytesRef.streamInput())) { XContentParser.Token token = parser.nextToken(); Object value = null; if (token == XContentParser.Token.VALUE_NULL) { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorTests.java index cda2b36a3ecff..66cddd43e6583 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorTests.java @@ -19,7 +19,9 @@ package org.elasticsearch.ingest.common; +import org.elasticsearch.index.VersionType; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestDocument.MetaData; import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.ingest.TestTemplateService; @@ -122,10 +124,10 @@ public void testConvertScalarToList() throws Exception { } } - public void testAppendMetadata() throws Exception { - //here any metadata field value becomes a list, which won't make sense in most of the cases, + public void testAppendMetadataExceptVersion() throws Exception { + // here any metadata field value becomes a list, which won't make sense in most of the cases, // but support for append is streamlined like for set so we test it - IngestDocument.MetaData randomMetaData = randomFrom(IngestDocument.MetaData.values()); + MetaData randomMetaData = randomFrom(MetaData.INDEX, MetaData.TYPE, MetaData.ID, MetaData.ROUTING, MetaData.PARENT); List values = new ArrayList<>(); Processor appendProcessor; if (randomBoolean()) { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java 
b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java index 6736594613954..d052ce0cd44c3 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java @@ -38,7 +38,7 @@ public void testJodaPattern() throws Exception { "events-", "y", "yyyyMMdd" ); - IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, + IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, null, Collections.singletonMap("_field", "2016-04-25T12:24:20.101Z")); processor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); @@ -48,7 +48,7 @@ public void testTAI64N()throws Exception { Function function = DateFormat.Tai64n.getFunction(null, DateTimeZone.UTC, null); DateIndexNameProcessor dateProcessor = new DateIndexNameProcessor("_tag", "_field", Collections.singletonList(function), DateTimeZone.UTC, "events-", "m", "yyyyMMdd"); - IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, + IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, null, Collections.singletonMap("_field", (randomBoolean() ? "@" : "") + "4000000050d506482dbdf024")); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); @@ -58,12 +58,12 @@ public void testUnixMs()throws Exception { Function function = DateFormat.UnixMs.getFunction(null, DateTimeZone.UTC, null); DateIndexNameProcessor dateProcessor = new DateIndexNameProcessor("_tag", "_field", Collections.singletonList(function), DateTimeZone.UTC, "events-", "m", "yyyyMMdd"); - IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, + IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, null, Collections.singletonMap("_field", "1000500")); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); - document = new IngestDocument("_index", "_type", "_id", null, null, + document = new IngestDocument("_index", "_type", "_id", null, null, null, null, Collections.singletonMap("_field", 1000500L)); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); @@ -73,7 +73,7 @@ public void testUnix()throws Exception { Function function = DateFormat.Unix.getFunction(null, DateTimeZone.UTC, null); DateIndexNameProcessor dateProcessor = new DateIndexNameProcessor("_tag", "_field", Collections.singletonList(function), DateTimeZone.UTC, "events-", "m", "yyyyMMdd"); - IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, + IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, null, Collections.singletonMap("_field", "1000.5")); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java index c043102ef5d3a..95c25bedb6280 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java @@ -45,7 +45,7 @@ 
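The repeated edits in this and the following test hunks all track a single signature change: `IngestDocument` now carries version metadata, so two extra constructor arguments slot in ahead of the source map. A hedged sketch of the widened call; the argument labels are assumptions inferred from the `_version`/`_version_type` work elsewhere in this commit:

```java
import org.elasticsearch.ingest.IngestDocument;

import java.util.Collections;

class IngestDocumentCtorSketch {
    static IngestDocument sample() {
        // The two added nulls correspond to the new version and version-type
        // metadata fields; all labels here are assumptions for readability.
        return new IngestDocument(
                "_index", "_type", "_id",
                null,  // routing
                null,  // parent
                null,  // version (new)
                null,  // version type (new)
                Collections.singletonMap("_field", "2016-04-25T12:24:20.101Z"));
    }
}
```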
public void testExecute() throws Exception { values.add("bar"); values.add("baz"); IngestDocument ingestDocument = new IngestDocument( - "_index", "_type", "_id", null, null, Collections.singletonMap("values", values) + "_index", "_type", "_id", null, null, null, null, Collections.singletonMap("values", values) ); ForEachProcessor processor = new ForEachProcessor( @@ -61,7 +61,7 @@ public void testExecute() throws Exception { public void testExecuteWithFailure() throws Exception { IngestDocument ingestDocument = new IngestDocument( - "_index", "_type", "_id", null, null, Collections.singletonMap("values", Arrays.asList("a", "b", "c")) + "_index", "_type", "_id", null, null, null, null, Collections.singletonMap("values", Arrays.asList("a", "b", "c")) ); TestProcessor testProcessor = new TestProcessor(id -> { @@ -101,7 +101,7 @@ public void testMetaDataAvailable() throws Exception { values.add(new HashMap<>()); values.add(new HashMap<>()); IngestDocument ingestDocument = new IngestDocument( - "_index", "_type", "_id", null, null, Collections.singletonMap("values", values) + "_index", "_type", "_id", null, null, null, null, Collections.singletonMap("values", values) ); TestProcessor innerProcessor = new TestProcessor(id -> { @@ -132,7 +132,7 @@ public void testRestOfTheDocumentIsAvailable() throws Exception { document.put("values", values); document.put("flat_values", new ArrayList<>()); document.put("other", "value"); - IngestDocument ingestDocument = new IngestDocument("_index", "_type", "_id", null, null, document); + IngestDocument ingestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, null, document); ForEachProcessor processor = new ForEachProcessor( "_tag", "values", new SetProcessor("_tag", @@ -171,7 +171,7 @@ public String getTag() { values.add(""); } IngestDocument ingestDocument = new IngestDocument( - "_index", "_type", "_id", null, null, Collections.singletonMap("values", values) + "_index", "_type", "_id", null, null, null, null, Collections.singletonMap("values", values) ); ForEachProcessor processor = new ForEachProcessor("_tag", "values", innerProcessor); @@ -190,7 +190,7 @@ public void testModifyFieldsOutsideArray() throws Exception { values.add(1); values.add(null); IngestDocument ingestDocument = new IngestDocument( - "_index", "_type", "_id", null, null, Collections.singletonMap("values", values) + "_index", "_type", "_id", null, null, null, null, Collections.singletonMap("values", values) ); TemplateScript.Factory template = new TestTemplateService.MockTemplateScript.Factory("errors"); @@ -220,7 +220,7 @@ public void testScalarValueAllowsUnderscoreValueFieldToRemainAccessible() throws source.put("_value", "new_value"); source.put("values", values); IngestDocument ingestDocument = new IngestDocument( - "_index", "_type", "_id", null, null, source + "_index", "_type", "_id", null, null, null, null, source ); TestProcessor processor = new TestProcessor(doc -> doc.setFieldValue("_ingest._value", @@ -251,7 +251,7 @@ public void testNestedForEach() throws Exception { values.add(value); IngestDocument ingestDocument = new IngestDocument( - "_index", "_type", "_id", null, null, Collections.singletonMap("values1", values) + "_index", "_type", "_id", null, null, null, null, Collections.singletonMap("values1", values) ); TestProcessor testProcessor = new TestProcessor( diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java 
b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java new file mode 100644 index 0000000000000..c62a8fd237148 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java @@ -0,0 +1,151 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.ingest.common; + +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.function.Function; + +import static org.hamcrest.Matchers.equalTo; + +// Ideally I'd like this test to live in the server module, but otherwise a large part of the ScriptProcessor +// ends up being copied into this test.
+@ESIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = ESIntegTestCase.Scope.TEST) +public class IngestRestartIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(IngestCommonPlugin.class, CustomScriptPlugin.class); + } + + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + public static class CustomScriptPlugin extends MockScriptPlugin { + @Override + protected Map, Object>> pluginScripts() { + return Collections.singletonMap("my_script", script -> { + @SuppressWarnings("unchecked") + Map ctx = (Map) script.get("ctx"); + ctx.put("z", 0); + return null; + }); + } + } + + public void testPipelineWithScriptProcessorThatHasStoredScript() throws Exception { + internalCluster().startNode(); + + client().admin().cluster().preparePutStoredScript() + .setId("1") + .setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptEngine.NAME + + "\", \"source\": \"my_script\"} }"), XContentType.JSON) + .get(); + BytesReference pipeline = new BytesArray("{\n" + + " \"processors\" : [\n" + + " {\"set\" : {\"field\": \"y\", \"value\": 0}},\n" + + " {\"script\" : {\"id\": \"1\"}}\n" + + " ]\n" + + "}"); + client().admin().cluster().preparePutPipeline("_id", pipeline, XContentType.JSON).get(); + + client().prepareIndex("index", "doc", "1") + .setSource("x", 0) + .setPipeline("_id") + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + + Map source = client().prepareGet("index", "doc", "1").get().getSource(); + assertThat(source.get("x"), equalTo(0)); + assertThat(source.get("y"), equalTo(0)); + assertThat(source.get("z"), equalTo(0)); + + // Before ScriptService implemented ClusterStateApplier instead of ClusterStateListener, + // pipelines with a script processor failed to load, leaving those pipelines, and any pipelines that + // were supposed to load after them, unavailable during ingestion, which then caused + // the next index request in this test to fail.
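The comment above leans on an ordering guarantee worth spelling out: a `ClusterStateApplier` runs while the new cluster state is still being applied, before any `ClusterStateListener` callback observes it. An illustrative sketch of the applier side; this is not the actual `ScriptService` code, and both the class name and the `"stored_scripts"` metadata key are assumptions:

```java
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateApplier;
import org.elasticsearch.cluster.metadata.MetaData;

// Illustrative only: a holder that must expose up-to-date stored scripts to
// other components reacting to the same cluster state update.
class StoredScriptHolder implements ClusterStateApplier {

    private volatile MetaData.Custom scriptMetaData;

    @Override
    public void applyClusterState(ClusterChangedEvent event) {
        // Appliers run during state application, ahead of listener callbacks,
        // so anything loading pipelines in the same update already sees the
        // new scripts.
        scriptMetaData = event.state().metaData().custom("stored_scripts");
    }
}
```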
+ internalCluster().fullRestart(); + ensureYellow("index"); + + client().prepareIndex("index", "doc", "2") + .setSource("x", 0) + .setPipeline("_id") + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + + source = client().prepareGet("index", "doc", "2").get().getSource(); + assertThat(source.get("x"), equalTo(0)); + assertThat(source.get("y"), equalTo(0)); + assertThat(source.get("z"), equalTo(0)); + } + + public void testWithDedicatedIngestNode() throws Exception { + String node = internalCluster().startNode(); + String ingestNode = internalCluster().startNode(Settings.builder() + .put("node.master", false) + .put("node.data", false) + ); + + BytesReference pipeline = new BytesArray("{\n" + + " \"processors\" : [\n" + + " {\"set\" : {\"field\": \"y\", \"value\": 0}}\n" + + " ]\n" + + "}"); + client().admin().cluster().preparePutPipeline("_id", pipeline, XContentType.JSON).get(); + + client().prepareIndex("index", "doc", "1") + .setSource("x", 0) + .setPipeline("_id") + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + + Map source = client().prepareGet("index", "doc", "1").get().getSource(); + assertThat(source.get("x"), equalTo(0)); + assertThat(source.get("y"), equalTo(0)); + + logger.info("Stopping"); + internalCluster().restartNode(node, new InternalTestCluster.RestartCallback()); + + client(ingestNode).prepareIndex("index", "doc", "2") + .setSource("x", 0) + .setPipeline("_id") + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + + source = client(ingestNode).prepareGet("index", "doc", "2").get().getSource(); + assertThat(source.get("x"), equalTo(0)); + assertThat(source.get("y"), equalTo(0)); + } + +} diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java index 5f0759b5b2157..6fec977e6c268 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java @@ -19,7 +19,9 @@ package org.elasticsearch.ingest.common; +import org.elasticsearch.index.VersionType; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestDocument.MetaData; import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.ingest.TestTemplateService; @@ -99,14 +101,30 @@ public void testSetExistingNullFieldWithOverrideDisabled() throws Exception { assertThat(ingestDocument.getFieldValue(fieldName, Object.class), equalTo(newValue)); } - public void testSetMetadata() throws Exception { - IngestDocument.MetaData randomMetaData = randomFrom(IngestDocument.MetaData.values()); + public void testSetMetadataExceptVersion() throws Exception { + MetaData randomMetaData = randomFrom(MetaData.INDEX, MetaData.TYPE, MetaData.ID, MetaData.ROUTING, MetaData.PARENT); Processor processor = createSetProcessor(randomMetaData.getFieldName(), "_value", true); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue(randomMetaData.getFieldName(), String.class), Matchers.equalTo("_value")); } + public void testSetMetadataVersion() throws Exception { + long version = randomNonNegativeLong(); + Processor processor = createSetProcessor(MetaData.VERSION.getFieldName(), version, true); + IngestDocument ingestDocument = 
RandomDocumentPicks.randomIngestDocument(random()); + processor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue(MetaData.VERSION.getFieldName(), Long.class), Matchers.equalTo(version)); + } + + public void testSetMetadataVersionType() throws Exception { + String versionType = randomFrom("internal", "external", "external_gte"); + Processor processor = createSetProcessor(MetaData.VERSION_TYPE.getFieldName(), versionType, true); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + processor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue(MetaData.VERSION_TYPE.getFieldName(), String.class), Matchers.equalTo(versionType)); + } + private static Processor createSetProcessor(String fieldName, Object fieldValue, boolean overrideEnabled) { return new SetProcessor(randomAlphaOfLength(10), new TestTemplateService.MockTemplateScript.Factory(fieldName), ValueSource.wrap(fieldValue, TestTemplateService.instance()), overrideEnabled); diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/170_version.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/170_version.yml new file mode 100644 index 0000000000000..10c80c8e30525 --- /dev/null +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/170_version.yml @@ -0,0 +1,76 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test set document version & version type": + - do: + cluster.health: + wait_for_status: green + + - do: + ingest.put_pipeline: + id: "my_pipeline1" + body: > + { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "_version", + "value": 1 + } + }, + { + "set" : { + "field" : "_version_type", + "value": "internal" + } + } + ] + } + - match: { acknowledged: true } + + - do: + ingest.put_pipeline: + id: "my_pipeline2" + body: > + { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "_version", + "value": 1 + } + }, + { + "set" : { + "field" : "_version_type", + "value": "external" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: conflict + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline1" + body: {} + + - do: + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline2" + body: {} + - match: { _version: 1 } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java index 44373fb645cb0..50f63841231f8 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; @@ -108,7 +109,8 @@ static SearchRequest convert(SearchTemplateRequest searchTemplateRequest, Search return null; } - try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(xContentRegistry, source)) { + try (XContentParser parser = 
XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, source)) { SearchSourceBuilder builder = SearchSourceBuilder.searchSource(); builder.parseXContent(parser); builder.explain(searchTemplateRequest.isExplain()); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java index d5b5b4646bdde..df83471c37125 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java @@ -84,8 +84,7 @@ public ScriptClassInfo(Definition definition, Class baseClass) { componentType -> "Painless can only implement execute methods returning a whitelisted type but [" + baseClass.getName() + "#execute] returns [" + componentType.getName() + "] which isn't whitelisted."); - // Look up the argument names - Set argumentNames = new LinkedHashSet<>(); + // Look up the arguments List arguments = new ArrayList<>(); String[] argumentNamesConstant = readArgumentNamesConstant(baseClass); Class[] types = executeMethod.getParameterTypes(); @@ -95,7 +94,6 @@ public ScriptClassInfo(Definition definition, Class baseClass) { } for (int arg = 0; arg < types.length; arg++) { arguments.add(methodArgument(definition, types[arg], argumentNamesConstant[arg])); - argumentNames.add(argumentNamesConstant[arg]); } this.executeArguments = unmodifiableList(arguments); this.needsMethods = unmodifiableList(needsMethods); diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle index 066ae7bb1e023..db4a716af6513 100644 --- a/modules/percolator/build.gradle +++ b/modules/percolator/build.gradle @@ -31,7 +31,7 @@ dependencyLicenses { // Don't check the client's license. We know it.
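Several hunks in this commit make the same mechanical change: `createParser` now takes an explicit `DeprecationHandler`, forcing each call site to choose a policy for deprecated field names. A small sketch of the two policies seen above; the class name is invented and the `String` overload of `createParser` is assumed to mirror the byte and stream overloads used in the diff:

```java
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

import java.io.IOException;

class ParserPolicySketch {
    static void parseBothWays(String json) throws IOException {
        // Internal round-trips of our own bytes: a deprecated name would be a
        // bug, so fail hard.
        try (XContentParser strict = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            strict.map();
        }
        // User-supplied content such as REST bodies and templates: accept
        // deprecated names but route a warning through the deprecation logger.
        try (XContentParser lenient = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, json)) {
            lenient.map();
        }
    }
}
```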
dependencies = project.configurations.runtime.fileCollection { it.group.startsWith('org.elasticsearch') == false - } - project.configurations.provided + } - project.configurations.compileOnly } compileJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes" diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index a813fa4823629..a655184f89e7c 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -57,6 +57,7 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -345,7 +346,8 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep if (documents.isEmpty() == false) { builder.startArray(DOCUMENTS_FIELD.getPreferredName()); for (BytesReference document : documents) { - try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, document)) { + try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, document)) { parser.nextToken(); XContentHelper.copyCurrentStructure(builder.generator(), parser); } @@ -731,8 +733,9 @@ static PercolateQuery.QueryStore createStore(MappedFieldType queryBuilderFieldTy BytesRef qbSource = binaryDocValues.binaryValue(); if (qbSource.length > 0) { XContent xContent = PercolatorFieldMapper.QUERY_BUILDER_CONTENT_TYPE.xContent(); - try (XContentParser sourceParser = xContent.createParser(context.getXContentRegistry(), qbSource.bytes, - qbSource.offset, qbSource.length)) { + try (XContentParser sourceParser = xContent + .createParser(context.getXContentRegistry(), LoggingDeprecationHandler.INSTANCE, + qbSource.bytes, qbSource.offset, qbSource.length)) { return parseQuery(context, mapUnmappedFieldsAsString, sourceParser); } } else { diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java index 8ac09993b7c9b..edb69fcb93523 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java @@ -164,7 +164,7 @@ private static double computeDCG(List ratings) { private static final ParseField K_FIELD = new ParseField("k"); private static final ParseField NORMALIZE_FIELD = new ParseField("normalize"); private static final ParseField UNKNOWN_DOC_RATING_FIELD = new ParseField("unknown_doc_rating"); - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("dcg_at", true, + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("dcg_at", false, args -> { Boolean normalized = (Boolean) args[0]; Integer optK = (Integer) args[2]; diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/EvaluationMetric.java 
b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/EvaluationMetric.java index 982d8fa8d3004..c67511e051f96 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/EvaluationMetric.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/EvaluationMetric.java @@ -20,7 +20,7 @@ package org.elasticsearch.index.rankeval; import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.index.rankeval.RatedDocument.DocumentKey; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -36,7 +36,7 @@ * Implementations of {@link EvaluationMetric} need to provide a way to compute the quality metric for * a result list returned by some search (@link {@link SearchHits}) and a list of rated documents. */ -public interface EvaluationMetric extends ToXContent, NamedWriteable { +public interface EvaluationMetric extends ToXContentObject, NamedWriteable { /** * Returns a single metric representing the ranking quality of a set of returned diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java index 3426a6b9be425..770c91e82a6a1 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java @@ -213,7 +213,7 @@ public void addSummaryFields(List summaryFields) { private static final ParseField FIELDS_FIELD = new ParseField("summary_fields"); private static final ParseField TEMPLATE_ID_FIELD = new ParseField("template_id"); - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("requests", + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("request", a -> new RatedRequest((String) a[0], (List) a[1], (SearchSourceBuilder) a[2], (Map) a[3], (String) a[4])); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java index 78bbc01be9485..ea14e51512b24 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.rankeval; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.ToXContent; @@ -39,6 +40,8 @@ import static org.elasticsearch.index.rankeval.EvaluationMetric.filterUnknownDocuments; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.hamcrest.Matchers.startsWith; public class DiscountedCumulativeGainTests extends ESTestCase { @@ -243,6 +246,20 @@ public void testXContentRoundtrip() throws IOException { } } + public void testXContentParsingIsNotLenient() throws IOException { + DiscountedCumulativeGain testItem = createTestItem(); + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference originalBytes = 
toShuffledXContent(testItem, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); + BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, null, random()); + try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { + parser.nextToken(); + parser.nextToken(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> DiscountedCumulativeGain.fromXContent(parser)); + assertThat(exception.getMessage(), startsWith("[dcg_at] unknown field")); + } + } + public void testSerialization() throws IOException { DiscountedCumulativeGain original = createTestItem(); DiscountedCumulativeGain deserialized = ESTestCase.copyWriteable(original, new NamedWriteableRegistry(Collections.emptyList()), diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java index 42f7e32671f13..8ab4f146ff724 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.rankeval; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.ToXContent; @@ -40,6 +41,8 @@ import java.util.Vector; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.hamcrest.Matchers.startsWith; public class MeanReciprocalRankTests extends ESTestCase { @@ -169,6 +172,20 @@ public void testXContentRoundtrip() throws IOException { } } + public void testXContentParsingIsNotLenient() throws IOException { + MeanReciprocalRank testItem = createTestItem(); + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference originalBytes = toShuffledXContent(testItem, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); + BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, null, random()); + try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { + parser.nextToken(); + parser.nextToken(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> MeanReciprocalRank.fromXContent(parser)); + assertThat(exception.getMessage(), startsWith("[reciprocal_rank] unknown field")); + } + } + /** * Create SearchHits for testing, starting from dociId 'from' up to docId 'to'. 
* The search hits index also need to be provided diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java index 1b6a0881b7732..a6d18c3457fa1 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.rankeval; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.ToXContent; @@ -40,6 +41,8 @@ import java.util.Vector; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.hamcrest.Matchers.startsWith; public class PrecisionAtKTests extends ESTestCase { @@ -182,6 +185,19 @@ public void testXContentRoundtrip() throws IOException { } } + public void testXContentParsingIsNotLenient() throws IOException { + PrecisionAtK testItem = createTestItem(); + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference originalBytes = toShuffledXContent(testItem, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); + BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, null, random()); + try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { + parser.nextToken(); + parser.nextToken(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> PrecisionAtK.fromXContent(parser)); + assertThat(exception.getMessage(), startsWith("[precision] unknown field")); + } + } + public void testSerialization() throws IOException { PrecisionAtK original = createTestItem(); PrecisionAtK deserialized = ESTestCase.copyWriteable(original, new NamedWriteableRegistry(Collections.emptyList()), diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java index f3385514da702..2fed2c8311beb 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java @@ -19,12 +19,14 @@ package org.elasticsearch.index.rankeval; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -47,6 +49,8 @@ import java.util.function.Supplier; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.hamcrest.Matchers.startsWith; public class RankEvalSpecTests extends ESTestCase { @@ -123,6 +127,17 @@ 
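These `testXContentParsingIsNotLenient` tests pair with the parser flag flipped from `true` to `false` earlier in the commit (for example on `dcg_at`): with leniency off, `ConstructingObjectParser` rejects unknown fields instead of skipping them. A minimal sketch of the pattern, using a made-up metric that parses only `k`:

```java
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

// Made-up metric, used only to illustrate the strictness flag.
class KOnlyMetric {
    // The second constructor argument is ignoreUnknownFields: passing false
    // makes an unexpected field fail with "[k_only] unknown field ..." (which
    // is what the new tests assert) instead of being silently skipped.
    static final ConstructingObjectParser<KOnlyMetric, Void> PARSER =
            new ConstructingObjectParser<>("k_only", false, args -> new KOnlyMetric((Integer) args[0]));

    static {
        PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField("k"));
    }

    final int k;

    KOnlyMetric(int k) {
        this.k = k;
    }

    static KOnlyMetric fromXContent(XContentParser parser) {
        return PARSER.apply(parser, null);
    }
}
```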
public void testXContentRoundtrip() throws IOException { } } + public void testXContentParsingIsNotLenient() throws IOException { + RankEvalSpec testItem = createTestItem(); + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference originalBytes = toShuffledXContent(testItem, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); + BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, null, random()); + try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { + Exception exception = expectThrows(Exception.class, () -> RankEvalSpec.parse(parser)); + assertThat(exception.getMessage(), startsWith("[rank_eval] failed to parse field")); + } + } + public void testSerialization() throws IOException { RankEvalSpec original = createTestItem(); RankEvalSpec deserialized = copy(original); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedDocumentTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedDocumentTests.java index 5ec9692d83ad6..cd38233bfa9a9 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedDocumentTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedDocumentTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.rankeval; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -31,6 +32,8 @@ import java.util.Collections; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.hamcrest.Matchers.startsWith; public class RatedDocumentTests extends ESTestCase { @@ -50,6 +53,17 @@ public void testXContentParsing() throws IOException { } } + public void testXContentParsingIsNotLenient() throws IOException { + RatedDocument testItem = createRatedDocument(); + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference originalBytes = toShuffledXContent(testItem, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); + BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, null, random()); + try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { + Exception exception = expectThrows(IllegalArgumentException.class, () -> RatedDocument.fromXContent(parser)); + assertThat(exception.getMessage(), startsWith("[rated_document] unknown field")); + } + } + public void testSerialization() throws IOException { RatedDocument original = createRatedDocument(); RatedDocument deserialized = ESTestCase.copyWriteable(original, new NamedWriteableRegistry(Collections.emptyList()), diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java index e46d253dd4d92..0f23178c68391 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.index.rankeval; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; 
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -48,6 +50,8 @@ import static java.util.Collections.emptyList; import static java.util.stream.Collectors.toList; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.hamcrest.Matchers.startsWith; public class RatedRequestsTests extends ESTestCase { @@ -123,6 +127,22 @@ public void testXContentRoundtrip() throws IOException { } } + public void testXContentParsingIsNotLenient() throws IOException { + RatedRequest testItem = createTestItem(randomBoolean()); + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference originalBytes = toShuffledXContent(testItem, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); + BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, null, random()); + try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { + Exception exception = expectThrows(Exception.class, () -> RatedRequest.fromXContent(parser)); + if (exception instanceof IllegalArgumentException) { + assertThat(exception.getMessage(), startsWith("[request] unknown field")); + } + if (exception instanceof ParsingException) { + assertThat(exception.getMessage(), startsWith("[request] failed to parse field")); + } + } + } + public void testSerialization() throws IOException { RatedRequest original = createTestItem(randomBoolean()); RatedRequest deserialized = copy(original); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java index d1dc5de831fc7..ad1385541a6b3 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.GenericAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; @@ -89,7 +90,8 @@ private XContentParser extractRequestSpecificFields(RestRequest restRequest, consumer.getValue().accept(value); } } - return parser.contentType().xContent().createParser(parser.getXContentRegistry(), builder.map(body).bytes()); + return parser.contentType().xContent().createParser(parser.getXContentRegistry(), + parser.getDeprecationHandler(), builder.map(body).bytes().streamInput()); } } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index 90f4afbe440ae..06000d156f969 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import 
org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContent; @@ -73,7 +74,8 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler buildRequest(ScrollableHitSource.Hit doc) final XContentType mainRequestXContentType = mainRequest.getDestination().getContentType(); if (mainRequestXContentType != null && doc.getXContentType() != mainRequestXContentType) { // we need to convert - try (XContentParser parser = sourceXContentType.xContent().createParser(NamedXContentRegistry.EMPTY, doc.getSource()); + try (XContentParser parser = sourceXContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, doc.getSource().streamInput()); XContentBuilder builder = XContentBuilder.builder(mainRequestXContentType.xContent())) { parser.nextToken(); builder.copyCurrentStructure(parser); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java index 50769cc92310b..ccb19fd62c814 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; @@ -129,7 +130,8 @@ static Map initialSearchParams(SearchRequest searchRequest, Vers static HttpEntity initialSearchEntity(SearchRequest searchRequest, BytesReference query, Version remoteVersion) { // EMPTY is safe here because we're not calling namedObject try (XContentBuilder entity = JsonXContent.contentBuilder(); - XContentParser queryParser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, query)) { + XContentParser queryParser = XContentHelper + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, query)) { entity.startObject(); entity.field("query"); { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java index 05295c1d4da4e..9470424b381e6 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java @@ -30,22 +30,18 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.ESLoggerFactory; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; -import java.util.LinkedList; import java.util.List; import java.util.Locale; import java.util.Optional; -import java.util.Queue; import java.util.concurrent.atomic.AtomicBoolean; -import 
java.util.stream.Collectors; public class Netty4Utils { @@ -172,7 +168,8 @@ public static void closeChannels(final Collection channels) throws IOEx * @param cause the throwable to test */ public static void maybeDie(final Throwable cause) { - final Optional maybeError = maybeError(cause); + final Logger logger = ESLoggerFactory.getLogger(Netty4Utils.class); + final Optional maybeError = ExceptionsHelper.maybeError(cause, logger); if (maybeError.isPresent()) { /* * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many @@ -182,9 +179,7 @@ public static void maybeDie(final Throwable cause) { */ try { // try to log the current stack trace - final StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace(); - final String formatted = Arrays.stream(stackTrace).skip(1).map(e -> "\tat " + e).collect(Collectors.joining("\n")); - final Logger logger = ESLoggerFactory.getLogger(Netty4Utils.class); + final String formatted = ExceptionsHelper.formatStackTrace(Thread.currentThread().getStackTrace()); logger.error("fatal error on the network layer\n{}", formatted); } finally { new Thread( @@ -196,40 +191,4 @@ public static void maybeDie(final Throwable cause) { } } - static final int MAX_ITERATIONS = 1024; - - /** - * Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable. - * - * @param cause the root throwable - * - * @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable - */ - static Optional maybeError(final Throwable cause) { - // early terminate if the cause is already an error - if (cause instanceof Error) { - return Optional.of((Error) cause); - } - - final Queue queue = new LinkedList<>(); - queue.add(cause); - int iterations = 0; - while (!queue.isEmpty()) { - iterations++; - if (iterations > MAX_ITERATIONS) { - ESLoggerFactory.getLogger(Netty4Utils.class).warn("giving up looking for fatal errors on the network layer", cause); - break; - } - final Throwable current = queue.remove(); - if (current instanceof Error) { - return Optional.of((Error) current); - } - Collections.addAll(queue, current.getSuppressed()); - if (current.getCause() != null) { - queue.add(current.getCause()); - } - } - return Optional.empty(); - } - } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java index 67368cb577a81..acd71749e2333 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Level; import org.elasticsearch.ESNetty4IntegTestCase; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; -import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.MockLogAppender; @@ -37,12 +36,12 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase { public void setUp() throws Exception { super.setUp(); appender = new MockLogAppender(); - ServerLoggers.addAppender(Loggers.getLogger(ESLoggingHandler.class), appender); + Loggers.addAppender(Loggers.getLogger(ESLoggingHandler.class), appender); 
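The `maybeError` walk deleted above, a breadth-first pass over causes and suppressed exceptions capped at `MAX_ITERATIONS`, moves into `ExceptionsHelper` together with the stack-trace formatting. A standalone sketch mirroring the removed traversal (only the class name is invented):

```java
import java.util.ArrayDeque;
import java.util.Collections;
import java.util.Optional;
import java.util.Queue;

final class FatalErrorSketch {
    static final int MAX_ITERATIONS = 1024;

    /** Breadth-first search for an Error among the causes and suppressed
     *  exceptions of the given throwable, giving up after MAX_ITERATIONS. */
    static Optional<Error> maybeError(Throwable cause) {
        if (cause instanceof Error) {
            return Optional.of((Error) cause); // fast path: already fatal
        }
        Queue<Throwable> queue = new ArrayDeque<>();
        queue.add(cause);
        int iterations = 0;
        while (queue.isEmpty() == false) {
            if (++iterations > MAX_ITERATIONS) {
                break; // pathological exception graph: stop looking
            }
            Throwable current = queue.remove();
            if (current instanceof Error) {
                return Optional.of((Error) current);
            }
            Collections.addAll(queue, current.getSuppressed());
            if (current.getCause() != null) {
                queue.add(current.getCause());
            }
        }
        return Optional.empty();
    }
}
```

Hoisting this into `ExceptionsHelper` lets the new `maybeError(cause, logger)` signature take the caller's logger for the give-up warning instead of hard-coding the `Netty4Utils` one.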
appender.start(); } public void tearDown() throws Exception { - ServerLoggers.removeAppender(Loggers.getLogger(ESLoggingHandler.class), appender); + Loggers.removeAppender(Loggers.getLogger(ESLoggingHandler.class), appender); appender.stop(); super.tearDown(); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4UtilsTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4UtilsTests.java index 43be6f0efdda0..8372a8540b8be 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4UtilsTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4UtilsTests.java @@ -22,7 +22,6 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.CompositeByteBuf; import io.netty.buffer.Unpooled; -import io.netty.handler.codec.DecoderException; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.AbstractBytesReferenceTestCase; import org.elasticsearch.common.bytes.BytesArray; @@ -33,9 +32,6 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.Optional; - -import static org.hamcrest.CoreMatchers.equalTo; public class Netty4UtilsTests extends ESTestCase { @@ -79,60 +75,6 @@ public void testToChannelBuffer() throws IOException { assertArrayEquals(BytesReference.toBytes(ref), BytesReference.toBytes(bytesReference)); } - public void testMaybeError() { - final Error outOfMemoryError = new OutOfMemoryError(); - assertError(outOfMemoryError, outOfMemoryError); - - final DecoderException decoderException = new DecoderException(outOfMemoryError); - assertError(decoderException, outOfMemoryError); - - final Exception e = new Exception(); - e.addSuppressed(decoderException); - assertError(e, outOfMemoryError); - - final int depth = randomIntBetween(1, 16); - Throwable cause = new Exception(); - boolean fatal = false; - Error error = null; - for (int i = 0; i < depth; i++) { - final int length = randomIntBetween(1, 4); - for (int j = 0; j < length; j++) { - if (!fatal && rarely()) { - error = new Error(); - cause.addSuppressed(error); - fatal = true; - } else { - cause.addSuppressed(new Exception()); - } - } - if (!fatal && rarely()) { - cause = error = new Error(cause); - fatal = true; - } else { - cause = new Exception(cause); - } - } - if (fatal) { - assertError(cause, error); - } else { - assertFalse(Netty4Utils.maybeError(cause).isPresent()); - } - - assertFalse(Netty4Utils.maybeError(new Exception(new DecoderException())).isPresent()); - - Throwable chain = outOfMemoryError; - for (int i = 0; i < Netty4Utils.MAX_ITERATIONS; i++) { - chain = new Exception(chain); - } - assertFalse(Netty4Utils.maybeError(chain).isPresent()); - } - - private void assertError(final Throwable cause, final Error error) { - final Optional maybeError = Netty4Utils.maybeError(cause); - assertTrue(maybeError.isPresent()); - assertThat(maybeError.get(), equalTo(error)); - } - private BytesReference getRandomizedBytesReference(int length) throws IOException { // we know bytes stream output always creates a paged bytes reference, we use it to create randomized content ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays); diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesService.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesService.java index 4c90fd5c3731b..7e8fb7cf396d0 100644 --- 
a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesService.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesService.java @@ -31,7 +31,7 @@ import java.util.List; import java.util.function.Function; -public interface GceInstancesService { +public interface GceInstancesService extends Closeable { /** * GCE API Version: Elasticsearch/GceCloud/1.0 diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java index ea5b44d994e86..ed0bf07d75c7b 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java @@ -19,7 +19,6 @@ package org.elasticsearch.cloud.gce; -import java.io.Closeable; import java.io.IOException; import java.security.GeneralSecurityException; import java.util.ArrayList; @@ -39,7 +38,6 @@ import com.google.api.services.compute.model.InstanceList; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.SpecialPermission; import org.elasticsearch.cloud.gce.util.Access; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; @@ -48,7 +46,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.gce.RetryHttpInitializerWrapper; -public class GceInstancesServiceImpl extends AbstractComponent implements GceInstancesService, Closeable { +public class GceInstancesServiceImpl extends AbstractComponent implements GceInstancesService { // all settings just used for testing - not registered by default public static final Setting GCE_VALIDATE_CERTIFICATES = diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java index 139416c1e6403..04685e38b2251 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java @@ -53,7 +53,7 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close private final Settings settings; private static final Logger logger = Loggers.getLogger(GceDiscoveryPlugin.class); // stashed when created in order to properly close - private final SetOnce<GceInstancesServiceImpl> gceInstancesService = new SetOnce<>(); + private final SetOnce<GceInstancesService> gceInstancesService = new SetOnce<>(); static { /* @@ -72,13 +72,16 @@ public GceDiscoveryPlugin(Settings settings) { logger.trace("starting gce discovery plugin..."); } - + // overridable for tests + protected GceInstancesService createGceInstancesService() { + return new GceInstancesServiceImpl(settings); + } @Override public Map> getZenHostsProviders(TransportService transportService, NetworkService networkService) { return Collections.singletonMap(GCE, () -> { - gceInstancesService.set(new GceInstancesServiceImpl(settings)); + gceInstancesService.set(createGceInstancesService()); return new GceUnicastHostsProvider(settings, gceInstancesService.get(), transportService, networkService); }); } diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java
b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java index 4b339658f8800..3ceacdedcf7f2 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java @@ -19,88 +19,57 @@ package org.elasticsearch.discovery.gce; -import com.sun.net.httpserver.Headers; -import com.sun.net.httpserver.HttpServer; -import com.sun.net.httpserver.HttpsConfigurator; -import com.sun.net.httpserver.HttpsServer; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.cloud.gce.GceInstancesServiceImpl; -import org.elasticsearch.cloud.gce.GceMetadataService; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Setting; +import com.google.api.services.compute.model.Instance; +import com.google.api.services.compute.model.NetworkInterface; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.cloud.gce.GceInstancesService; +import org.elasticsearch.cloud.gce.util.Access; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.plugin.discovery.gce.GceDiscoveryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.elasticsearch.transport.TransportService; +import org.junit.After; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManagerFactory; import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.KeyStore; -import java.util.Arrays; +import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.List; -import java.util.concurrent.ExecutionException; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import static java.util.Collections.singletonList; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; -@ESIntegTestCase.ClusterScope(supportsDedicatedMasters = false, numDataNodes = 2, numClientNodes = 0) -@SuppressForbidden(reason = "use http server") -// TODO this should be a IT but currently all ITs in this project run against a real cluster +@ESIntegTestCase.ClusterScope(supportsDedicatedMasters = false, numDataNodes = 0, numClientNodes = 0) public class GceDiscoverTests extends ESIntegTestCase { - public static class TestPlugin extends Plugin { - @Override - public List> getSettings() { - return Arrays.asList(GceMetadataService.GCE_HOST, GceInstancesServiceImpl.GCE_ROOT_URL, - GceInstancesServiceImpl.GCE_VALIDATE_CERTIFICATES); - } - } + /** Holds a list of the current discovery nodes started in tests **/ + private static final Map nodes = new ConcurrentHashMap<>(); - private static HttpsServer httpsServer; - private static HttpServer httpServer; - private static Path logDir; + @After + public void clearGceNodes() { + nodes.clear(); + } @Override protected Collection> nodePlugins() { - return 
Arrays.asList(GceDiscoveryPlugin.class, TestPlugin.class); + return singletonList(TestPlugin.class); } @Override protected Settings nodeSettings(int nodeOrdinal) { - Path resolve = logDir.resolve(Integer.toString(nodeOrdinal)); - try { - Files.createDirectory(resolve); - } catch (IOException e) { - throw new RuntimeException(e); - } - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put("discovery.zen.hosts_provider", "gce") - .put("path.logs", resolve) - .put("transport.tcp.port", 0) - .put("node.portsfile", "true") - .put("cloud.gce.project_id", "testproject") - .put("cloud.gce.zone", "primaryzone") - .put("cloud.gce.host", "http://" + httpServer.getAddress().getHostName() + ":" + httpServer.getAddress().getPort()) - .put("cloud.gce.root_url", "https://" + httpsServer.getAddress().getHostName() + - ":" + httpsServer.getAddress().getPort()) - // this is annoying but by default the client pulls a static list of trusted CAs - .put("cloud.gce.validate_certificates", false) + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("discovery.zen.hosts_provider", "gce") + .put("cloud.gce.project_id", "test") + .put("cloud.gce.zone", "test") + // Make the test run faster + .put(ZenDiscovery.JOIN_TIMEOUT_SETTING.getKey(), "1s") + .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "500ms") .build(); } @@ -109,102 +78,102 @@ protected boolean addTestZenDiscovery() { return false; } - @BeforeClass - public static void startHttpd() throws Exception { - logDir = createTempDir(); - SSLContext sslContext = getSSLContext(); - httpsServer = MockHttpServer.createHttps(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); - httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); - httpsServer.setHttpsConfigurator(new HttpsConfigurator(sslContext)); - httpServer.createContext("/computeMetadata/v1/instance/service-accounts/default/token", (s) -> { - String response = GceMockUtils.readGoogleInternalJsonResponse( - "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token"); - byte[] responseAsBytes = response.getBytes(StandardCharsets.UTF_8); - s.sendResponseHeaders(200, responseAsBytes.length); - OutputStream responseBody = s.getResponseBody(); - responseBody.write(responseAsBytes); - responseBody.close(); - }); - - httpsServer.createContext("/compute/v1/projects/testproject/zones/primaryzone/instances", (s) -> { - Headers headers = s.getResponseHeaders(); - headers.add("Content-Type", "application/json; charset=UTF-8"); - Logger logger = Loggers.getLogger(GceDiscoverTests.class); - try { - Path[] files = FileSystemUtils.files(logDir); - StringBuilder builder = new StringBuilder("{\"id\": \"dummy\",\"items\":["); - int foundFiles = 0; - for (int i = 0; i < files.length; i++) { - Path resolve = files[i].resolve("transport.ports"); - if (Files.exists(resolve)) { - if (foundFiles++ > 0) { - builder.append(","); - } - List addressses = Files.readAllLines(resolve); - Collections.shuffle(addressses, random()); - logger.debug("addresses for node: [{}] published addresses [{}]", files[i].getFileName(), addressses); - builder.append("{\"description\": \"ES Node ").append(files[i].getFileName()) - .append("\",\"networkInterfaces\": [ {"); - builder.append("\"networkIP\": \"").append(addressses.get(0)).append("\"}],"); - builder.append("\"status\" : \"RUNNING\"}"); - } - } - builder.append("]}"); - String responseString = builder.toString(); - final byte[] 
responseAsBytes = responseString.getBytes(StandardCharsets.UTF_8); - s.sendResponseHeaders(200, responseAsBytes.length); - OutputStream responseBody = s.getResponseBody(); - responseBody.write(responseAsBytes); - responseBody.close(); - } catch (Exception e) { - // - byte[] responseAsBytes = ("{ \"error\" : {\"message\" : \"" + e.toString() + "\" } }").getBytes(StandardCharsets.UTF_8); - s.sendResponseHeaders(500, responseAsBytes.length); - OutputStream responseBody = s.getResponseBody(); - responseBody.write(responseAsBytes); - responseBody.close(); - } - - - }); - httpsServer.start(); - httpServer.start(); + public void testJoin() { + // start master node + final String masterNode = internalCluster().startMasterOnlyNode(); + registerGceNode(masterNode); + + ClusterStateResponse clusterStateResponse = client(masterNode).admin().cluster().prepareState() + .setMasterNodeTimeout("1s") + .clear() + .setNodes(true) + .get(); + assertNotNull(clusterStateResponse.getState().nodes().getMasterNodeId()); + + // start another node + final String secondNode = internalCluster().startNode(); + registerGceNode(secondNode); + clusterStateResponse = client(secondNode).admin().cluster().prepareState() + .setMasterNodeTimeout("1s") + .clear() + .setNodes(true) + .setLocal(true) + .get(); + assertNotNull(clusterStateResponse.getState().nodes().getMasterNodeId()); + + // wait for the cluster to form + assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(2)).get()); + assertNumberOfNodes(2); + + // add one more node and wait for it to join + final String thirdNode = internalCluster().startDataOnlyNode(); + registerGceNode(thirdNode); + assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(3)).get()); + assertNumberOfNodes(3); } - private static SSLContext getSSLContext() throws Exception{ - char[] passphrase = "keypass".toCharArray(); - KeyStore ks = KeyStore.getInstance("JKS"); - try (InputStream stream = GceDiscoverTests.class.getResourceAsStream("/test-node.jks")) { - assertNotNull("can't find keystore file", stream); - ks.load(stream, passphrase); + /** + * Register an existing node as a GCE node + * + * @param nodeName the name of the node + */ + private static void registerGceNode(final String nodeName) { + final TransportService transportService = internalCluster().getInstance(TransportService.class, nodeName); + assertNotNull(transportService); + final DiscoveryNode discoveryNode = transportService.getLocalNode(); + assertNotNull(discoveryNode); + if (nodes.put(discoveryNode.getName(), discoveryNode) != null) { + throw new IllegalArgumentException("Node [" + discoveryNode.getName() + "] cannot be registered twice"); } - KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509"); - kmf.init(ks, passphrase); - TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509"); - tmf.init(ks); - SSLContext ssl = SSLContext.getInstance("TLS"); - ssl.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null); - return ssl; } - @AfterClass - public static void stopHttpd() throws IOException { - for (int i = 0; i < internalCluster().size(); i++) { - // shut them all down otherwise we get spammed with connection refused exceptions - internalCluster().stopRandomDataNode(); - } - httpsServer.stop(0); - httpServer.stop(0); - httpsServer = null; - httpServer = null; - logDir = null; + /** + * Asserts that the cluster nodes info contains an expected number of nodes + * + * @param expected the expected number of nodes + */ + private
static void assertNumberOfNodes(final int expected) { + assertEquals(expected, client().admin().cluster().prepareNodesInfo().clear().get().getNodes().size()); } - public void testJoin() throws ExecutionException, InterruptedException { - // only wait for the cluster to form - assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(2)).get()); - // add one more node and wait for it to join - internalCluster().startDataOnlyNode(); - assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(3)).get()); + /** + * Test plugin that exposes internal test cluster nodes as if they were real GCE nodes. + * Use the {@link #registerGceNode(String)} method to expose nodes in the tests. + */ + public static class TestPlugin extends GceDiscoveryPlugin { + + public TestPlugin(Settings settings) { + super(settings); + } + + @Override + protected GceInstancesService createGceInstancesService() { + return new GceInstancesService() { + @Override + public Collection instances() { + return Access.doPrivileged(() -> { + final List instances = new ArrayList<>(); + + for (DiscoveryNode discoveryNode : nodes.values()) { + Instance instance = new Instance(); + instance.setName(discoveryNode.getName()); + instance.setStatus("STARTED"); + + NetworkInterface networkInterface = new NetworkInterface(); + networkInterface.setNetworkIP(discoveryNode.getAddress().toString()); + instance.setNetworkInterfaces(singletonList(networkInterface)); + + instances.add(instance); + } + + return instances; + }); + } + + @Override + public void close() throws IOException { + } + }; + } } } diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapperTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapperTests.java index a49124749feb5..4aad3737d30f0 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapperTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/RetryHttpInitializerWrapperTests.java @@ -160,7 +160,7 @@ public void testIOExceptionRetry() throws Exception { .build(); MockSleeper mockSleeper = new MockSleeper(); RetryHttpInitializerWrapper retryHttpInitializerWrapper = new RetryHttpInitializerWrapper(credential, mockSleeper, - TimeValue.timeValueMillis(500)); + TimeValue.timeValueSeconds(30L)); Compute client = new Compute.Builder(fakeTransport, new JacksonFactory(), null) .setHttpRequestInitializer(retryHttpInitializerWrapper) diff --git a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java index 9c3366d160eb9..97ca1c0b19774 100644 --- a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java +++ b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java @@ -29,6 +29,7 @@ import org.elasticsearch.SpecialPermission; import org.elasticsearch.bootstrap.FilePermissionUtils; import org.elasticsearch.bootstrap.JarHell; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; @@ -52,8 +53,8 @@ import java.util.Arrays; import java.util.HashSet; import java.util.LinkedHashSet; -import java.util.Set; import java.util.PropertyPermission; +import java.util.Set; /** * Runs tika with limited parsers and limited
permissions. @@ -98,7 +99,6 @@ final class TikaImpl { /** * parses with tika, throwing any exception hit while parsing the document */ - // only package private for testing! static String parse(final byte content[], final Metadata metadata, final int limit) throws TikaException, IOException { // check that its not unprivileged code like a script SpecialPermission.check(); @@ -161,6 +161,15 @@ static PermissionCollection getRestrictedPermissions() { perms.add(new ReflectPermission("suppressAccessChecks")); // xmlbeans, use by POI, needs to get the context classloader perms.add(new RuntimePermission("getClassLoader")); + // ZipFile needs accessDeclaredMembers on JDK 10; cf. https://bugs.openjdk.java.net/browse/JDK-8187485 + if (JavaVersion.current().compareTo(JavaVersion.parse("10")) >= 0) { + /* + * See if this permission can be removed in JDK 11, bump the version here to 12 if not. If this permission can be removed, also + * remove the grant in the plugin-security.policy. + */ + assert JavaVersion.current().compareTo(JavaVersion.parse("11")) < 0; + perms.add(new RuntimePermission("accessDeclaredMembers")); + } perms.setReadOnly(); return perms; } diff --git a/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy b/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy index adf76991b59a9..0cd359a99731b 100644 --- a/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy @@ -29,4 +29,6 @@ grant { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // needed by xmlbeans, as part of POI for MS xml docs permission java.lang.RuntimePermission "getClassLoader"; + // ZipFile needs accessDeclaredMembers on Java 10 + permission java.lang.RuntimePermission "accessDeclaredMembers"; }; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 78eee24a34de5..3337c07e6eece 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -62,4 +62,23 @@ void moveBlob(String account, LocationMode mode, String container, String source void writeBlob(String account, LocationMode mode, String container, String blobName, InputStream inputStream, long blobSize) throws URISyntaxException, StorageException; + + static InputStream giveSocketPermissionsToStream(InputStream stream) { + return new InputStream() { + @Override + public int read() throws IOException { + return SocketAccess.doPrivilegedIOException(stream::read); + } + + @Override + public int read(byte[] b) throws IOException { + return SocketAccess.doPrivilegedIOException(() -> stream.read(b)); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + return SocketAccess.doPrivilegedIOException(() -> stream.read(b, off, len)); + } + }; + } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java index 2b8992386eb2d..f21dbdfd269f4 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java +++ 
b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java @@ -25,6 +25,7 @@ import com.microsoft.azure.storage.RetryExponentialRetry; import com.microsoft.azure.storage.RetryPolicy; import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.BlobInputStream; import com.microsoft.azure.storage.blob.BlobListingDetails; import com.microsoft.azure.storage.blob.BlobProperties; import com.microsoft.azure.storage.blob.CloudBlobClient; @@ -249,12 +250,14 @@ public void deleteBlob(String account, LocationMode mode, String container, Stri } @Override - public InputStream getInputStream(String account, LocationMode mode, String container, String blob) - throws URISyntaxException, StorageException { + public InputStream getInputStream(String account, LocationMode mode, String container, String blob) throws URISyntaxException, + StorageException { logger.trace("reading container [{}], blob [{}]", container, blob); CloudBlobClient client = this.getSelectedClient(account, mode); CloudBlockBlob blockBlobReference = client.getContainerReference(container).getBlockBlobReference(blob); - return SocketAccess.doPrivilegedException(() -> blockBlobReference.openInputStream(null, null, generateOperationContext(account))); + BlobInputStream is = SocketAccess.doPrivilegedException(() -> + blockBlobReference.openInputStream(null, null, generateOperationContext(account))); + return AzureStorageService.giveSocketPermissionsToStream(is); } @Override diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java index c4db24a97e958..da8b85430067c 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java @@ -39,6 +39,15 @@ public final class SocketAccess { private SocketAccess() {} + public static T doPrivilegedIOException(PrivilegedExceptionAction operation) throws IOException { + SpecialPermission.check(); + try { + return AccessController.doPrivileged(operation); + } catch (PrivilegedActionException e) { + throw (IOException) e.getCause(); + } + } + public static T doPrivilegedException(PrivilegedExceptionAction operation) throws StorageException, URISyntaxException { SpecialPermission.check(); try { diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 6dfe2db628013..68b84594d62ca 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -32,8 +32,10 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; +import java.net.SocketPermission; import java.net.URISyntaxException; import java.nio.file.NoSuchFileException; +import java.security.AccessController; import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -81,7 +83,7 @@ public InputStream getInputStream(String account, LocationMode mode, String cont if (!blobExists(account, mode, container, blob)) { throw new NoSuchFileException("missing blob [" + blob + "]"); } - return 
new ByteArrayInputStream(blobs.get(blob).toByteArray()); + return AzureStorageService.giveSocketPermissionsToStream(new PermissionRequiringInputStream(blobs.get(blob).toByteArray())); } @Override @@ -170,4 +172,29 @@ public static boolean endsWithIgnoreCase(String str, String suffix) { String lcPrefix = suffix.toLowerCase(Locale.ROOT); return lcStr.equals(lcPrefix); } + + private static class PermissionRequiringInputStream extends ByteArrayInputStream { + + private PermissionRequiringInputStream(byte[] buf) { + super(buf); + } + + @Override + public synchronized int read() { + AccessController.checkPermission(new SocketPermission("*", "connect")); + return super.read(); + } + + @Override + public int read(byte[] b) throws IOException { + AccessController.checkPermission(new SocketPermission("*", "connect")); + return super.read(b); + } + + @Override + public synchronized int read(byte[] b, int off, int len) { + AccessController.checkPermission(new SocketPermission("*", "connect")); + return super.read(b, off, len); + } + } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java index f3fada5a46319..7b985ebd176d6 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java @@ -23,13 +23,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; -import java.io.IOException; import java.util.Locale; public class GoogleCloudStorageBlobStoreContainerTests extends ESBlobStoreContainerTestCase { + @Override - protected BlobStore newBlobStore() throws IOException { + protected BlobStore newBlobStore() { String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, MockHttpTransport.newStorage(bucket, getTestName())); + return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, MockStorage.newStorageClient(bucket, getTestName())); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index ed65caf0d5932..dbad40ec08393 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -65,7 +65,7 @@ protected void createTestRepository(String name) { @BeforeClass public static void setUpStorage() { - storage.set(MockHttpTransport.newStorage(BUCKET, GoogleCloudStorageBlobStoreRepositoryTests.class.getName())); + storage.set(MockStorage.newStorageClient(BUCKET, GoogleCloudStorageBlobStoreRepositoryTests.class.getName())); } public static class MockGoogleCloudStoragePlugin extends GoogleCloudStoragePlugin { diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java index 
fe94237f6c976..00c0538d198bd 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java @@ -23,14 +23,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreTestCase; -import java.io.IOException; import java.util.Locale; public class GoogleCloudStorageBlobStoreTests extends ESBlobStoreTestCase { @Override - protected BlobStore newBlobStore() throws IOException { + protected BlobStore newBlobStore() { String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, MockHttpTransport.newStorage(bucket, getTestName())); + return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, MockStorage.newStorageClient(bucket, getTestName())); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java new file mode 100644 index 0000000000000..17255fa90ed2a --- /dev/null +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java @@ -0,0 +1,495 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.gcs; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.path.PathTrie; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.RestUtils; + +import java.io.BufferedInputStream; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +/** + * {@link GoogleCloudStorageTestServer} emulates a Google Cloud Storage service through a {@link #handle(String, String, byte[])} method + * that provides appropriate responses for specific requests like the real Google Cloud platform would do. It is largely based on official + * documentation available at https://cloud.google.com/storage/docs/json_api/v1/. 
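The class javadoc above describes the core contract: every client request is funneled through `handle(method, url, content)` and answered the way the real service would answer it. A rough same-package sketch of driving the server directly, assuming the default `https://www.googleapis.com` endpoint (all names are the ones declared in this new file, which are package-private):

```java
// Sketch only: relies on same-package access, since the constructor,
// createBucket(...) and Response are all package-private below.
GoogleCloudStorageTestServer server = new GoogleCloudStorageTestServer();
server.createBucket("bucket-1");

// Resolved by the "GET .../storage/v1/b/{bucket}" handler registered further down
GoogleCloudStorageTestServer.Response response =
        server.handle("GET", "https://www.googleapis.com/storage/v1/b/bucket-1", null);
assert response.status == RestStatus.OK; // body carries the JSON bucket resource
```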
+ */ +public class GoogleCloudStorageTestServer { + + private static byte[] EMPTY_BYTE = new byte[0]; + + /** List of the buckets stored on this test server **/ + private final Map buckets = ConcurrentCollections.newConcurrentMap(); + + /** Request handlers for the requests made by the Google Cloud Storage client **/ + private final PathTrie handlers; + + /** + * Creates a {@link GoogleCloudStorageTestServer} with the default endpoint + */ + GoogleCloudStorageTestServer() { + this("https://www.googleapis.com", true); + } + + /** + * Creates a {@link GoogleCloudStorageTestServer} with a custom endpoint, + * potentially prefixing the URL patterns to match with the endpoint name. + */ + GoogleCloudStorageTestServer(final String endpoint, final boolean prefixWithEndpoint) { + this.handlers = defaultHandlers(endpoint, prefixWithEndpoint, buckets); + } + + /** Creates a bucket in the test server **/ + void createBucket(final String bucketName) { + buckets.put(bucketName, new Bucket(bucketName)); + } + + public Response handle(final String method, final String url, byte[] content) throws IOException { + final Map params = new HashMap<>(); + + // Splits the URL to extract query string parameters + final String rawPath; + int questionMark = url.indexOf('?'); + if (questionMark != -1) { + rawPath = url.substring(0, questionMark); + RestUtils.decodeQueryString(url, questionMark + 1, params); + } else { + rawPath = url; + } + + final RequestHandler handler = handlers.retrieve(method + " " + rawPath, params); + if (handler != null) { + return handler.execute(url, params, content); + } else { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "No handler defined for request [method: " + method + ", url: " + url + "]"); + } + } + + @FunctionalInterface + interface RequestHandler { + + /** + * Simulates the execution of a Storage request and returns a corresponding response. + * + * @param url the request URL + * @param params the request URL parameters + * @param body the request body provided as a byte array + * @return the corresponding response + * + * @throws IOException if something goes wrong + */ + Response execute(String url, Map params, byte[] body) throws IOException; + } + + /** Builds the default request handlers **/ + private static PathTrie defaultHandlers(final String endpoint, + final boolean prefixWithEndpoint, + final Map buckets) { + + final PathTrie handlers = new PathTrie<>(RestUtils.REST_DECODER); + final String prefix = prefixWithEndpoint ? 
endpoint : ""; + + // GET Bucket + // + // https://cloud.google.com/storage/docs/json_api/v1/buckets/get + handlers.insert("GET " + prefix + "/storage/v1/b/{bucket}", (url, params, body) -> { + String name = params.get("bucket"); + if (Strings.hasText(name) == false) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "bucket name is missing"); + } + + if (buckets.containsKey(name)) { + return newResponse(RestStatus.OK, emptyMap(), buildBucketResource(name)); + } else { + return newError(RestStatus.NOT_FOUND, "bucket not found"); + } + }); + + // GET Object + // + // https://cloud.google.com/storage/docs/json_api/v1/objects/get + handlers.insert("GET " + prefix + "/storage/v1/b/{bucket}/o/{object}", (url, params, body) -> { + String objectName = params.get("object"); + if (Strings.hasText(objectName) == false) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); + } + + final Bucket bucket = buckets.get(params.get("bucket")); + if (bucket == null) { + return newError(RestStatus.NOT_FOUND, "bucket not found"); + } + + for (Map.Entry object : bucket.objects.entrySet()) { + if (object.getKey().equals(objectName)) { + return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(bucket.name, objectName, object.getValue())); + } + } + return newError(RestStatus.NOT_FOUND, "object not found"); + }); + + // Delete Object + // + // https://cloud.google.com/storage/docs/json_api/v1/objects/delete + handlers.insert("DELETE " + prefix + "/storage/v1/b/{bucket}/o/{object}", (url, params, body) -> { + String objectName = params.get("object"); + if (Strings.hasText(objectName) == false) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); + } + + final Bucket bucket = buckets.get(params.get("bucket")); + if (bucket == null) { + return newError(RestStatus.NOT_FOUND, "bucket not found"); + } + + final byte[] bytes = bucket.objects.remove(objectName); + if (bytes != null) { + return new Response(RestStatus.NO_CONTENT, emptyMap(), XContentType.JSON.mediaType(), EMPTY_BYTE); + } + return newError(RestStatus.NOT_FOUND, "object not found"); + }); + + // Insert Object (initialization) + // + // https://cloud.google.com/storage/docs/json_api/v1/objects/insert + handlers.insert("POST " + prefix + "/upload/storage/v1/b/{bucket}/o", (url, params, body) -> { + if ("resumable".equals(params.get("uploadType")) == false) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload type must be resumable"); + } + + final String objectName = params.get("name"); + if (Strings.hasText(objectName) == false) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); + } + + final Bucket bucket = buckets.get(params.get("bucket")); + if (bucket == null) { + return newError(RestStatus.NOT_FOUND, "bucket not found"); + } + + if (bucket.objects.put(objectName, EMPTY_BYTE) == null) { + String location = endpoint + "/upload/storage/v1/b/" + bucket.name + "/o?uploadType=resumable&upload_id=" + objectName; + return new Response(RestStatus.CREATED, singletonMap("Location", location), XContentType.JSON.mediaType(), EMPTY_BYTE); + } else { + return newError(RestStatus.CONFLICT, "object already exists"); + } + }); + + // Insert Object (upload) + // + // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload + handlers.insert("PUT " + prefix + "/upload/storage/v1/b/{bucket}/o", (url, params, body) -> { + String objectId = params.get("upload_id"); + if (Strings.hasText(objectId) == false) { + return
newError(RestStatus.INTERNAL_SERVER_ERROR, "upload id is missing"); + } + + final Bucket bucket = buckets.get(params.get("bucket")); + if (bucket == null) { + return newError(RestStatus.NOT_FOUND, "bucket not found"); + } + + if (bucket.objects.containsKey(objectId) == false) { + return newError(RestStatus.NOT_FOUND, "object name not found"); + } + + bucket.objects.put(objectId, body); + return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(bucket.name, objectId, body)); + }); + + // Copy Object + // + // https://cloud.google.com/storage/docs/json_api/v1/objects/copy + handlers.insert("POST " + prefix + "/storage/v1/b/{srcBucket}/o/{src}/copyTo/b/{destBucket}/o/{dest}", (url, params, body) -> { + String source = params.get("src"); + if (Strings.hasText(source) == false) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "source object name is missing"); + } + + final Bucket srcBucket = buckets.get(params.get("srcBucket")); + if (srcBucket == null) { + return newError(RestStatus.NOT_FOUND, "source bucket not found"); + } + + String dest = params.get("dest"); + if (Strings.hasText(dest) == false) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "destination object name is missing"); + } + + final Bucket destBucket = buckets.get(params.get("destBucket")); + if (destBucket == null) { + return newError(RestStatus.NOT_FOUND, "destination bucket not found"); + } + + final byte[] sourceBytes = srcBucket.objects.get(source); + if (sourceBytes == null) { + return newError(RestStatus.NOT_FOUND, "source object not found"); + } + + destBucket.objects.put(dest, sourceBytes); + return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(destBucket.name, dest, sourceBytes)); + }); + + // List Objects + // + // https://cloud.google.com/storage/docs/json_api/v1/objects/list + handlers.insert("GET " + prefix + "/storage/v1/b/{bucket}/o", (url, params, body) -> { + final Bucket bucket = buckets.get(params.get("bucket")); + if (bucket == null) { + return newError(RestStatus.NOT_FOUND, "bucket not found"); + } + + final XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.field("kind", "storage#objects"); + { + builder.startArray("items"); + + final String prefixParam = params.get("prefix"); + for (Map.Entry object : bucket.objects.entrySet()) { + if (prefixParam != null && object.getKey().startsWith(prefixParam) == false) { + continue; + } + buildObjectResource(builder, bucket.name, object.getKey(), object.getValue()); + } + builder.endArray(); + } + builder.endObject(); + return newResponse(RestStatus.OK, emptyMap(), builder); + }); + + // Download Object + // + // https://cloud.google.com/storage/docs/request-body + handlers.insert("GET " + prefix + "/download/storage/v1/b/{bucket}/o/{object}", (url, params, body) -> { + String object = params.get("object"); + if (Strings.hasText(object) == false) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "object id is missing"); + } + + final Bucket bucket = buckets.get(params.get("bucket")); + if (bucket == null) { + return newError(RestStatus.NOT_FOUND, "bucket not found"); + } + + if (bucket.objects.containsKey(object) == false) { + return newError(RestStatus.NOT_FOUND, "object name not found"); + } + + return new Response(RestStatus.OK, emptyMap(), "application/octet-stream", bucket.objects.get(object)); + }); + + // Batch + // + // https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch + handlers.insert("POST " + prefix + "/batch", (url, params, req) -> { + final List batchedResponses = new 
ArrayList<>(); + + // A batch request body looks like this: + // + // --__END_OF_PART__ + // Content-Length: 71 + // Content-Type: application/http + // content-id: 1 + // content-transfer-encoding: binary + // + // DELETE https://www.googleapis.com/storage/v1/b/ohifkgu/o/foo%2Ftest HTTP/1.1 + // + // + // --__END_OF_PART__ + // Content-Length: 71 + // Content-Type: application/http + // content-id: 2 + // content-transfer-encoding: binary + // + // DELETE https://www.googleapis.com/storage/v1/b/ohifkgu/o/bar%2Ftest HTTP/1.1 + // + // + // --__END_OF_PART__-- + + // Here we simply process the request body line by line and delegate to other handlers + // if possible. + Streams.readAllLines(new BufferedInputStream(new ByteArrayInputStream(req)), line -> { + final int indexOfHttp = line.indexOf(" HTTP/1.1"); + if (indexOfHttp > 0) { + line = line.substring(0, indexOfHttp); + } + + RequestHandler handler = handlers.retrieve(line, params); + if (handler != null) { + try { + batchedResponses.add(handler.execute(line, params, req)); + } catch (IOException e) { + batchedResponses.add(newError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage())); + } + } + }); + + // Now we can build the response + String boundary = "__END_OF_PART__"; + String sep = "--"; + String line = "\r\n"; + + StringBuilder builder = new StringBuilder(); + for (Response response : batchedResponses) { + builder.append(sep).append(boundary).append(line); + builder.append(line); + builder.append("HTTP/1.1 ").append(response.status.getStatus()); + builder.append(' ').append(response.status.toString()); + builder.append(line); + builder.append("Content-Length: ").append(response.body.length).append(line); + builder.append(line); + } + builder.append(line); + builder.append(sep).append(boundary).append(sep); + + byte[] content = builder.toString().getBytes(StandardCharsets.UTF_8); + return new Response(RestStatus.OK, emptyMap(), "multipart/mixed; boundary=" + boundary, content); + }); + + return handlers; + } + + /** + * Represents a Storage bucket as if it was created on Google Cloud Storage. + */ + static class Bucket { + + /** Bucket name **/ + final String name; + + /** Blobs contained in the bucket **/ + final Map objects; + + Bucket(final String name) { + this.name = Objects.requireNonNull(name); + this.objects = ConcurrentCollections.newConcurrentMap(); + } + } + + /** + * Represents a Storage HTTP Response. 
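Like every route above, the batch endpoint is resolved through the `PathTrie` built in `defaultHandlers`, which both finds the handler and extracts the `{bucket}`-style template captures into the params map. A small sketch of that dispatch, reusing the `RequestHandler` and `Response` types from this file (a prefix-less registration is assumed for brevity):

```java
// Assumed imports: java.util.HashMap, java.util.Map,
// java.nio.charset.StandardCharsets, static java.util.Collections.emptyMap
PathTrie<RequestHandler> handlers = new PathTrie<>(RestUtils.REST_DECODER);
handlers.insert("GET /storage/v1/b/{bucket}", (url, params, body) ->
        new Response(RestStatus.OK, emptyMap(), XContentType.JSON.mediaType(),
                ("{\"kind\":\"storage#bucket\",\"id\":\"" + params.get("bucket") + "\"}")
                        .getBytes(StandardCharsets.UTF_8)));

// retrieve(...) matches the route and fills params with {bucket=my-bucket}
Map<String, String> params = new HashMap<>();
RequestHandler handler = handlers.retrieve("GET /storage/v1/b/my-bucket", params);
Response response = handler.execute("GET /storage/v1/b/my-bucket", params, null);
```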
+ */ + static class Response { + + final RestStatus status; + final Map headers; + final String contentType; + final byte[] body; + + Response(final RestStatus status, final Map headers, final String contentType, final byte[] body) { + this.status = Objects.requireNonNull(status); + this.headers = Objects.requireNonNull(headers); + this.contentType = Objects.requireNonNull(contentType); + this.body = Objects.requireNonNull(body); + } + } + + /** + * Builds a JSON response + */ + private static Response newResponse(final RestStatus status, final Map headers, final XContentBuilder xContentBuilder) { + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + xContentBuilder.bytes().writeTo(out); + return new Response(status, headers, XContentType.JSON.mediaType(), out.toByteArray()); + } catch (IOException e) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage()); + } + } + + /** + * Storage Error JSON representation + */ + private static Response newError(final RestStatus status, final String message) { + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject() + .startObject("error") + .field("code", status.getStatus()) + .field("message", message) + .startArray("errors") + .startObject() + .field("domain", "global") + .field("reason", status.toString()) + .field("message", message) + .endObject() + .endArray() + .endObject() + .endObject(); + builder.bytes().writeTo(out); + } + return new Response(status, emptyMap(), XContentType.JSON.mediaType(), out.toByteArray()); + } catch (IOException e) { + byte[] bytes = (message != null ? message : "something went wrong").getBytes(StandardCharsets.UTF_8); + return new Response(RestStatus.INTERNAL_SERVER_ERROR, emptyMap(), " text/plain", bytes); + } + } + + /** + * Storage Bucket JSON representation as defined in + * https://cloud.google.com/storage/docs/json_api/v1/bucket#resource + */ + private static XContentBuilder buildBucketResource(final String name) throws IOException { + return jsonBuilder().startObject() + .field("kind", "storage#bucket") + .field("id", name) + .endObject(); + } + + /** + * Storage Object JSON representation as defined in + * https://cloud.google.com/storage/docs/json_api/v1/objects#resource + */ + private static XContentBuilder buildObjectResource(final String bucket, final String name, final byte[] bytes) + throws IOException { + return buildObjectResource(jsonBuilder(), bucket, name, bytes); + } + + /** + * Storage Object JSON representation as defined in + * https://cloud.google.com/storage/docs/json_api/v1/objects#resource + */ + private static XContentBuilder buildObjectResource(final XContentBuilder builder, + final String bucket, + final String name, + final byte[] bytes) throws IOException { + return builder.startObject() + .field("kind", "storage#object") + .field("id", String.join("/", bucket, name)) + .field("name", name) + .field("size", String.valueOf(bytes.length)) + .endObject(); + } +} diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockHttpTransport.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockHttpTransport.java deleted file mode 100644 index f09854458cc14..0000000000000 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockHttpTransport.java +++ /dev/null @@ -1,433 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.gcs; - -import com.google.api.client.http.HttpTransport; -import com.google.api.client.http.LowLevelHttpRequest; -import com.google.api.client.http.LowLevelHttpResponse; -import com.google.api.client.json.Json; -import com.google.api.client.json.jackson2.JacksonFactory; -import com.google.api.client.testing.http.MockLowLevelHttpRequest; -import com.google.api.client.testing.http.MockLowLevelHttpResponse; -import com.google.api.services.storage.Storage; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.path.PathTrie; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.rest.RestUtils; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; - -/** - * Mock for {@link HttpTransport} to test Google Cloud Storage service. - *

- * This basically handles each type of request used by the {@link GoogleCloudStorageBlobStore} and provides appropriate responses like - * the Google Cloud Storage service would do. It is largely based on official documentation available at https://cloud.google - * .com/storage/docs/json_api/v1/. - */ -public class MockHttpTransport extends com.google.api.client.testing.http.MockHttpTransport { - - private final AtomicInteger objectsCount = new AtomicInteger(0); - private final Map objectsNames = ConcurrentCollections.newConcurrentMap(); - private final Map objectsContent = ConcurrentCollections.newConcurrentMap(); - - private final PathTrie handlers = new PathTrie<>(RestUtils.REST_DECODER); - - public MockHttpTransport(String bucket) { - - // GET Bucket - // - // https://cloud.google.com/storage/docs/json_api/v1/buckets/get - handlers.insert("GET https://www.googleapis.com/storage/v1/b/{bucket}", (url, params, req) -> { - String name = params.get("bucket"); - if (Strings.hasText(name) == false) { - return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "bucket name is missing"); - } - - if (name.equals(bucket)) { - return newMockResponse().setContent(buildBucketResource(bucket)); - } else { - return newMockError(RestStatus.NOT_FOUND, "bucket not found"); - } - }); - - // GET Object - // - // https://cloud.google.com/storage/docs/json_api/v1/objects/get - handlers.insert("GET https://www.googleapis.com/storage/v1/b/{bucket}/o/{object}", (url, params, req) -> { - String name = params.get("object"); - if (Strings.hasText(name) == false) { - return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); - } - - for (Map.Entry object : objectsNames.entrySet()) { - if (object.getValue().equals(name)) { - byte[] content = objectsContent.get(object.getKey()); - if (content != null) { - return newMockResponse().setContent(buildObjectResource(bucket, name, object.getKey(), content.length)); - } - } - } - return newMockError(RestStatus.NOT_FOUND, "object not found"); - }); - - // Download Object - // - // https://cloud.google.com/storage/docs/request-endpoints - handlers.insert("GET https://www.googleapis.com/download/storage/v1/b/{bucket}/o/{object}", (url, params, req) -> { - String name = params.get("object"); - if (Strings.hasText(name) == false) { - return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); - } - - for (Map.Entry object : objectsNames.entrySet()) { - if (object.getValue().equals(name)) { - byte[] content = objectsContent.get(object.getKey()); - if (content == null) { - return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "object content is missing"); - } - return newMockResponse().setContent(new ByteArrayInputStream(content)); - } - } - return newMockError(RestStatus.NOT_FOUND, "object not found"); - }); - - // Insert Object (initialization) - // - // https://cloud.google.com/storage/docs/json_api/v1/objects/insert - handlers.insert("POST https://www.googleapis.com/upload/storage/v1/b/{bucket}/o", (url, params, req) -> { - if ("resumable".equals(params.get("uploadType")) == false) { - return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "upload type must be resumable"); - } - - String name = params.get("name"); - if (Strings.hasText(name) == false) { - return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); - } - - String objectId = String.valueOf(objectsCount.getAndIncrement()); - objectsNames.put(objectId, name); - - return newMockResponse() - .setStatusCode(RestStatus.CREATED.getStatus()) - 
.addHeader("Location", "https://www.googleapis.com/upload/storage/v1/b/" + bucket + - "/o?uploadType=resumable&upload_id=" + objectId); - }); - - // Insert Object (upload) - // - // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload - handlers.insert("PUT https://www.googleapis.com/upload/storage/v1/b/{bucket}/o", (url, params, req) -> { - String objectId = params.get("upload_id"); - if (Strings.hasText(objectId) == false) { - return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "upload id is missing"); - } - - String name = objectsNames.get(objectId); - if (Strings.hasText(name) == false) { - return newMockError(RestStatus.NOT_FOUND, "object name not found"); - } - - ByteArrayOutputStream os = new ByteArrayOutputStream((int) req.getContentLength()); - try { - req.getStreamingContent().writeTo(os); - os.close(); - } catch (IOException e) { - return newMockError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage()); - } - - byte[] content = os.toByteArray(); - objectsContent.put(objectId, content); - return newMockResponse().setContent(buildObjectResource(bucket, name, objectId, content.length)); - }); - - // List Objects - // - // https://cloud.google.com/storage/docs/json_api/v1/objects/list - handlers.insert("GET https://www.googleapis.com/storage/v1/b/{bucket}/o", (url, params, req) -> { - String prefix = params.get("prefix"); - - try (XContentBuilder builder = jsonBuilder()) { - builder.startObject(); - builder.field("kind", "storage#objects"); - builder.startArray("items"); - for (Map.Entry o : objectsNames.entrySet()) { - if (prefix != null && o.getValue().startsWith(prefix) == false) { - continue; - } - buildObjectResource(builder, bucket, o.getValue(), o.getKey(), objectsContent.get(o.getKey()).length); - } - builder.endArray(); - builder.endObject(); - return newMockResponse().setContent(builder.string()); - } catch (IOException e) { - return newMockError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage()); - } - }); - - // Delete Object - // - // https://cloud.google.com/storage/docs/json_api/v1/objects/delete - handlers.insert("DELETE https://www.googleapis.com/storage/v1/b/{bucket}/o/{object}", (url, params, req) -> { - String name = params.get("object"); - if (Strings.hasText(name) == false) { - return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); - } - - String objectId = null; - for (Map.Entry object : objectsNames.entrySet()) { - if (object.getValue().equals(name)) { - objectId = object.getKey(); - break; - } - } - - if (objectId != null) { - objectsNames.remove(objectId); - objectsContent.remove(objectId); - return newMockResponse().setStatusCode(RestStatus.NO_CONTENT.getStatus()); - } - return newMockError(RestStatus.NOT_FOUND, "object not found"); - }); - - // Copy Object - // - // https://cloud.google.com/storage/docs/json_api/v1/objects/copy - handlers.insert("POST https://www.googleapis.com/storage/v1/b/{srcBucket}/o/{srcObject}/copyTo/b/{destBucket}/o/{destObject}", - (url, params, req) -> { - String source = params.get("srcObject"); - if (Strings.hasText(source) == false) { - return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "source object name is missing"); - } - - String dest = params.get("destObject"); - if (Strings.hasText(dest) == false) { - return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "destination object name is missing"); - } - - String srcObjectId = null; - for (Map.Entry object : objectsNames.entrySet()) { - if (object.getValue().equals(source)) { - srcObjectId = object.getKey(); - break; - } - } - 
- if (srcObjectId == null) { - return newMockError(RestStatus.NOT_FOUND, "source object not found"); - } - - byte[] content = objectsContent.get(srcObjectId); - if (content == null) { - return newMockError(RestStatus.NOT_FOUND, "source content can not be found"); - } - - String destObjectId = String.valueOf(objectsCount.getAndIncrement()); - objectsNames.put(destObjectId, dest); - objectsContent.put(destObjectId, content); - - return newMockResponse().setContent(buildObjectResource(bucket, dest, destObjectId, content.length)); - }); - - // Batch - // - // https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch - handlers.insert("POST https://www.googleapis.com/batch", (url, params, req) -> { - List responses = new ArrayList<>(); - - // A batch request body looks like this: - // - // --__END_OF_PART__ - // Content-Length: 71 - // Content-Type: application/http - // content-id: 1 - // content-transfer-encoding: binary - // - // DELETE https://www.googleapis.com/storage/v1/b/ohifkgu/o/foo%2Ftest HTTP/1.1 - // - // - // --__END_OF_PART__ - // Content-Length: 71 - // Content-Type: application/http - // content-id: 2 - // content-transfer-encoding: binary - // - // DELETE https://www.googleapis.com/storage/v1/b/ohifkgu/o/bar%2Ftest HTTP/1.1 - // - // - // --__END_OF_PART__-- - - // Here we simply process the request body line by line and delegate to other handlers - // if possible. - try (ByteArrayOutputStream os = new ByteArrayOutputStream((int) req.getContentLength())) { - req.getStreamingContent().writeTo(os); - - Streams.readAllLines(new ByteArrayInputStream(os.toByteArray()), line -> { - final int indexOfHttp = line.indexOf(" HTTP/1.1"); - if (indexOfHttp > 0) { - line = line.substring(0, indexOfHttp); - } - - Handler handler = handlers.retrieve(line, params); - if (handler != null) { - try { - responses.add(handler.execute(line, params, req)); - } catch (IOException e) { - responses.add(newMockError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage())); - } - } - }); - } - - // Now we can build the response - String boundary = "__END_OF_PART__"; - String sep = "--"; - String line = "\r\n"; - - StringBuilder builder = new StringBuilder(); - for (MockLowLevelHttpResponse resp : responses) { - builder.append(sep).append(boundary).append(line); - builder.append(line); - builder.append("HTTP/1.1 ").append(resp.getStatusCode()).append(' ').append(resp.getReasonPhrase()).append(line); - builder.append("Content-Length: ").append(resp.getContentLength()).append(line); - builder.append(line); - } - builder.append(line); - builder.append(sep).append(boundary).append(sep); - - return newMockResponse().setContentType("multipart/mixed; boundary=" + boundary).setContent(builder.toString()); - }); - } - - @Override - public LowLevelHttpRequest buildRequest(String method, String url) throws IOException { - return new MockLowLevelHttpRequest() { - @Override - public LowLevelHttpResponse execute() throws IOException { - String rawPath = url; - Map params = new HashMap<>(); - - int pathEndPos = url.indexOf('?'); - if (pathEndPos != -1) { - rawPath = url.substring(0, pathEndPos); - RestUtils.decodeQueryString(url, pathEndPos + 1, params); - } - - Handler handler = handlers.retrieve(method + " " + rawPath, params); - if (handler != null) { - return handler.execute(rawPath, params, this); - } - return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "Unable to handle request [method=" + method + ", url=" + url + "]"); - } - }; - } - - private static MockLowLevelHttpResponse newMockResponse() { - return new 
MockLowLevelHttpResponse() - .setContentType(Json.MEDIA_TYPE) - .setStatusCode(RestStatus.OK.getStatus()) - .setReasonPhrase(RestStatus.OK.name()); - } - - private static MockLowLevelHttpResponse newMockError(RestStatus status, String message) { - MockLowLevelHttpResponse response = newMockResponse().setStatusCode(status.getStatus()).setReasonPhrase(status.name()); - try { - response.setContent(buildErrorResource(status, message)); - } catch (IOException e) { - response.setContent("Failed to build error resource [" + message + "] because of: " + e.getMessage()); - } - return response; - } - - /** - * Storage Error JSON representation - */ - private static String buildErrorResource(RestStatus status, String message) throws IOException { - return jsonBuilder() - .startObject() - .startObject("error") - .field("code", status.getStatus()) - .field("message", message) - .startArray("errors") - .startObject() - .field("domain", "global") - .field("reason", status.toString()) - .field("message", message) - .endObject() - .endArray() - .endObject() - .endObject() - .string(); - } - - /** - * Storage Bucket JSON representation as defined in - * https://cloud.google.com/storage/docs/json_api/v1/bucket#resource - */ - private static String buildBucketResource(String name) throws IOException { - return jsonBuilder().startObject() - .field("kind", "storage#bucket") - .field("id", name) - .endObject() - .string(); - } - - /** - * Storage Object JSON representation as defined in - * https://cloud.google.com/storage/docs/json_api/v1/objects#resource - */ - private static XContentBuilder buildObjectResource(XContentBuilder builder, String bucket, String name, String id, int size) - throws IOException { - return builder.startObject() - .field("kind", "storage#object") - .field("id", String.join("/", bucket, name, id)) - .field("name", name) - .field("size", String.valueOf(size)) - .endObject(); - } - - private static String buildObjectResource(String bucket, String name, String id, int size) throws IOException { - return buildObjectResource(jsonBuilder(), bucket, name, id, size).string(); - } - - interface Handler { - MockLowLevelHttpResponse execute(String url, Map params, MockLowLevelHttpRequest request) throws IOException; - } - - /** - * Instanciates a mocked Storage client for tests. - */ - public static Storage newStorage(String bucket, String applicationName) { - return new Storage.Builder(new MockHttpTransport(bucket), new JacksonFactory(), null) - .setApplicationName(applicationName) - .build(); - } -} diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java new file mode 100644 index 0000000000000..8be7511ab58c6 --- /dev/null +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.gcs;
+
+import com.google.api.client.http.LowLevelHttpRequest;
+import com.google.api.client.http.LowLevelHttpResponse;
+import com.google.api.client.json.jackson2.JacksonFactory;
+import com.google.api.client.testing.http.MockLowLevelHttpRequest;
+import com.google.api.client.testing.http.MockLowLevelHttpResponse;
+import com.google.api.services.storage.Storage;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * {@link MockStorage} is a utility class that provides {@link Storage} clients that work
+ * against an embedded {@link GoogleCloudStorageTestServer}.
+ */
+class MockStorage extends com.google.api.client.testing.http.MockHttpTransport {
+
+    /**
+     * Embedded test server that emulates a Google Cloud Storage service
+     **/
+    private final GoogleCloudStorageTestServer server = new GoogleCloudStorageTestServer();
+
+    private MockStorage() {
+    }
+
+    @Override
+    public LowLevelHttpRequest buildRequest(String method, String url) throws IOException {
+        return new MockLowLevelHttpRequest() {
+            @Override
+            public LowLevelHttpResponse execute() throws IOException {
+                final GoogleCloudStorageTestServer.Response response = server.handle(method, url, getContentAsBytes());
+                return convert(response);
+            }
+
+            /** Returns the LowLevelHttpRequest body as an array of bytes **/
+            byte[] getContentAsBytes() throws IOException {
+                ByteArrayOutputStream out = new ByteArrayOutputStream();
+                if (getStreamingContent() != null) {
+                    getStreamingContent().writeTo(out);
+                }
+                return out.toByteArray();
+            }
+        };
+    }
+
+    private static MockLowLevelHttpResponse convert(final GoogleCloudStorageTestServer.Response response) {
+        final MockLowLevelHttpResponse lowLevelHttpResponse = new MockLowLevelHttpResponse();
+        for (Map.Entry<String, String> header : response.headers.entrySet()) {
+            lowLevelHttpResponse.addHeader(header.getKey(), header.getValue());
+        }
+        lowLevelHttpResponse.setContentType(response.contentType);
+        lowLevelHttpResponse.setStatusCode(response.status.getStatus());
+        lowLevelHttpResponse.setReasonPhrase(response.status.toString());
+        if (response.body != null) {
+            lowLevelHttpResponse.setContent(response.body);
+            lowLevelHttpResponse.setContentLength(response.body.length);
+        }
+        return lowLevelHttpResponse;
+    }
+
+    /**
+     * Instantiates a mocked Storage client for tests.
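+     * A hypothetical usage sketch (the bucket and application names are illustrative only,
+     * not part of this change):
+     * <pre>
+     * Storage storage = MockStorage.newStorageClient("test-bucket", "test-app");
+     * storage.objects().list("test-bucket").execute(); // handled by the embedded GoogleCloudStorageTestServer
+     * </pre>
+     *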
+ */ + public static Storage newStorageClient(final String bucket, final String applicationName) { + MockStorage mockStorage = new MockStorage(); + mockStorage.server.createBucket(bucket); + + return new Storage.Builder(mockStorage, JacksonFactory.getDefaultInstance(), null) + .setApplicationName(applicationName) + .build(); + } +} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java index 8dab47bd1ceee..f53c9d3b1f5e7 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java @@ -138,7 +138,7 @@ public void testHierarchy() throws Exception { assertThat(ESLoggerFactory.getLogger("x.y").getLevel(), equalTo(Level.DEBUG)); final Level level = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR); - ServerLoggers.setLevel(ESLoggerFactory.getLogger("x"), level); + Loggers.setLevel(ESLoggerFactory.getLogger("x"), level); assertThat(ESLoggerFactory.getLogger("x").getLevel(), equalTo(level)); assertThat(ESLoggerFactory.getLogger("x.y").getLevel(), equalTo(level)); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java index 55e359697eb15..d4bc754689e68 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java @@ -285,12 +285,12 @@ public void testFindAppender() throws IOException, UserException { final Logger hasConsoleAppender = ESLoggerFactory.getLogger("has_console_appender"); - final Appender testLoggerConsoleAppender = ServerLoggers.findAppender(hasConsoleAppender, ConsoleAppender.class); + final Appender testLoggerConsoleAppender = Loggers.findAppender(hasConsoleAppender, ConsoleAppender.class); assertNotNull(testLoggerConsoleAppender); assertThat(testLoggerConsoleAppender.getName(), equalTo("console")); final Logger hasCountingNoOpAppender = ESLoggerFactory.getLogger("has_counting_no_op_appender"); - assertNull(ServerLoggers.findAppender(hasCountingNoOpAppender, ConsoleAppender.class)); - final Appender countingNoOpAppender = ServerLoggers.findAppender(hasCountingNoOpAppender, CountingNoOpAppender.class); + assertNull(Loggers.findAppender(hasCountingNoOpAppender, ConsoleAppender.class)); + final Appender countingNoOpAppender = Loggers.findAppender(hasCountingNoOpAppender, CountingNoOpAppender.class); assertThat(countingNoOpAppender.getName(), equalTo("counting_no_op")); } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java new file mode 100644 index 0000000000000..c7848267ff17f --- /dev/null +++ b/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import org.elasticsearch.test.ESTestCase;
+import org.junit.After;
+import org.junit.Before;
+
+import java.util.Optional;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.hasToString;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class EvilThreadPoolTests extends ESTestCase {
+
+    private ThreadPool threadPool;
+
+    @Before
+    public void setUpThreadPool() {
+        threadPool = new TestThreadPool(EvilThreadPoolTests.class.getName());
+    }
+
+    @After
+    public void tearDownThreadPool() throws InterruptedException {
+        terminate(threadPool);
+    }
+
+    public void testExecutionException() throws InterruptedException {
+        runExecutionExceptionTest(
+                () -> {
+                    throw new Error("future error");
+                },
+                true,
+                o -> {
+                    assertTrue(o.isPresent());
+                    assertThat(o.get(), instanceOf(Error.class));
+                    assertThat(o.get(), hasToString(containsString("future error")));
+                });
+        runExecutionExceptionTest(
+                () -> {
+                    throw new IllegalStateException("future exception");
+                },
+                false,
+                o -> assertFalse(o.isPresent()));
+    }
+
+    private void runExecutionExceptionTest(
+            final Runnable runnable,
+            final boolean expectThrowable,
+            final Consumer<Optional<Throwable>> consumer) throws InterruptedException {
+        final AtomicReference<Throwable> throwableReference = new AtomicReference<>();
+        final Thread.UncaughtExceptionHandler uncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler();
+        final CountDownLatch uncaughtExceptionHandlerLatch = new CountDownLatch(1);
+
+        try {
+            Thread.setDefaultUncaughtExceptionHandler((t, e) -> {
+                assertTrue(expectThrowable);
+                throwableReference.set(e);
+                uncaughtExceptionHandlerLatch.countDown();
+            });
+
+            final CountDownLatch supplierLatch = new CountDownLatch(1);
+
+            threadPool.generic().submit(() -> {
+                try {
+                    runnable.run();
+                } finally {
+                    supplierLatch.countDown();
+                }
+            });
+
+            supplierLatch.await();
+
+            if (expectThrowable) {
+                uncaughtExceptionHandlerLatch.await();
+            }
+            consumer.accept(Optional.ofNullable(throwableReference.get()));
+        } finally {
+            Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler);
+        }
+    }
+
+}
diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle
index 123b217a38af3..0449cc3ee71ae 100644
--- a/qa/full-cluster-restart/build.gradle
+++ b/qa/full-cluster-restart/build.gradle
@@ -19,6 +19,7 @@
 import org.elasticsearch.gradle.Version
+import org.elasticsearch.gradle.VersionCollection
 import org.elasticsearch.gradle.test.RestIntegTestTask
 
 apply plugin: 'elasticsearch.standalone-test'
@@ -30,7 +31,7 @@ task bwcTest {
   group = 'verification'
 }
 
-for (Version version : versionCollection.versionsIndexCompatibleWithCurrent) {
+for (Version version : bwcVersions.indexCompatible) {
   String baseName = "v${version}"
 
   Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) {
@@ -105,10 +106,11 @@ test.enabled = false // 
no unit tests for rolling upgrades, only the rest integr // basic integ tests includes testing bwc against the most recent version task integTest { if (project.bwc_tests_enabled) { - for (final def version : versionCollection.basicIntegrationTestVersions) { + for (final def version : bwcVersions.snapshotsIndexCompatible) { dependsOn "v${version}#bwcTest" } } } check.dependsOn(integTest) + diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 712fde94b7d31..ca802cd42754f 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -29,7 +29,7 @@ task bwcTest { group = 'verification' } -for (Version version : versionCollection.versionsWireCompatibleWithCurrent) { +for (Version version : bwcVersions.wireCompatible) { String baseName = "v${version}" Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask) { @@ -66,7 +66,7 @@ test.enabled = false // no unit tests for rolling upgrades, only the rest integr // basic integ tests includes testing bwc against the most recent version task integTest { if (project.bwc_tests_enabled) { - for (final def version : versionCollection.basicIntegrationTestVersions) { + for (final def version : bwcVersions.snapshotsWireCompatible) { dependsOn "v${version}#bwcTest" } } diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java index e0ee8bac8bfe0..a46056e98b434 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -233,7 +233,7 @@ public void testSeqNoCheckpoints() throws Exception { public void testUpdateSnapshotStatus() throws Exception { Nodes nodes = buildNodeAndVersions(); - assertThat(nodes.getNewNodes(), not(empty())); + assumeFalse("new nodes is empty", nodes.getNewNodes().isEmpty()); logger.info("cluster discovered: {}", nodes.toString()); // Create the repository before taking the snapshot. 
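The IndexingIT change above swaps a hard assertion for a test assumption: when a mixed cluster happens to contain no upgraded nodes, assumeFalse marks the test as skipped rather than failed. A minimal JUnit 4 sketch of the difference follows; the class and variable names are illustrative, not part of the change:

    import static org.junit.Assert.assertFalse;
    import static org.junit.Assume.assumeFalse;

    import java.util.Collections;
    import java.util.List;

    public class AssumptionVsAssertionSketch {

        // Reported as "skipped" by the runner: assumeFalse throws AssumptionViolatedException.
        public void testWithAssumption() {
            List<String> newNodes = Collections.emptyList();
            assumeFalse("new nodes is empty", newNodes.isEmpty());
        }

        // Reported as "failed" by the runner: assertFalse throws AssertionError.
        public void testWithAssertion() {
            List<String> newNodes = Collections.emptyList();
            assertFalse("new nodes is empty", newNodes.isEmpty());
        }
    }
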
diff --git a/qa/query-builder-bwc/build.gradle b/qa/query-builder-bwc/build.gradle index 16e9f6298feac..b71fe5159bd87 100644 --- a/qa/query-builder-bwc/build.gradle +++ b/qa/query-builder-bwc/build.gradle @@ -30,7 +30,7 @@ task bwcTest { group = 'verification' } -for (Version version : versionCollection.versionsIndexCompatibleWithCurrent) { +for (Version version : bwcVersions.indexCompatible) { String baseName = "v${version}" Task oldQueryBuilderTest = tasks.create(name: "${baseName}#oldQueryBuilderTest", type: RestIntegTestTask) { @@ -82,11 +82,10 @@ test.enabled = false // no unit tests for rolling upgrades, only the rest integr // basic integ tests includes testing bwc against the most recent version task integTest { - if (project.bwc_tests_enabled) { - for (final def version : versionCollection.basicIntegrationTestVersions) { - dependsOn "v${version}#bwcTest" - } - } + if (project.bwc_tests_enabled) { + final def version = bwcVersions.snapshotsIndexCompatible.first() + dependsOn "v${version}#bwcTest" + } } check.dependsOn(integTest) diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 31d056037d865..30e0a311aa673 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -29,7 +29,7 @@ task bwcTest { group = 'verification' } -for (Version version : versionCollection.versionsWireCompatibleWithCurrent) { +for (Version version : bwcVersions.wireCompatible) { String baseName = "v${version}" Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { @@ -110,7 +110,7 @@ test.enabled = false // no unit tests for rolling upgrades, only the rest integr // basic integ tests includes testing bwc against the most recent version task integTest { if (project.bwc_tests_enabled) { - for (final def version : versionCollection.basicIntegrationTestVersions) { + for (final def version : bwcVersions.snapshotsWireCompatible) { dependsOn "v${version}#bwcTest" } } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index 338b8728b6a82..011db854ecdc6 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -141,7 +141,6 @@ - do: tasks.get: - wait_for_completion: true task_id: $task_id - is_false: node_failures diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java index cbe798666129a..16dbbc6f8cbab 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java @@ -33,7 +33,7 @@ public class IngestDocumentMustacheIT extends AbstractScriptTestCase { public void testAccessMetaDataViaTemplate() { Map document = new HashMap<>(); document.put("foo", "bar"); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, document); + IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, document); ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{foo}}", scriptService)); 
assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 bar")); @@ -48,7 +48,7 @@ public void testAccessMapMetaDataViaTemplate() { innerObject.put("baz", "hello baz"); innerObject.put("qux", Collections.singletonMap("fubar", "hello qux and fubar")); document.put("foo", innerObject); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, document); + IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, document); ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{foo.bar}} {{foo.baz}} {{foo.qux.fubar}}", scriptService)); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 hello bar hello baz hello qux and fubar")); @@ -67,7 +67,7 @@ public void testAccessListMetaDataViaTemplate() { list.add(value); list.add(null); document.put("list2", list); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, document); + IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, document); ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{list1.0}} {{list2.0}}", scriptService)); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 foo {field=value}")); } @@ -77,7 +77,7 @@ public void testAccessIngestMetadataViaTemplate() { Map ingestMap = new HashMap<>(); ingestMap.put("timestamp", "bogus_timestamp"); document.put("_ingest", ingestMap); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, document); + IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, document); ingestDocument.setFieldValue(compile("ingest_timestamp"), ValueSource.wrap("{{_ingest.timestamp}} and {{_source._ingest.timestamp}}", scriptService)); assertThat(ingestDocument.getFieldValue("ingest_timestamp", String.class), diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java index a8c7afcec6fee..a80b693851fc1 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java @@ -64,7 +64,7 @@ public void testValueSourceWithTemplates() { } public void testAccessSourceViaTemplate() { - IngestDocument ingestDocument = new IngestDocument("marvel", "type", "id", null, null, new HashMap<>()); + IngestDocument ingestDocument = new IngestDocument("marvel", "type", "id", null, null, null, null, new HashMap<>()); assertThat(ingestDocument.hasField("marvel"), is(false)); ingestDocument.setFieldValue(compile("{{_index}}"), ValueSource.wrap("{{_index}}", scriptService)); assertThat(ingestDocument.getFieldValue("marvel", String.class), equalTo("marvel")); diff --git a/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats b/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats index 715d995845963..b456e1339de3c 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats @@ -139,6 +139,23 @@ setup() { stop_elasticsearch_service } +@test "[TAR] relative ES_PATH_CONF" { + local es_path_conf=$ES_PATH_CONF + local temp=`mktemp -d` + mkdir "$temp"/config + cp 
"$ESCONFIG"/elasticsearch.yml "$temp"/config + cp "$ESCONFIG"/log4j2.properties "$temp"/config + cp "$ESCONFIG/jvm.options" "$temp/config" + chown -R elasticsearch:elasticsearch "$temp" + echo "node.name: relative" >> "$temp"/config/elasticsearch.yml + cd "$temp" + export ES_PATH_CONF=config + start_elasticsearch_service + curl -s -XGET localhost:9200/_nodes | fgrep '"name":"relative"' + stop_elasticsearch_service + export ES_PATH_CONF=$es_path_conf +} + @test "[TAR] remove tar" { rm -rf "/tmp/elasticsearch" } diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash index 33d8afb09a041..2cb84528383b3 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/utils.bash @@ -415,6 +415,51 @@ stop_elasticsearch_service() { fi } +# the default netcat packages in the distributions we test are not all compatible +# so we use /dev/tcp - a feature of bash which makes tcp connections +# http://tldp.org/LDP/abs/html/devref1.html#DEVTCP +test_port() { + local host="$1" + local port="$2" + cat < /dev/null > "/dev/tcp/$host/$port" +} + +describe_port() { + local host="$1" + local port="$2" + if test_port "$host" "$port"; then + echo "port $port on host $host is open" + else + echo "port $port on host $host is not open" + fi +} + +debug_collect_logs() { + local es_logfile="$ESLOG/elasticsearch.log" + local system_logfile='/var/log/messages' + + if [ -e "$es_logfile" ]; then + echo "Here's the elasticsearch log:" + cat "$es_logfile" + else + echo "The elasticsearch log doesn't exist at $es_logfile" + fi + + if [ -e "$system_logfile" ]; then + echo "Here's the tail of the log at $system_logfile:" + tail -n20 "$system_logfile" + else + echo "The logfile at $system_logfile doesn't exist" + fi + + echo "Current java processes:" + ps aux | grep java || true + + echo "Testing if ES ports are open:" + describe_port 127.0.0.1 9200 + describe_port 127.0.0.1 9201 +} + # Waits for Elasticsearch to reach some status. # $1 - expected status - defaults to green wait_for_elasticsearch_status() { @@ -422,15 +467,10 @@ wait_for_elasticsearch_status() { local index=$2 echo "Making sure elasticsearch is up..." - wget -O - --retry-connrefused --waitretry=1 --timeout=120 --tries 120 http://localhost:9200/_cluster/health || { - echo "Looks like elasticsearch never started. Here is its log:" - if [ -e "$ESLOG/elasticsearch.log" ]; then - cat "$ESLOG/elasticsearch.log" - else - echo "The elasticsearch log doesn't exist. 
Maybe /var/log/messages has something:" - tail -n20 /var/log/messages - fi - false + wget -O - --retry-connrefused --waitretry=1 --timeout=120 --tries=120 http://localhost:9200/_cluster/health || { + echo "Looks like elasticsearch never started" + debug_collect_logs + false } if [ -z "index" ]; then diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index 1d31db6898b7b..2d2135c3a41d9 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -31,7 +31,7 @@ task bwcTest { group = 'verification' } -for (Version version : versionCollection.versionsIndexCompatibleWithCurrent) { +for (Version version : bwcVersions.indexCompatible) { String baseName = "v${version}" Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { mustRunAfter(precommit) @@ -57,9 +57,10 @@ for (Version version : versionCollection.versionsIndexCompatibleWithCurrent) { test.enabled = false task integTest { - for (final def version : versionCollection.basicIntegrationTestVersions) { - dependsOn "v${version}#bwcTest" - } + if (project.bwc_tests_enabled) { + final def version = bwcVersions.snapshotsIndexCompatible.first() + dependsOn "v${version}#bwcTest" + } } task verifyDocsLuceneVersion { diff --git a/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java b/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java index 3ab992aeec3e0..72d9d1b74b49d 100644 --- a/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java +++ b/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -90,6 +91,7 @@ public void testTransportClient() throws URISyntaxException, IOException { XContentParser parser = JsonXContent.jsonXContent.createParser( new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, response.getEntity().getContent())) { final Map map = parser.map(); assertThat(map.get("first_name"), equalTo("John")); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml index 6e8be800a1b7b..aec3c41672ddc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml @@ -11,7 +11,7 @@ body: settings: number_of_shards: 1 - number_of_replicas: 1 + number_of_replicas: 0 index.sort.field: rank mappings: test: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml index f5a9469f357fb..5c9ec3e597ad9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml @@ -15,7 +15,7 @@ setup: - do: indices.stats: metric: [ translog ] - - set: { indices.test.primaries.translog.size_in_bytes: empty_size } + - set: { indices.test.primaries.translog.size_in_bytes: creation_size } - do: index: @@ -27,9 
+27,11 @@ setup: - do: indices.stats: metric: [ translog ] - - gt: { indices.test.primaries.translog.size_in_bytes: $empty_size } + - gt: { indices.test.primaries.translog.size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.operations: 1 } - - gt: { indices.test.primaries.translog.uncommitted_size_in_bytes: $empty_size } +# we can't check this yet as creation size will contain two empty translog generations. A single +# non empty generation with one op may be smaller or larger than that. +# - gt: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.uncommitted_operations: 1 } - do: @@ -39,9 +41,10 @@ setup: - do: indices.stats: metric: [ translog ] - - gt: { indices.test.primaries.translog.size_in_bytes: $empty_size } + - gt: { indices.test.primaries.translog.size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.operations: 1 } - - match: { indices.test.primaries.translog.uncommitted_size_in_bytes: $empty_size } + ## creation translog size has some overhead due to an initial empty generation that will be trimmed later + - lt: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.uncommitted_operations: 0 } - do: @@ -59,7 +62,25 @@ setup: - do: indices.stats: metric: [ translog ] - - match: { indices.test.primaries.translog.size_in_bytes: $empty_size } + ## creation translog size has some overhead due to an initial empty generation that will be trimmed later + - lte: { indices.test.primaries.translog.size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.operations: 0 } - - match: { indices.test.primaries.translog.uncommitted_size_in_bytes: $empty_size } + - lte: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.uncommitted_operations: 0 } + +--- +"Translog last modified age stats": + - skip: + version: " - 6.2.99" + reason: translog last modified age stats was added in 6.3.0 + - do: + index: + index: test + type: bar + id: 1 + body: { "foo": "bar" } + + - do: + indices.stats: + metric: [ translog ] + - gte: { indices.test.primaries.translog.earliest_last_modified_age: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml index 0a941f0eca542..4413a7a5c7db1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml @@ -65,21 +65,21 @@ setup: - match: { hits.total: 4 } - length: { hits.hits: 4 } - - match: { aggregations.percentiles_int.values.1\.0: 2.5 } - - match: { aggregations.percentiles_int.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_int.values.25\.0: 38.5 } + - match: { aggregations.percentiles_int.values.1\.0: 1.0 } + - match: { aggregations.percentiles_int.values.5\.0: 1.0 } + - match: { aggregations.percentiles_int.values.25\.0: 26.0 } - match: { aggregations.percentiles_int.values.50\.0: 76.0 } - - match: { aggregations.percentiles_int.values.75\.0: 113.5 } - - match: { aggregations.percentiles_int.values.95\.0: 143.49999999999997 } - - match: { aggregations.percentiles_int.values.99\.0: 149.5 } + - match: { 
aggregations.percentiles_int.values.75\.0: 126.0 } + - match: { aggregations.percentiles_int.values.95\.0: 151.0 } + - match: { aggregations.percentiles_int.values.99\.0: 151.0 } - - match: { aggregations.percentiles_double.values.1\.0: 2.5 } - - match: { aggregations.percentiles_double.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_double.values.25\.0: 38.5 } + - match: { aggregations.percentiles_double.values.1\.0: 1.0 } + - match: { aggregations.percentiles_double.values.5\.0: 1.0 } + - match: { aggregations.percentiles_double.values.25\.0: 26.0 } - match: { aggregations.percentiles_double.values.50\.0: 76.0 } - - match: { aggregations.percentiles_double.values.75\.0: 113.5 } - - match: { aggregations.percentiles_double.values.95\.0: 143.49999999999997 } - - match: { aggregations.percentiles_double.values.99\.0: 149.5 } + - match: { aggregations.percentiles_double.values.75\.0: 126.0 } + - match: { aggregations.percentiles_double.values.95\.0: 151.0 } + - match: { aggregations.percentiles_double.values.99\.0: 151.0 } - do: search: @@ -100,21 +100,21 @@ setup: - match: { hits.total: 4 } - length: { hits.hits: 4 } - - match: { aggregations.percentiles_int.values.1\.0: 2.5 } - - match: { aggregations.percentiles_int.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_int.values.25\.0: 38.5 } + - match: { aggregations.percentiles_int.values.1\.0: 1.0 } + - match: { aggregations.percentiles_int.values.5\.0: 1.0 } + - match: { aggregations.percentiles_int.values.25\.0: 26.0 } - match: { aggregations.percentiles_int.values.50\.0: 76.0 } - - match: { aggregations.percentiles_int.values.75\.0: 113.5 } - - match: { aggregations.percentiles_int.values.95\.0: 143.49999999999997 } - - match: { aggregations.percentiles_int.values.99\.0: 149.5 } + - match: { aggregations.percentiles_int.values.75\.0: 126.0 } + - match: { aggregations.percentiles_int.values.95\.0: 151.0 } + - match: { aggregations.percentiles_int.values.99\.0: 151.0 } - - match: { aggregations.percentiles_double.values.1\.0: 2.5 } - - match: { aggregations.percentiles_double.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_double.values.25\.0: 38.5 } + - match: { aggregations.percentiles_double.values.1\.0: 1.0 } + - match: { aggregations.percentiles_double.values.5\.0: 1.0 } + - match: { aggregations.percentiles_double.values.25\.0: 26.0 } - match: { aggregations.percentiles_double.values.50\.0: 76.0 } - - match: { aggregations.percentiles_double.values.75\.0: 113.5 } - - match: { aggregations.percentiles_double.values.95\.0: 143.49999999999997 } - - match: { aggregations.percentiles_double.values.99\.0: 149.5 } + - match: { aggregations.percentiles_double.values.75\.0: 126.0 } + - match: { aggregations.percentiles_double.values.95\.0: 151.0 } + - match: { aggregations.percentiles_double.values.99\.0: 151.0 } --- @@ -135,21 +135,21 @@ setup: - match: { hits.total: 4 } - length: { hits.hits: 0 } - - match: { aggregations.percentiles_int.values.1\.0: 2.5 } - - match: { aggregations.percentiles_int.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_int.values.25\.0: 38.5 } + - match: { aggregations.percentiles_int.values.1\.0: 1.0 } + - match: { aggregations.percentiles_int.values.5\.0: 1.0 } + - match: { aggregations.percentiles_int.values.25\.0: 26.0 } - match: { aggregations.percentiles_int.values.50\.0: 76.0 } - - match: { aggregations.percentiles_int.values.75\.0: 113.5 } - - match: { aggregations.percentiles_int.values.95\.0: 143.49999999999997 } - 
- match: { aggregations.percentiles_int.values.99\.0: 149.5 } + - match: { aggregations.percentiles_int.values.75\.0: 126.0 } + - match: { aggregations.percentiles_int.values.95\.0: 151.0 } + - match: { aggregations.percentiles_int.values.99\.0: 151.0 } - - match: { aggregations.percentiles_double.values.1\.0: 2.5 } - - match: { aggregations.percentiles_double.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_double.values.25\.0: 38.5 } + - match: { aggregations.percentiles_double.values.1\.0: 1.0 } + - match: { aggregations.percentiles_double.values.5\.0: 1.0 } + - match: { aggregations.percentiles_double.values.25\.0: 26.0 } - match: { aggregations.percentiles_double.values.50\.0: 76.0 } - - match: { aggregations.percentiles_double.values.75\.0: 113.5 } - - match: { aggregations.percentiles_double.values.95\.0: 143.49999999999997 } - - match: { aggregations.percentiles_double.values.99\.0: 149.5 } + - match: { aggregations.percentiles_double.values.75\.0: 126.0 } + - match: { aggregations.percentiles_double.values.95\.0: 151.0 } + - match: { aggregations.percentiles_double.values.99\.0: 151.0 } @@ -176,21 +176,21 @@ setup: - match: { hits.total: 3 } - length: { hits.hits: 3 } - - match: { aggregations.percentiles_int.values.1\.0: 52.0 } - - match: { aggregations.percentiles_int.values.5\.0: 56.0 } - - match: { aggregations.percentiles_int.values.25\.0: 76.0 } + - match: { aggregations.percentiles_int.values.1\.0: 51.0 } + - match: { aggregations.percentiles_int.values.5\.0: 51.0 } + - match: { aggregations.percentiles_int.values.25\.0: 63.5 } - match: { aggregations.percentiles_int.values.50\.0: 101.0 } - - match: { aggregations.percentiles_int.values.75\.0: 126.0 } - - match: { aggregations.percentiles_int.values.95\.0: 146.0 } - - match: { aggregations.percentiles_int.values.99\.0: 150.0 } + - match: { aggregations.percentiles_int.values.75\.0: 138.5 } + - match: { aggregations.percentiles_int.values.95\.0: 151.0 } + - match: { aggregations.percentiles_int.values.99\.0: 151.0 } - - match: { aggregations.percentiles_double.values.1\.0: 52.0 } - - match: { aggregations.percentiles_double.values.5\.0: 56.0 } - - match: { aggregations.percentiles_double.values.25\.0: 76.0 } + - match: { aggregations.percentiles_double.values.1\.0: 51.0 } + - match: { aggregations.percentiles_double.values.5\.0: 51.0 } + - match: { aggregations.percentiles_double.values.25\.0: 63.5 } - match: { aggregations.percentiles_double.values.50\.0: 101.0 } - - match: { aggregations.percentiles_double.values.75\.0: 126.0 } - - match: { aggregations.percentiles_double.values.95\.0: 146.0 } - - match: { aggregations.percentiles_double.values.99\.0: 150.0 } + - match: { aggregations.percentiles_double.values.75\.0: 138.5 } + - match: { aggregations.percentiles_double.values.95\.0: 151.0 } + - match: { aggregations.percentiles_double.values.99\.0: 151.0 } --- "Missing field with missing param": @@ -248,13 +248,13 @@ setup: - match: { aggregations.percentiles_int.meta.foo: "bar" } - - match: { aggregations.percentiles_int.values.1\.0: 2.5 } - - match: { aggregations.percentiles_int.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_int.values.25\.0: 38.5 } + - match: { aggregations.percentiles_int.values.1\.0: 1.0 } + - match: { aggregations.percentiles_int.values.5\.0: 1.0 } + - match: { aggregations.percentiles_int.values.25\.0: 26.0 } - match: { aggregations.percentiles_int.values.50\.0: 76.0 } - - match: { aggregations.percentiles_int.values.75\.0: 113.5 } - - match: { 
aggregations.percentiles_int.values.95\.0: 143.49999999999997 } - - match: { aggregations.percentiles_int.values.99\.0: 149.5 } + - match: { aggregations.percentiles_int.values.75\.0: 126.0 } + - match: { aggregations.percentiles_int.values.95\.0: 151.0 } + - match: { aggregations.percentiles_int.values.99\.0: 151.0 } --- "Invalid params test": @@ -329,12 +329,12 @@ setup: - match: { hits.total: 4 } - length: { hits.hits: 4 } - - match: { aggregations.percentiles_int.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_int.values.25\.0: 38.5 } - - match: { aggregations.percentiles_int.values.50\.0: 76.0 } + - match: { aggregations.percentiles_int.values.5\.0: 1.0 } + - match: { aggregations.percentiles_int.values.25\.0: 26.0 } + - match: { aggregations.percentiles_int.values.50\.0: 76.0 } - - match: { aggregations.percentiles_double.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_double.values.25\.0: 38.5 } + - match: { aggregations.percentiles_double.values.5\.0: 1.0 } + - match: { aggregations.percentiles_double.values.25\.0: 26.0 } - match: { aggregations.percentiles_double.values.50\.0: 76.0 } --- @@ -355,9 +355,9 @@ setup: - length: { hits.hits: 4 } - match: { aggregations.percentiles_int.values.0.key: 5.0 } - - match: { aggregations.percentiles_int.values.0.value: 8.500000000000002 } + - match: { aggregations.percentiles_int.values.0.value: 1.0 } - match: { aggregations.percentiles_int.values.1.key: 25.0 } - - match: { aggregations.percentiles_int.values.1.value: 38.5 } + - match: { aggregations.percentiles_int.values.1.value: 26.0 } - match: { aggregations.percentiles_int.values.2.key: 50.0 } - match: { aggregations.percentiles_int.values.2.value: 76.0 } diff --git a/server/build.gradle b/server/build.gradle index 9ec3d73e3cc67..7b30f57d885e8 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -99,7 +99,7 @@ dependencies { compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" // percentiles aggregation - compile 'com.tdunning:t-digest:3.0' + compile 'com.tdunning:t-digest:3.2' // precentil ranks aggregation compile 'org.hdrhistogram:HdrHistogram:2.1.9' diff --git a/server/licenses/t-digest-3.0.jar.sha1 b/server/licenses/t-digest-3.0.jar.sha1 deleted file mode 100644 index ce2f2e2f04098..0000000000000 --- a/server/licenses/t-digest-3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -84ccf145ac2215e6bfa63baa3101c0af41017cfc \ No newline at end of file diff --git a/server/licenses/t-digest-3.2.jar.sha1 b/server/licenses/t-digest-3.2.jar.sha1 new file mode 100644 index 0000000000000..de6e848545f38 --- /dev/null +++ b/server/licenses/t-digest-3.2.jar.sha1 @@ -0,0 +1 @@ +2ab94758b0276a8a26102adf8d528cf6d0567b9a \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index e89e04a301da1..05ac4d942b35e 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -33,9 +33,15 @@ import java.io.PrintWriter; import java.io.StringWriter; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; +import java.util.LinkedList; import java.util.List; +import java.util.Optional; +import java.util.Queue; import java.util.Set; +import java.util.stream.Collectors; public final class ExceptionsHelper { @@ -122,6 +128,46 @@ public static String stackTrace(Throwable e) { return 
stackTraceStringWriter.toString(); } + public static String formatStackTrace(final StackTraceElement[] stackTrace) { + return Arrays.stream(stackTrace).skip(1).map(e -> "\tat " + e).collect(Collectors.joining("\n")); + } + + static final int MAX_ITERATIONS = 1024; + + /** + * Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable. + * + * @param cause the root throwable + * + * @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable + */ + public static Optional maybeError(final Throwable cause, final Logger logger) { + // early terminate if the cause is already an error + if (cause instanceof Error) { + return Optional.of((Error) cause); + } + + final Queue queue = new LinkedList<>(); + queue.add(cause); + int iterations = 0; + while (!queue.isEmpty()) { + iterations++; + if (iterations > MAX_ITERATIONS) { + logger.warn("giving up looking for fatal errors", cause); + break; + } + final Throwable current = queue.remove(); + if (current instanceof Error) { + return Optional.of((Error) current); + } + Collections.addAll(queue, current.getSuppressed()); + if (current.getCause() != null) { + queue.add(current.getCause()); + } + } + return Optional.empty(); + } + /** * Rethrows the first exception in the list and adds all remaining to the suppressed list. * If the given list is empty no exception is thrown diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index b6681ef003da8..4cc29cac62faa 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -113,6 +113,8 @@ public class Version implements Comparable { public static final Version V_5_6_7 = new Version(V_5_6_7_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); public static final int V_5_6_8_ID = 5060899; public static final Version V_5_6_8 = new Version(V_5_6_8_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); + public static final int V_5_6_9_ID = 5060999; + public static final Version V_5_6_9 = new Version(V_5_6_9_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); @@ -137,9 +139,6 @@ public class Version implements Comparable { public static final int V_6_0_1_ID = 6000199; public static final Version V_6_0_1 = new Version(V_6_0_1_ID, org.apache.lucene.util.Version.LUCENE_7_0_1); - public static final int V_6_0_2_ID = 6000299; - public static final Version V_6_0_2 = - new Version(V_6_0_2_ID, org.apache.lucene.util.Version.LUCENE_7_0_1); public static final int V_6_1_0_ID = 6010099; public static final Version V_6_1_0 = new Version(V_6_1_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_1_1_ID = 6010199; @@ -148,12 +147,14 @@ public class Version implements Comparable { public static final Version V_6_1_2 = new Version(V_6_1_2_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_1_3_ID = 6010399; public static final Version V_6_1_3 = new Version(V_6_1_3_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); - public static final int V_6_1_4_ID = 6010499; - public static final Version V_6_1_4 = new Version(V_6_1_4_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_2_0_ID = 6020099; public static final Version V_6_2_0 = new 
Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final int V_6_2_1_ID = 6020199; public static final Version V_6_2_1 = new Version(V_6_2_1_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final int V_6_2_2_ID = 6020299; + public static final Version V_6_2_2 = new Version(V_6_2_2_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final int V_6_2_3_ID = 6020399; + public static final Version V_6_2_3 = new Version(V_6_2_3_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final int V_6_3_0_ID = 6030099; public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final int V_7_0_0_alpha1_ID = 7000001; @@ -176,12 +177,14 @@ public static Version fromId(int id) { return V_7_0_0_alpha1; case V_6_3_0_ID: return V_6_3_0; + case V_6_2_3_ID: + return V_6_2_3; + case V_6_2_2_ID: + return V_6_2_2; case V_6_2_1_ID: return V_6_2_1; case V_6_2_0_ID: return V_6_2_0; - case V_6_1_4_ID: - return V_6_1_4; case V_6_1_3_ID: return V_6_1_3; case V_6_1_2_ID: @@ -190,8 +193,6 @@ public static Version fromId(int id) { return V_6_1_1; case V_6_1_0_ID: return V_6_1_0; - case V_6_0_2_ID: - return V_6_0_2; case V_6_0_1_ID: return V_6_0_1; case V_6_0_0_ID: @@ -208,6 +209,8 @@ public static Version fromId(int id) { return V_6_0_0_alpha2; case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; + case V_5_6_9_ID: + return V_5_6_9; case V_5_6_8_ID: return V_5_6_8; case V_5_6_7_ID: @@ -407,6 +410,15 @@ public int compareTo(Version other) { return Integer.compare(this.id, other.id); } + /* + * We need the declared versions when computing the minimum compatibility version. As computing the declared versions uses reflection it + * is not cheap. Since computing the minimum compatibility version can occur often, we use this holder to compute the declared versions + * lazily once. + */ + private static class DeclaredVersionsHolder { + static final List DECLARED_VERSIONS = Collections.unmodifiableList(getDeclaredVersions(Version.class)); + } + /** * Returns the minimum compatible version based on the current * version. 
Ie a node needs to have at least the return version in order @@ -417,10 +429,10 @@ public int compareTo(Version other) { public Version minimumCompatibilityVersion() { if (major >= 6) { // all major versions from 6 onwards are compatible with last minor series of the previous major - final List declaredVersions = getDeclaredVersions(getClass()); Version bwcVersion = null; - for (int i = declaredVersions.size() - 1; i >= 0; i--) { - final Version candidateVersion = declaredVersions.get(i); + + for (int i = DeclaredVersionsHolder.DECLARED_VERSIONS.size() - 1; i >= 0; i--) { + final Version candidateVersion = DeclaredVersionsHolder.DECLARED_VERSIONS.get(i); if (candidateVersion.major == major - 1 && candidateVersion.isRelease() && after(candidateVersion)) { if (bwcVersion != null && candidateVersion.minor < bwcVersion.minor) { break; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java index d4005c84d0992..113766f3e9f21 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java @@ -138,8 +138,14 @@ public NodesStatsRequestBuilder setDiscovery(boolean discovery) { /** * Should ingest statistics be returned. */ - public NodesStatsRequestBuilder ingest(boolean ingest) { + public NodesStatsRequestBuilder setIngest(boolean ingest) { request.ingest(ingest); return this; } + + public NodesStatsRequestBuilder setAdaptiveSelection(boolean adaptiveSelection) { + request.adaptiveSelection(adaptiveSelection); + return this; + } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index 30d71a992fd95..927ac2a9148ed 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; @@ -248,7 +249,8 @@ void onGetFinishedTaskFromIndex(GetResponse response, ActionListener { +public class ClusterUpdateSettingsRequest extends AcknowledgedRequest implements ToXContentObject { + + private static final ParseField PERSISTENT = new ParseField("persistent"); + private static final ParseField TRANSIENT = new ParseField("transient"); + + private static final ObjectParser PARSER = new ObjectParser<>("cluster_update_settings_request", + false, ClusterUpdateSettingsRequest::new); + + static { + PARSER.declareObject((r, p) -> r.persistentSettings = p, (p, c) -> Settings.fromXContent(p), PERSISTENT); + PARSER.declareObject((r, t) -> r.transientSettings = t, (p, c) -> Settings.fromXContent(p), TRANSIENT); + } + private boolean flatSettings = false; private Settings transientSettings = EMPTY_SETTINGS; private Settings persistentSettings = EMPTY_SETTINGS; @@ 
-57,6 +73,29 @@ public ActionRequestValidationException validate() { return validationException; } + /** + * Sets the value of "flat_settings". + * Used only by the high-level REST client. + * + * @param flatSettings + * value of "flat_settings" flag to be set + * @return this request + */ + public ClusterUpdateSettingsRequest flatSettings(boolean flatSettings) { + this.flatSettings = flatSettings; + return this; + } + + /** + * Return settings in flat format. + * Used only by the high-level REST client. + * + * @return true if settings need to be returned in flat format; false otherwise. + */ + public boolean flatSettings() { + return flatSettings; + } + public Settings transientSettings() { return transientSettings; } @@ -92,7 +131,7 @@ public ClusterUpdateSettingsRequest transientSettings(String source, XContentTyp /** * Sets the transient settings to be updated. They will not survive a full cluster restart */ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "rawtypes"}) public ClusterUpdateSettingsRequest transientSettings(Map source) { try { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); @@ -131,7 +170,7 @@ public ClusterUpdateSettingsRequest persistentSettings(String source, XContentTy /** * Sets the persistent settings to be updated. They will get applied cross restarts */ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "rawtypes"}) public ClusterUpdateSettingsRequest persistentSettings(Map source) { try { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); @@ -156,4 +195,21 @@ public void writeTo(StreamOutput out) throws IOException { writeSettingsToStream(transientSettings, out); writeSettingsToStream(persistentSettings, out); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject(PERSISTENT.getPreferredName()); + persistentSettings.toXContent(builder, params); + builder.endObject(); + builder.startObject(TRANSIENT.getPreferredName()); + transientSettings.toXContent(builder, params); + builder.endObject(); + builder.endObject(); + return builder; + } + + public static ClusterUpdateSettingsRequest fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java index b783d314c47ca..9ce22268afd8d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java @@ -20,17 +20,33 @@ package org.elasticsearch.action.admin.cluster.settings; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.Objects; /** * A response for a cluster update settings action. 
*/ public class ClusterUpdateSettingsResponse extends AcknowledgedResponse { + private static final ParseField PERSISTENT = new ParseField("persistent"); + private static final ParseField TRANSIENT = new ParseField("transient"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "cluster_update_settings_response", true, a -> new ClusterUpdateSettingsResponse((boolean) a[0])); + static { + declareAcknowledgedField(PARSER); + PARSER.declareObject((r, p) -> r.persistentSettings = p, (p, c) -> Settings.fromXContent(p), PERSISTENT); + PARSER.declareObject((r, t) -> r.transientSettings = t, (p, c) -> Settings.fromXContent(p), TRANSIENT); + } + Settings transientSettings; Settings persistentSettings; @@ -39,6 +55,10 @@ public class ClusterUpdateSettingsResponse extends AcknowledgedResponse { this.transientSettings = Settings.EMPTY; } + ClusterUpdateSettingsResponse(boolean acknowledged) { + super(acknowledged); + } + ClusterUpdateSettingsResponse(boolean acknowledged, Settings transientSettings, Settings persistentSettings) { super(acknowledged); this.persistentSettings = persistentSettings; @@ -68,4 +88,33 @@ public void writeTo(StreamOutput out) throws IOException { Settings.writeSettingsToStream(persistentSettings, out); writeAcknowledged(out); } + + @Override + protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { + builder.startObject(PERSISTENT.getPreferredName()); + persistentSettings.toXContent(builder, params); + builder.endObject(); + builder.startObject(TRANSIENT.getPreferredName()); + transientSettings.toXContent(builder, params); + builder.endObject(); + } + + public static ClusterUpdateSettingsResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public boolean equals(Object o) { + if (super.equals(o)) { + ClusterUpdateSettingsResponse that = (ClusterUpdateSettingsResponse) o; + return Objects.equals(transientSettings, that.transientSettings) && + Objects.equals(persistentSettings, that.persistentSettings); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), transientSettings, persistentSettings); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java index c3cc0b5ebd40b..b6402d5139aad 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java @@ -23,8 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -32,7 +30,7 @@ /** * A response for a add/remove alias action. 
*/ -public class IndicesAliasesResponse extends AcknowledgedResponse implements ToXContentObject { +public class IndicesAliasesResponse extends AcknowledgedResponse { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("indices_aliases", true, args -> new IndicesAliasesResponse((boolean) args[0])); @@ -59,15 +57,7 @@ public void writeTo(StreamOutput out) throws IOException { writeAcknowledged(out); } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - addAcknowledgedField(builder); - builder.endObject(); - return builder; - } - - public static IndicesAliasesResponse fromXContent(XContentParser parser) throws IOException { + public static IndicesAliasesResponse fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } } \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java index 4607586d9fa91..de56c52f9f6de 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java @@ -23,8 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -32,7 +30,7 @@ /** * A response for a close index action. 
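Since the `{"acknowledged": ...}` rendering now lives in `AcknowledgedResponse`, subclasses like `IndicesAliasesResponse` above keep only their parser. A short sketch of the resulting round-trip, under the same parser-creation assumption as the previous example:

```java
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

public class ParseAcknowledgedOnly {
    public static void main(String[] args) throws Exception {
        String json = "{\"acknowledged\":true}";
        try (XContentParser parser = XContentType.JSON.xContent().createParser(
                NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, json)) {
            // fromXContent no longer declares an IOException it never threw.
            IndicesAliasesResponse response = IndicesAliasesResponse.fromXContent(parser);
            System.out.println(response.isAcknowledged()); // prints: true
        }
    }
}
```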
*/ -public class CloseIndexResponse extends AcknowledgedResponse implements ToXContentObject { +public class CloseIndexResponse extends AcknowledgedResponse { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("close_index", true, args -> new CloseIndexResponse((boolean) args[0])); @@ -59,15 +57,7 @@ public void writeTo(StreamOutput out) throws IOException { writeAcknowledged(out); } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - addAcknowledgedField(builder); - builder.endObject(); - return builder; - } - - public static CloseIndexResponse fromXContent(XContentParser parser) throws IOException { + public static CloseIndexResponse fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 93d3530e53680..e22a8be968ed1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -37,6 +37,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -54,9 +56,9 @@ import java.util.Set; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; /** * A request to create an index. Best created with {@link org.elasticsearch.client.Requests#createIndexRequest(String)}. 
@@ -69,7 +71,7 @@ */ public class CreateIndexRequest extends AcknowledgedRequest implements IndicesRequest, ToXContentObject { - private static final ParseField MAPPINGS = new ParseField("mappings"); + public static final ParseField MAPPINGS = new ParseField("mappings"); public static final ParseField SETTINGS = new ParseField("settings"); public static final ParseField ALIASES = new ParseField("aliases"); @@ -316,7 +318,8 @@ public CreateIndexRequest aliases(String source) { */ public CreateIndexRequest aliases(BytesReference source) { // EMPTY is safe here because we never call namedObject - try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, source)) { + try (XContentParser parser = XContentHelper + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, source)) { //move to the first alias parser.nextToken(); while ((parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -369,7 +372,7 @@ public CreateIndexRequest source(byte[] source, int offset, int length, XContent */ public CreateIndexRequest source(BytesReference source, XContentType xContentType) { Objects.requireNonNull(xContentType); - source(XContentHelper.convertToMap(source, false, xContentType).v2()); + source(XContentHelper.convertToMap(source, false, xContentType).v2(), LoggingDeprecationHandler.INSTANCE); return this; } @@ -377,17 +380,17 @@ public CreateIndexRequest source(BytesReference source, XContentType xContentTyp * Sets the settings and mappings as a single source. */ @SuppressWarnings("unchecked") - public CreateIndexRequest source(Map source) { + public CreateIndexRequest source(Map source, DeprecationHandler deprecationHandler) { for (Map.Entry entry : source.entrySet()) { String name = entry.getKey(); - if (SETTINGS.match(name)) { + if (SETTINGS.match(name, deprecationHandler)) { settings((Map) entry.getValue()); - } else if (MAPPINGS.match(name)) { + } else if (MAPPINGS.match(name, deprecationHandler)) { Map mappings = (Map) entry.getValue(); for (Map.Entry entry1 : mappings.entrySet()) { mapping(entry1.getKey(), (Map) entry1.getValue()); } - } else if (ALIASES.match(name)) { + } else if (ALIASES.match(name, deprecationHandler)) { aliases((Map) entry.getValue()); } else { // maybe custom? 
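Because `ParseField.match(...)` now takes a `DeprecationHandler` explicitly, `source(Map, ...)` grows a second argument and callers choose how deprecated field names are reported. A minimal sketch (index name and setting are illustrative):

```java
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;

import java.util.HashMap;
import java.util.Map;

public class CreateIndexSourceExample {
    public static void main(String[] args) {
        Map<String, Object> settings = new HashMap<>();
        settings.put("index.number_of_shards", 3);
        Map<String, Object> source = new HashMap<>();
        source.put("settings", settings);

        CreateIndexRequest request = new CreateIndexRequest("logs");
        // The handler is now an explicit argument instead of an implicit
        // global: here deprecated names are routed to the standard logger.
        request.source(source, LoggingDeprecationHandler.INSTANCE);
        System.out.println(request.index()); // prints: logs
    }
}
```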
@@ -521,7 +524,12 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); + innerToXContent(builder, params); + builder.endObject(); + return builder; + } + public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(SETTINGS.getPreferredName()); settings.toXContent(builder, params); builder.endObject(); @@ -541,8 +549,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws for (Map.Entry entry : customs.entrySet()) { builder.field(entry.getKey(), entry.getValue(), params); } - - builder.endObject(); return builder; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java index b42b4e9236f0e..bc5bbf9046a88 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; @@ -219,7 +220,7 @@ public CreateIndexRequestBuilder setSource(byte[] source, int offset, int length * Sets the settings and mappings as a single source. */ public CreateIndexRequestBuilder setSource(Map source) { - request.source(source); + request.source(source, LoggingDeprecationHandler.INSTANCE); return this; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java index be30a8c97b189..edc0852cb2882 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java @@ -20,26 +20,25 @@ package org.elasticsearch.action.admin.indices.create; import org.elasticsearch.Version; -import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.ShardsAcknowledgedResponse; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; /** * A response for a create index action. 
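The `toXContent` body is split into `innerToXContent` so that other requests (notably the rollover request further below) can embed the same settings/mappings/aliases fields without the wrapping object. A sketch of rendering the request to JSON, assuming the `Strings.toString(ToXContent)` convenience is available:

```java
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;

public class CreateIndexToJson {
    public static void main(String[] args) {
        CreateIndexRequest request = new CreateIndexRequest("logs")
                .settings(Settings.builder().put("index.number_of_shards", 1));
        // toXContent() wraps innerToXContent() in a single object, roughly:
        // {"settings":{"index.number_of_shards":"1"},"mappings":{},"aliases":{}}
        System.out.println(Strings.toString(request));
    }
}
```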
*/ -public class CreateIndexResponse extends AcknowledgedResponse implements ToXContentObject { +public class CreateIndexResponse extends ShardsAcknowledgedResponse { - private static final ParseField SHARDS_ACKNOWLEDGED = new ParseField("shards_acknowledged"); private static final ParseField INDEX = new ParseField("index"); private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("create_index", @@ -50,22 +49,17 @@ public class CreateIndexResponse extends AcknowledgedResponse implements ToXCont } protected static void declareFields(ConstructingObjectParser objectParser) { - declareAcknowledgedField(objectParser); - objectParser.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SHARDS_ACKNOWLEDGED, - ObjectParser.ValueType.BOOLEAN); - objectParser.declareField(constructorArg(), (parser, context) -> parser.text(), INDEX, ObjectParser.ValueType.STRING); + declareAcknowledgedAndShardsAcknowledgedFields(objectParser); + objectParser.declareField(constructorArg(), (parser, context) -> parser.textOrNull(), INDEX, ObjectParser.ValueType.STRING_OR_NULL); } - private boolean shardsAcknowledged; private String index; protected CreateIndexResponse() { } protected CreateIndexResponse(boolean acknowledged, boolean shardsAcknowledged, String index) { - super(acknowledged); - assert acknowledged || shardsAcknowledged == false; // if its not acknowledged, then shardsAcknowledged should be false too - this.shardsAcknowledged = shardsAcknowledged; + super(acknowledged, shardsAcknowledged); this.index = index; } @@ -73,7 +67,7 @@ protected CreateIndexResponse(boolean acknowledged, boolean shardsAcknowledged, public void readFrom(StreamInput in) throws IOException { super.readFrom(in); readAcknowledged(in); - shardsAcknowledged = in.readBoolean(); + readShardsAcknowledged(in); if (in.getVersion().onOrAfter(Version.V_5_6_0)) { index = in.readString(); } @@ -83,40 +77,37 @@ public void readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeAcknowledged(out); - out.writeBoolean(shardsAcknowledged); + writeShardsAcknowledged(out); if (out.getVersion().onOrAfter(Version.V_5_6_0)) { out.writeString(index); } } - /** - * Returns true if the requisite number of shards were started before - * returning from the index creation operation. If {@link #isAcknowledged()} - * is false, then this also returns false. 
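With the common fields pulled up into `ShardsAcknowledgedResponse`, parsing a create-index body looks like the sketch below; note that `index` is now declared `STRING_OR_NULL`, so a null index no longer trips the parser:

```java
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

public class ParseCreateIndexResponse {
    public static void main(String[] args) throws Exception {
        String json = "{\"acknowledged\":true,\"shards_acknowledged\":true,\"index\":\"logs\"}";
        try (XContentParser parser = XContentType.JSON.xContent().createParser(
                NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, json)) {
            CreateIndexResponse response = CreateIndexResponse.fromXContent(parser);
            System.out.println(response.isShardsAcknowledged()); // prints: true
            System.out.println(response.index());                // prints: logs
        }
    }
}
```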
- */ - public boolean isShardsAcknowledged() { - return shardsAcknowledged; - } - public String index() { return index; } - public void addCustomFields(XContentBuilder builder) throws IOException { - builder.field(SHARDS_ACKNOWLEDGED.getPreferredName(), isShardsAcknowledged()); + @Override + protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { + super.addCustomFields(builder, params); builder.field(INDEX.getPreferredName(), index()); } + public static CreateIndexResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - addAcknowledgedField(builder); - addCustomFields(builder); - builder.endObject(); - return builder; + public boolean equals(Object o) { + if (super.equals(o)) { + CreateIndexResponse that = (CreateIndexResponse) o; + return Objects.equals(index, that.index); + } + return false; } - public static CreateIndexResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), index); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java index 8217668e2177d..a788f272d1877 100755 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -32,7 +31,7 @@ /** * A response for a delete index action. */ -public class DeleteIndexResponse extends AcknowledgedResponse implements ToXContentObject { +public class DeleteIndexResponse extends AcknowledgedResponse { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("delete_index", true, args -> new DeleteIndexResponse((boolean) args[0])); @@ -60,15 +59,7 @@ public void writeTo(StreamOutput out) throws IOException { writeAcknowledged(out); } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - addAcknowledgedField(builder); - builder.endObject(); - return builder; - } - - public static DeleteIndexResponse fromXContent(XContentParser parser) throws IOException { + public static DeleteIndexResponse fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index 477656d96cb58..2a70aa836454e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -26,8 +26,6 @@ import org.elasticsearch.common.util.ArrayUtils; import java.io.IOException; -import java.util.Arrays; -import java.util.List; /** * A request to delete an index. 
Best created with {@link org.elasticsearch.client.Requests#deleteIndexRequest(String)}. @@ -122,6 +120,8 @@ public boolean humanReadable() { /** * Sets the value of "flat_settings". + * Used only by the high-level REST client. + * * @param flatSettings value of "flat_settings" flag to be set * @return this request */ @@ -132,6 +132,8 @@ public GetIndexRequest flatSettings(boolean flatSettings) { /** * Return settings in flat format. + * Used only by the high-level REST client. + * * @return true if settings need to be returned in flat format; false otherwise. */ public boolean flatSettings() { @@ -140,6 +142,8 @@ public boolean flatSettings() { /** * Sets the value of "include_defaults". + * Used only by the high-level REST client. + * * @param includeDefaults value of "include_defaults" to be set. * @return this request */ @@ -150,6 +154,8 @@ public GetIndexRequest includeDefaults(boolean includeDefaults) { /** * Whether to return all default settings for each of the indices. + * Used only by the high-level REST client. + * * @return true if defaults settings for each of the indices need to returned; * false otherwise. */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java index f427a316c2e81..8ccc5c8006a18 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java @@ -23,8 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -32,7 +30,7 @@ /** * The response of put mapping operation. 
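The flags themselves are unchanged; the javadoc now records that they are honored only by the high-level REST client, which serializes them into query-string parameters. A small usage sketch (the index pattern is illustrative):

```java
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;

public class GetIndexFlagsExample {
    public static void main(String[] args) {
        GetIndexRequest request = new GetIndexRequest();
        request.indices("logs-*");
        // Both flags become URL parameters in the high-level REST client;
        // the transport layer does not read them.
        request.flatSettings(true).includeDefaults(true);
        System.out.println(request.flatSettings() + " " + request.includeDefaults());
    }
}
```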
*/ -public class PutMappingResponse extends AcknowledgedResponse implements ToXContentObject { +public class PutMappingResponse extends AcknowledgedResponse { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("put_mapping", true, args -> new PutMappingResponse((boolean) args[0])); @@ -61,15 +59,7 @@ public void writeTo(StreamOutput out) throws IOException { writeAcknowledged(out); } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - addAcknowledgedField(builder); - builder.endObject(); - return builder; - } - - public static PutMappingResponse fromXContent(XContentParser parser) throws IOException { + public static PutMappingResponse fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java index 4e98c60265c76..3918273cec90d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java @@ -20,54 +20,33 @@ package org.elasticsearch.action.admin.indices.open; import org.elasticsearch.Version; -import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.ShardsAcknowledgedResponse; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; - /** * A response for a open index action. */ -public class OpenIndexResponse extends AcknowledgedResponse implements ToXContentObject { - private static final String SHARDS_ACKNOWLEDGED = "shards_acknowledged"; - private static final ParseField SHARDS_ACKNOWLEDGED_PARSER = new ParseField(SHARDS_ACKNOWLEDGED); +public class OpenIndexResponse extends ShardsAcknowledgedResponse { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("open_index", true, args -> new OpenIndexResponse((boolean) args[0], (boolean) args[1])); static { - declareAcknowledgedField(PARSER); - PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SHARDS_ACKNOWLEDGED_PARSER, - ObjectParser.ValueType.BOOLEAN); + declareAcknowledgedAndShardsAcknowledgedFields(PARSER); } - private boolean shardsAcknowledged; - OpenIndexResponse() { } OpenIndexResponse(boolean acknowledged, boolean shardsAcknowledged) { - super(acknowledged); - assert acknowledged || shardsAcknowledged == false; // if its not acknowledged, then shards acked should be false too - this.shardsAcknowledged = shardsAcknowledged; - } - - /** - * Returns true if the requisite number of shards were started before - * returning from the indices opening operation. If {@link #isAcknowledged()} - * is false, then this also returns false. 
- */ - public boolean isShardsAcknowledged() { - return shardsAcknowledged; + super(acknowledged, shardsAcknowledged); } @Override @@ -75,7 +54,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); readAcknowledged(in); if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - shardsAcknowledged = in.readBoolean(); + readShardsAcknowledged(in); } } @@ -84,20 +63,11 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeAcknowledged(out); if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(shardsAcknowledged); + writeShardsAcknowledged(out); } } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - addAcknowledgedField(builder); - builder.field(SHARDS_ACKNOWLEDGED, isShardsAcknowledged()); - builder.endObject(); - return builder; - } - - public static OpenIndexResponse fromXContent(XContentParser parser) throws IOException { + public static OpenIndexResponse fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java index 83dc73f9e94b3..afbc9a554ed5e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java @@ -20,30 +20,16 @@ package org.elasticsearch.action.admin.indices.rollover; import org.elasticsearch.Version; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; -import java.util.Set; +import java.util.Objects; /** * Base class for rollover request conditions */ -public abstract class Condition implements NamedWriteable { - - public static ObjectParser, Void> PARSER = new ObjectParser<>("conditions", null); - static { - PARSER.declareString((conditions, s) -> - conditions.add(new MaxAgeCondition(TimeValue.parseTimeValue(s, MaxAgeCondition.NAME))), - new ParseField(MaxAgeCondition.NAME)); - PARSER.declareLong((conditions, value) -> - conditions.add(new MaxDocsCondition(value)), new ParseField(MaxDocsCondition.NAME)); - PARSER.declareString((conditions, s) -> - conditions.add(new MaxSizeCondition(ByteSizeValue.parseBytesSizeValue(s, MaxSizeCondition.NAME))), - new ParseField(MaxSizeCondition.NAME)); - } +public abstract class Condition implements NamedWriteable, ToXContentFragment { protected T value; protected final String name; @@ -62,6 +48,24 @@ boolean includedInVersion(Version version) { return true; } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Condition condition = (Condition) o; + return Objects.equals(value, condition.value) && + Objects.equals(name, condition.name); + } + + @Override + public int hashCode() { + return Objects.hash(value, name); + } + @Override public final String toString() { return "[" + name + ": " + value + "]"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java 
index 9208193c9849e..c0b0d2a3297da 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -55,6 +56,12 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { + //TODO here we should just use TimeValue#writeTo and same for de-serialization in the constructor, we lose information this way out.writeLong(value.getMillis()); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(NAME, value.getStringRep()); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java index 7c1f802389e54..8fddb870e59e9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -55,4 +56,9 @@ public String getWriteableName() { public void writeTo(StreamOutput out) throws IOException { out.writeLong(value); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(NAME, value); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java index 91b18bc050623..bb6f37634ce87 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -61,6 +62,12 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { + //TODO here we should just use ByteSizeValue#writeTo and same for de-serialization in the constructor out.writeVLong(value.getBytes()); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(NAME, value.getStringRep()); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 34d56239b5ce8..fe5ad65c4799b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import 
org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.ParseField; @@ -30,40 +29,57 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; -import java.util.HashSet; +import java.util.HashMap; import java.util.Map; -import java.util.Objects; -import java.util.Set; import static org.elasticsearch.action.ValidateActions.addValidationError; /** * Request class to swap index under an alias upon satisfying conditions */ -public class RolloverRequest extends AcknowledgedRequest implements IndicesRequest { +public class RolloverRequest extends AcknowledgedRequest implements IndicesRequest, ToXContentObject { + + private static final ObjectParser PARSER = new ObjectParser<>("rollover"); + private static final ObjectParser, Void> CONDITION_PARSER = new ObjectParser<>("conditions"); + + private static final ParseField CONDITIONS = new ParseField("conditions"); + private static final ParseField MAX_AGE_CONDITION = new ParseField(MaxAgeCondition.NAME); + private static final ParseField MAX_DOCS_CONDITION = new ParseField(MaxDocsCondition.NAME); + private static final ParseField MAX_SIZE_CONDITION = new ParseField(MaxSizeCondition.NAME); - public static final ObjectParser PARSER = new ObjectParser<>("conditions", null); static { - PARSER.declareField((parser, request, context) -> Condition.PARSER.parse(parser, request.conditions, null), - new ParseField("conditions"), ObjectParser.ValueType.OBJECT); + CONDITION_PARSER.declareString((conditions, s) -> + conditions.put(MaxAgeCondition.NAME, new MaxAgeCondition(TimeValue.parseTimeValue(s, MaxAgeCondition.NAME))), + MAX_AGE_CONDITION); + CONDITION_PARSER.declareLong((conditions, value) -> + conditions.put(MaxDocsCondition.NAME, new MaxDocsCondition(value)), MAX_DOCS_CONDITION); + CONDITION_PARSER.declareString((conditions, s) -> + conditions.put(MaxSizeCondition.NAME, new MaxSizeCondition(ByteSizeValue.parseBytesSizeValue(s, MaxSizeCondition.NAME))), + MAX_SIZE_CONDITION); + + PARSER.declareField((parser, request, context) -> CONDITION_PARSER.parse(parser, request.conditions, null), + CONDITIONS, ObjectParser.ValueType.OBJECT); PARSER.declareField((parser, request, context) -> request.createIndexRequest.settings(parser.map()), - new ParseField("settings"), ObjectParser.ValueType.OBJECT); + CreateIndexRequest.SETTINGS, ObjectParser.ValueType.OBJECT); PARSER.declareField((parser, request, context) -> { for (Map.Entry mappingsEntry : parser.map().entrySet()) { - request.createIndexRequest.mapping(mappingsEntry.getKey(), - (Map) mappingsEntry.getValue()); + request.createIndexRequest.mapping(mappingsEntry.getKey(), (Map) mappingsEntry.getValue()); } - }, new ParseField("mappings"), ObjectParser.ValueType.OBJECT); + }, CreateIndexRequest.MAPPINGS, ObjectParser.ValueType.OBJECT); PARSER.declareField((parser, request, context) -> request.createIndexRequest.aliases(parser.map()), - new ParseField("aliases"), ObjectParser.ValueType.OBJECT); + CreateIndexRequest.ALIASES, 
ObjectParser.ValueType.OBJECT); } private String alias; private String newIndexName; private boolean dryRun; - private Set conditions = new HashSet<>(2); + private Map conditions = new HashMap<>(2); + //the index name "_na_" is never read back, what matters are settings, mappings and aliases private CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_"); RolloverRequest() {} @@ -75,13 +91,10 @@ public RolloverRequest(String alias, String newIndexName) { @Override public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = createIndexRequest == null ? null : createIndexRequest.validate(); + ActionRequestValidationException validationException = createIndexRequest.validate(); if (alias == null) { validationException = addValidationError("index alias is missing", validationException); } - if (createIndexRequest == null) { - validationException = addValidationError("create index request is missing", validationException); - } return validationException; } @@ -93,7 +106,8 @@ public void readFrom(StreamInput in) throws IOException { dryRun = in.readBoolean(); int size = in.readVInt(); for (int i = 0; i < size; i++) { - this.conditions.add(in.readNamedWriteable(Condition.class)); + Condition condition = in.readNamedWriteable(Condition.class); + this.conditions.put(condition.name, condition); } createIndexRequest = new CreateIndexRequest(); createIndexRequest.readFrom(in); @@ -106,7 +120,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(newIndexName); out.writeBoolean(dryRun); out.writeVInt(conditions.size()); - for (Condition condition : conditions) { + for (Condition condition : conditions.values()) { if (condition.includedInVersion(out.getVersion())) { out.writeNamedWriteable(condition); } @@ -148,76 +162,75 @@ public void dryRun(boolean dryRun) { * Adds condition to check if the index is at least age old */ public void addMaxIndexAgeCondition(TimeValue age) { - this.conditions.add(new MaxAgeCondition(age)); + MaxAgeCondition maxAgeCondition = new MaxAgeCondition(age); + if (this.conditions.containsKey(maxAgeCondition.name)) { + throw new IllegalArgumentException(maxAgeCondition.name + " condition is already set"); + } + this.conditions.put(maxAgeCondition.name, maxAgeCondition); } /** * Adds condition to check if the index has at least numDocs */ public void addMaxIndexDocsCondition(long numDocs) { - this.conditions.add(new MaxDocsCondition(numDocs)); + MaxDocsCondition maxDocsCondition = new MaxDocsCondition(numDocs); + if (this.conditions.containsKey(maxDocsCondition.name)) { + throw new IllegalArgumentException(maxDocsCondition.name + " condition is already set"); + } + this.conditions.put(maxDocsCondition.name, maxDocsCondition); } /** * Adds a size-based condition to check if the index size is at least size. 
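Because conditions are now keyed by name in a map, adding the same condition twice is rejected up front instead of silently collapsing into a set entry. A sketch of the new guard in action:

```java
import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
import org.elasticsearch.common.unit.TimeValue;

public class DuplicateConditionExample {
    public static void main(String[] args) {
        RolloverRequest request = new RolloverRequest("logs-alias", null);
        request.addMaxIndexAgeCondition(TimeValue.timeValueDays(7));
        request.addMaxIndexDocsCondition(1_000_000L);
        try {
            // A second max_age condition is rejected.
            request.addMaxIndexAgeCondition(TimeValue.timeValueDays(1));
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // prints: max_age condition is already set
        }
    }
}
```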
*/ public void addMaxIndexSizeCondition(ByteSizeValue size) { - this.conditions.add(new MaxSizeCondition(size)); + MaxSizeCondition maxSizeCondition = new MaxSizeCondition(size); + if (this.conditions.containsKey(maxSizeCondition.name)) { + throw new IllegalArgumentException(maxSizeCondition.name + " condition is already set"); + } + this.conditions.put(maxSizeCondition.name, maxSizeCondition); } - /** - * Sets rollover index creation request to override index settings when - * the rolled over index has to be created - */ - public void setCreateIndexRequest(CreateIndexRequest createIndexRequest) { - this.createIndexRequest = Objects.requireNonNull(createIndexRequest, "create index request must not be null");; - } - boolean isDryRun() { + public boolean isDryRun() { return dryRun; } - Set getConditions() { + Map getConditions() { return conditions; } - String getAlias() { + public String getAlias() { return alias; } - String getNewIndexName() { + public String getNewIndexName() { return newIndexName; } - CreateIndexRequest getCreateIndexRequest() { - return createIndexRequest; - } - /** - * Sets the number of shard copies that should be active for creation of the - * new rollover index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will - * wait for one shard copy (the primary) to become active. Set this value to - * {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active - * before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any - * non-negative integer, up to the number of copies per shard (number of replicas + 1), - * to wait for the desired amount of shard copies to become active before returning. - * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link RolloverResponse#isShardsAcknowledged()} to - * determine if the requisite shard copies were all started before returning or timing out. - * - * @param waitForActiveShards number of active shard copies to wait on + * Returns the inner {@link CreateIndexRequest}. Allows configuring mappings, settings and aliases for the new index. */ - public void setWaitForActiveShards(ActiveShardCount waitForActiveShards) { - this.createIndexRequest.waitForActiveShards(waitForActiveShards); + public CreateIndexRequest getCreateIndexRequest() { + return createIndexRequest; } - /** - * A shortcut for {@link #setWaitForActiveShards(ActiveShardCount)} where the numerical - * shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)} - * to get the ActiveShardCount.
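With the setter helpers removed, callers configure the new index through the exposed inner `CreateIndexRequest`, which is also what the request builder now does. A sketch (settings and shard count are illustrative):

```java
import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.common.settings.Settings;

public class RolloverInnerRequestExample {
    public static void main(String[] args) {
        RolloverRequest request = new RolloverRequest("logs-alias", null);
        // Settings, mappings, aliases and wait-for-active-shards all go
        // through the inner CreateIndexRequest now.
        request.getCreateIndexRequest()
                .settings(Settings.builder().put("index.number_of_shards", 3))
                .waitForActiveShards(ActiveShardCount.from(2));
    }
}
```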
- */ - public void setWaitForActiveShards(final int waitForActiveShards) { - setWaitForActiveShards(ActiveShardCount.from(waitForActiveShards)); + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + createIndexRequest.innerToXContent(builder, params); + + builder.startObject(CONDITIONS.getPreferredName()); + for (Condition condition : conditions.values()) { + condition.toXContent(builder, params); + } + builder.endObject(); + + builder.endObject(); + return builder; } + public void fromXContent(XContentParser parser) throws IOException { + PARSER.parse(parser, this, null); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java index 818def9d19a09..be331547b2a20 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java @@ -93,7 +93,7 @@ public RolloverRequestBuilder mapping(String type, String source) { * @param waitForActiveShards number of active shard copies to wait on */ public RolloverRequestBuilder waitForActiveShards(ActiveShardCount waitForActiveShards) { - this.request.setWaitForActiveShards(waitForActiveShards); + this.request.getCreateIndexRequest().waitForActiveShards(waitForActiveShards); return this; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java index 7e2ec1677740d..d3ba00cdfcf06 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java @@ -19,51 +19,62 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.master.ShardsAcknowledgedResponse; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; -import java.util.AbstractMap; -import java.util.HashSet; +import java.util.HashMap; import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; - -public final class RolloverResponse extends ActionResponse implements ToXContentObject { - - private static final String NEW_INDEX = "new_index"; - private static final String OLD_INDEX = "old_index"; - private static final String DRY_RUN = "dry_run"; - private static final String ROLLED_OVER = "rolled_over"; - private static final String CONDITIONS = "conditions"; - private static final String ACKNOWLEDGED = "acknowledged"; - private static final String SHARDS_ACKED = "shards_acknowledged"; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public final class RolloverResponse extends ShardsAcknowledgedResponse implements ToXContentObject { + + private 
static final ParseField NEW_INDEX = new ParseField("new_index"); + private static final ParseField OLD_INDEX = new ParseField("old_index"); + private static final ParseField DRY_RUN = new ParseField("dry_run"); + private static final ParseField ROLLED_OVER = new ParseField("rolled_over"); + private static final ParseField CONDITIONS = new ParseField("conditions"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("rollover", + true, args -> new RolloverResponse((String) args[0], (String) args[1], (Map) args[2], + (Boolean)args[3], (Boolean)args[4], (Boolean) args[5], (Boolean) args[6])); + + static { + PARSER.declareField(constructorArg(), (parser, context) -> parser.text(), OLD_INDEX, ObjectParser.ValueType.STRING); + PARSER.declareField(constructorArg(), (parser, context) -> parser.text(), NEW_INDEX, ObjectParser.ValueType.STRING); + PARSER.declareObject(constructorArg(), (parser, context) -> parser.map(), CONDITIONS); + PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), DRY_RUN, ObjectParser.ValueType.BOOLEAN); + PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), ROLLED_OVER, ObjectParser.ValueType.BOOLEAN); + declareAcknowledgedAndShardsAcknowledgedFields(PARSER); + } private String oldIndex; private String newIndex; - private Set> conditionStatus; + private Map conditionStatus; private boolean dryRun; private boolean rolledOver; - private boolean acknowledged; - private boolean shardsAcknowledged; RolloverResponse() { } - RolloverResponse(String oldIndex, String newIndex, Set conditionResults, - boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcknowledged) { + RolloverResponse(String oldIndex, String newIndex, Map conditionResults, + boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcknowledged) { + super(acknowledged, shardsAcknowledged); this.oldIndex = oldIndex; this.newIndex = newIndex; this.dryRun = dryRun; this.rolledOver = rolledOver; - this.acknowledged = acknowledged; - this.shardsAcknowledged = shardsAcknowledged; - this.conditionStatus = conditionResults.stream() - .map(result -> new AbstractMap.SimpleEntry<>(result.condition.toString(), result.matched)) - .collect(Collectors.toSet()); + this.conditionStatus = conditionResults; } /** @@ -83,7 +94,7 @@ public String getNewIndex() { /** * Returns the statuses of all the request conditions */ - public Set> getConditionStatus() { + public Map getConditionStatus() { return conditionStatus; } @@ -101,42 +112,20 @@ public boolean isRolledOver() { return rolledOver; } - /** - * Returns true if the creation of the new rollover index and switching of the - * alias to the newly created index was successful, and returns false otherwise. - * If {@link #isDryRun()} is true, then this will also return false. If this - * returns false, then {@link #isShardsAcknowledged()} will also return false. - */ - public boolean isAcknowledged() { - return acknowledged; - } - - /** - * Returns true if the requisite number of shards were started in the newly - * created rollover index before returning. If {@link #isAcknowledged()} is - * false, then this will also return false. 
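On the response side, condition results become a plain `Map<String, Boolean>` keyed by the condition's `toString()` form. A hedged sketch of parsing and consuming it, again assuming the String `createParser` overload used in the earlier examples:

```java
import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

import java.util.Map;

public class RolloverConditionStatus {
    public static void main(String[] args) throws Exception {
        String json = "{\"old_index\":\"logs-000001\",\"new_index\":\"logs-000002\","
                + "\"rolled_over\":true,\"dry_run\":false,\"acknowledged\":true,"
                + "\"shards_acknowledged\":true,"
                + "\"conditions\":{\"[max_docs: 1000000]\":true}}";
        try (XContentParser parser = XContentType.JSON.xContent().createParser(
                NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, json)) {
            RolloverResponse response = RolloverResponse.fromXContent(parser);
            for (Map.Entry<String, Boolean> condition : response.getConditionStatus().entrySet()) {
                System.out.println(condition.getKey() + " -> " + condition.getValue());
            }
        }
    }
}
```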
- */ - public boolean isShardsAcknowledged() { - return shardsAcknowledged; - } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); oldIndex = in.readString(); newIndex = in.readString(); int conditionSize = in.readVInt(); - Set> conditions = new HashSet<>(conditionSize); + conditionStatus = new HashMap<>(conditionSize); for (int i = 0; i < conditionSize; i++) { - String condition = in.readString(); - boolean satisfied = in.readBoolean(); - conditions.add(new AbstractMap.SimpleEntry<>(condition, satisfied)); + conditionStatus.put(in.readString(), in.readBoolean()); } - conditionStatus = conditions; dryRun = in.readBoolean(); rolledOver = in.readBoolean(); - acknowledged = in.readBoolean(); - shardsAcknowledged = in.readBoolean(); + readAcknowledged(in); + readShardsAcknowledged(in); } @Override @@ -145,31 +134,49 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(oldIndex); out.writeString(newIndex); out.writeVInt(conditionStatus.size()); - for (Map.Entry entry : conditionStatus) { + for (Map.Entry entry : conditionStatus.entrySet()) { out.writeString(entry.getKey()); out.writeBoolean(entry.getValue()); } out.writeBoolean(dryRun); out.writeBoolean(rolledOver); - out.writeBoolean(acknowledged); - out.writeBoolean(shardsAcknowledged); + writeAcknowledged(out); + writeShardsAcknowledged(out); } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(OLD_INDEX, oldIndex); - builder.field(NEW_INDEX, newIndex); - builder.field(ROLLED_OVER, rolledOver); - builder.field(DRY_RUN, dryRun); - builder.field(ACKNOWLEDGED, acknowledged); - builder.field(SHARDS_ACKED, shardsAcknowledged); - builder.startObject(CONDITIONS); - for (Map.Entry entry : conditionStatus) { + protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { + super.addCustomFields(builder, params); + builder.field(OLD_INDEX.getPreferredName(), oldIndex); + builder.field(NEW_INDEX.getPreferredName(), newIndex); + builder.field(ROLLED_OVER.getPreferredName(), rolledOver); + builder.field(DRY_RUN.getPreferredName(), dryRun); + builder.startObject(CONDITIONS.getPreferredName()); + for (Map.Entry entry : conditionStatus.entrySet()) { builder.field(entry.getKey(), entry.getValue()); } builder.endObject(); - builder.endObject(); - return builder; + } + + public static RolloverResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public boolean equals(Object o) { + if (super.equals(o)) { + RolloverResponse that = (RolloverResponse) o; + return dryRun == that.dryRun && + rolledOver == that.rolledOver && + Objects.equals(oldIndex, that.oldIndex) && + Objects.equals(newIndex, that.newIndex) && + Objects.equals(conditionStatus, that.conditionStatus); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), oldIndex, newIndex, conditionStatus, dryRun, rolledOver); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index ded01077da2af..a5385c42aa0af 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -51,9 +51,10 @@ import 
org.elasticsearch.transport.TransportService; import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Locale; -import java.util.Set; +import java.util.Map; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -122,7 +123,7 @@ protected void masterOperation(final RolloverRequest rolloverRequest, final Clus new ActionListener() { @Override public void onResponse(IndicesStatsResponse statsResponse) { - final Set conditionResults = evaluateConditions(rolloverRequest.getConditions(), + final Map conditionResults = evaluateConditions(rolloverRequest.getConditions().values(), metaData.index(sourceIndexName), statsResponse); if (rolloverRequest.isDryRun()) { @@ -130,7 +131,7 @@ public void onResponse(IndicesStatsResponse statsResponse) { new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, true, false, false, false)); return; } - if (conditionResults.size() == 0 || conditionResults.stream().anyMatch(result -> result.matched)) { + if (conditionResults.size() == 0 || conditionResults.values().stream().anyMatch(result -> result)) { CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(unresolvedName, rolloverIndexName, rolloverRequest); createIndexService.createIndex(updateRequest, ActionListener.wrap(createIndexClusterStateUpdateResponse -> { @@ -197,17 +198,17 @@ static String generateRolloverIndexName(String sourceIndexName, IndexNameExpress } } - static Set evaluateConditions(final Set conditions, - final DocsStats docsStats, final IndexMetaData metaData) { + static Map evaluateConditions(final Collection conditions, + final DocsStats docsStats, final IndexMetaData metaData) { final long numDocs = docsStats == null ? 0 : docsStats.getCount(); final long indexSize = docsStats == null ? 
0 : docsStats.getTotalSizeInBytes(); final Condition.Stats stats = new Condition.Stats(numDocs, metaData.getCreationDate(), new ByteSizeValue(indexSize)); return conditions.stream() .map(condition -> condition.evaluate(stats)) - .collect(Collectors.toSet()); + .collect(Collectors.toMap(result -> result.condition.toString(), result -> result.matched)); } - static Set evaluateConditions(final Set conditions, final IndexMetaData metaData, + static Map evaluateConditions(final Collection conditions, final IndexMetaData metaData, final IndicesStatsResponse statsResponse) { return evaluateConditions(conditions, statsResponse.getPrimaries().getDocs(), metaData); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 1553a528c57f9..766c3323c9409 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -436,7 +437,8 @@ public PutIndexTemplateRequest aliases(String source) { */ public PutIndexTemplateRequest aliases(BytesReference source) { // EMPTY is safe here because we never call namedObject - try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, source)) { + try (XContentParser parser = XContentHelper + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, source)) { //move to the first alias parser.nextToken(); while ((parser.nextToken()) != XContentParser.Token.END_OBJECT) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index d868f0becf88a..3a081c0ed3ce1 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentParser; @@ -304,7 +305,9 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null // now parse the action // EMPTY is safe here because we never call namedObject - try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, data.slice(from, nextMarker - from))) { + try (XContentParser parser = xContent + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, + data.slice(from, nextMarker - from).streamInput())) { // move pointers from = nextMarker + 1; @@ -348,45 +351,45 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null if (token == XContentParser.Token.FIELD_NAME) { currentFieldName 
= parser.currentName(); } else if (token.isValue()) { - if (INDEX.match(currentFieldName)){ + if (INDEX.match(currentFieldName, parser.getDeprecationHandler())){ if (!allowExplicitIndex) { throw new IllegalArgumentException("explicit index in bulk is not allowed"); } index = parser.text(); - } else if (TYPE.match(currentFieldName)) { + } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { type = parser.text(); - } else if (ID.match(currentFieldName)) { + } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { id = parser.text(); - } else if (ROUTING.match(currentFieldName)) { + } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { routing = parser.text(); - } else if (PARENT.match(currentFieldName)) { + } else if (PARENT.match(currentFieldName, parser.getDeprecationHandler())) { parent = parser.text(); - } else if (OP_TYPE.match(currentFieldName)) { + } else if (OP_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { opType = parser.text(); - } else if (VERSION.match(currentFieldName)) { + } else if (VERSION.match(currentFieldName, parser.getDeprecationHandler())) { version = parser.longValue(); - } else if (VERSION_TYPE.match(currentFieldName)) { + } else if (VERSION_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { versionType = VersionType.fromString(parser.text()); - } else if (RETRY_ON_CONFLICT.match(currentFieldName)) { + } else if (RETRY_ON_CONFLICT.match(currentFieldName, parser.getDeprecationHandler())) { retryOnConflict = parser.intValue(); - } else if (PIPELINE.match(currentFieldName)) { + } else if (PIPELINE.match(currentFieldName, parser.getDeprecationHandler())) { pipeline = parser.text(); - } else if (FIELDS.match(currentFieldName)) { + } else if (FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for parameter [fields] while a list is expected"); - } else if (SOURCE.match(currentFieldName)) { + } else if (SOURCE.match(currentFieldName, parser.getDeprecationHandler())) { fetchSourceContext = FetchSourceContext.fromXContent(parser); } else { throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { - if (FIELDS.match(currentFieldName)) { + if (FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead"); List values = parser.list(); fields = values.toArray(new String[values.size()]); } else { throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); } - } else if (token == XContentParser.Token.START_OBJECT && SOURCE.match(currentFieldName)) { + } else if (token == XContentParser.Token.START_OBJECT && SOURCE.match(currentFieldName, parser.getDeprecationHandler())) { fetchSourceContext = FetchSourceContext.fromXContent(parser); } else if (token != XContentParser.Token.VALUE_NULL) { throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); @@ -429,7 +432,7 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null .parent(parent); // EMPTY is safe here because we never call namedObject try (XContentParser 
sliceParser = xContent.createParser(NamedXContentRegistry.EMPTY, - sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType))) { + LoggingDeprecationHandler.INSTANCE, sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType).streamInput())) { updateRequest.fromXContent(sliceParser); } if (fetchSourceContext != null) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java index b173fc074bd82..9985d23b9badb 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -22,7 +22,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -102,7 +102,7 @@ static class RetryHandler implements ActionListener { this.backoff = backoffPolicy.iterator(); this.consumer = consumer; this.listener = listener; - this.logger = ServerLoggers.getLogger(getClass(), settings); + this.logger = Loggers.getLogger(getClass(), settings); this.scheduler = scheduler; // in contrast to System.currentTimeMillis(), nanoTime() uses a monotonic clock under the hood this.startTimestampNanos = System.nanoTime(); diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index a7b63da8974fd..31e5e2dfff20a 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -418,30 +418,30 @@ private static void parseDocuments(XContentParser parser, List items, @Nul if (token == Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (INDEX.match(currentFieldName)) { + if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { if (!allowExplicitIndex) { throw new IllegalArgumentException("explicit index in multi get is not allowed"); } index = parser.text(); - } else if (TYPE.match(currentFieldName)) { + } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { type = parser.text(); - } else if (ID.match(currentFieldName)) { + } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { id = parser.text(); - } else if (ROUTING.match(currentFieldName)) { + } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { routing = parser.text(); - } else if (PARENT.match(currentFieldName)) { + } else if (PARENT.match(currentFieldName, parser.getDeprecationHandler())) { parent = parser.text(); - } else if (FIELDS.match(currentFieldName)) { + } else if (FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { throw new ParsingException(parser.getTokenLocation(), "Unsupported field [fields] used, expected [stored_fields] instead"); - } else if (STORED_FIELDS.match(currentFieldName)) { + } else if (STORED_FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { storedFields = new ArrayList<>(); storedFields.add(parser.text()); - } else if (VERSION.match(currentFieldName)) { + } else if (VERSION.match(currentFieldName, parser.getDeprecationHandler())) { version = parser.longValue(); - } else if 
(VERSION_TYPE.match(currentFieldName)) { + } else if (VERSION_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { versionType = VersionType.fromString(parser.text()); - } else if (SOURCE.match(currentFieldName)) { + } else if (SOURCE.match(currentFieldName, parser.getDeprecationHandler())) { // check lenient to avoid interpreting the value as string but parse strict in order to provoke an error early on. if (parser.isBooleanValueLenient()) { fetchSourceContext = new FetchSourceContext(parser.booleanValue(), fetchSourceContext.includes(), @@ -456,15 +456,15 @@ private static void parseDocuments(XContentParser parser, List items, @Nul throw new ElasticsearchParseException("failed to parse multi get request. unknown field [{}]", currentFieldName); } } else if (token == Token.START_ARRAY) { - if (FIELDS.match(currentFieldName)) { + if (FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { throw new ParsingException(parser.getTokenLocation(), "Unsupported field [fields] used, expected [stored_fields] instead"); - } else if (STORED_FIELDS.match(currentFieldName)) { + } else if (STORED_FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { storedFields = new ArrayList<>(); while ((token = parser.nextToken()) != Token.END_ARRAY) { storedFields.add(parser.text()); } - } else if (SOURCE.match(currentFieldName)) { + } else if (SOURCE.match(currentFieldName, parser.getDeprecationHandler())) { ArrayList includes = new ArrayList<>(); while ((token = parser.nextToken()) != Token.END_ARRAY) { includes.add(parser.text()); @@ -474,7 +474,7 @@ private static void parseDocuments(XContentParser parser, List items, @Nul } } else if (token == Token.START_OBJECT) { - if (SOURCE.match(currentFieldName)) { + if (SOURCE.match(currentFieldName, parser.getDeprecationHandler())) { List currentList = null, includes = null, excludes = null; while ((token = parser.nextToken()) != Token.END_OBJECT) { diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java index 9cd9f71a6c53a..996309aff57e3 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java @@ -204,22 +204,24 @@ private static MultiGetItemResponse parseItem(XContentParser parser) throws IOEx switch (token) { case FIELD_NAME: currentFieldName = parser.currentName(); - if (INDEX.match(currentFieldName) == false && TYPE.match(currentFieldName) == false && - ID.match(currentFieldName) == false && ERROR.match(currentFieldName) == false) { + if (INDEX.match(currentFieldName, parser.getDeprecationHandler()) == false + && TYPE.match(currentFieldName, parser.getDeprecationHandler()) == false + && ID.match(currentFieldName, parser.getDeprecationHandler()) == false + && ERROR.match(currentFieldName, parser.getDeprecationHandler()) == false) { getResult = GetResult.fromXContentEmbedded(parser, index, type, id); } break; case VALUE_STRING: - if (INDEX.match(currentFieldName)) { + if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { index = parser.text(); - } else if (TYPE.match(currentFieldName)) { + } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { type = parser.text(); - } else if (ID.match(currentFieldName)) { + } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { id = parser.text(); } break; case START_OBJECT: - if (ERROR.match(currentFieldName)) { + if 
(ERROR.match(currentFieldName, parser.getDeprecationHandler())) { exception = ElasticsearchException.fromXContent(parser); } break; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java index 39d7f96e8e6f6..170f0bc8518cf 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.VersionType; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Pipeline; @@ -195,8 +196,17 @@ private static List parseDocs(Map config) { dataMap, MetaData.ROUTING.getFieldName()); String parent = ConfigurationUtils.readOptionalStringOrIntProperty(null, null, dataMap, MetaData.PARENT.getFieldName()); + Long version = null; + if (dataMap.containsKey(MetaData.VERSION.getFieldName())) { + version = (Long) ConfigurationUtils.readObject(null, null, dataMap, MetaData.VERSION.getFieldName()); + } + VersionType versionType = null; + if (dataMap.containsKey(MetaData.VERSION_TYPE.getFieldName())) { + versionType = VersionType.fromString(ConfigurationUtils.readStringProperty(null, null, dataMap, + MetaData.VERSION_TYPE.getFieldName())); + } IngestDocument ingestDocument = - new IngestDocument(index, type, id, routing, parent, document); + new IngestDocument(index, type, id, routing, parent, version, versionType, document); ingestDocumentList.add(ingestDocument); } return ingestDocumentList; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java b/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java index 64365407f2379..87168cb7a9bba 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java @@ -68,10 +68,10 @@ IngestDocument getIngestDocument() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("doc"); - Map metadataMap = ingestDocument.extractMetadata(); - for (Map.Entry metadata : metadataMap.entrySet()) { + Map metadataMap = ingestDocument.extractMetadata(); + for (Map.Entry metadata : metadataMap.entrySet()) { if (metadata.getValue() != null) { - builder.field(metadata.getKey().getFieldName(), metadata.getValue()); + builder.field(metadata.getKey().getFieldName(), metadata.getValue().toString()); } } builder.field("_source", ingestDocument.getSourceAndMetadata()); diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index 2aacca69f2f20..1d9512f1c1162 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import 
org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContent; @@ -206,7 +207,9 @@ public static void readMultiLineFormat(BytesReference data, IndicesOptions defaultOptions = SearchRequest.DEFAULT_INDICES_OPTIONS; // now parse the action if (nextMarker - from > 0) { - try (XContentParser parser = xContent.createParser(registry, data.slice(from, nextMarker - from))) { + try (XContentParser parser = xContent + .createParser(registry, LoggingDeprecationHandler.INSTANCE, + data.slice(from, nextMarker - from).streamInput())) { Map source = parser.map(); for (Map.Entry entry : source.entrySet()) { Object value = entry.getValue(); @@ -242,7 +245,7 @@ public static void readMultiLineFormat(BytesReference data, break; } BytesReference bytes = data.slice(from, nextMarker - from); - try (XContentParser parser = xContent.createParser(registry, bytes)) { + try (XContentParser parser = xContent.createParser(registry, LoggingDeprecationHandler.INSTANCE, bytes.streamInput())) { consumer.accept(searchRequest, parser); } // move pointers diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index a8bbd6989185b..9ad8a20cb1770 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -269,15 +269,15 @@ static SearchResponse innerFromXContent(XContentParser parser) throws IOExceptio if (token == Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (SCROLL_ID.match(currentFieldName)) { + if (SCROLL_ID.match(currentFieldName, parser.getDeprecationHandler())) { scrollId = parser.text(); - } else if (TOOK.match(currentFieldName)) { + } else if (TOOK.match(currentFieldName, parser.getDeprecationHandler())) { tookInMillis = parser.longValue(); - } else if (TIMED_OUT.match(currentFieldName)) { + } else if (TIMED_OUT.match(currentFieldName, parser.getDeprecationHandler())) { timedOut = parser.booleanValue(); - } else if (TERMINATED_EARLY.match(currentFieldName)) { + } else if (TERMINATED_EARLY.match(currentFieldName, parser.getDeprecationHandler())) { terminatedEarly = parser.booleanValue(); - } else if (NUM_REDUCE_PHASES.match(currentFieldName)) { + } else if (NUM_REDUCE_PHASES.match(currentFieldName, parser.getDeprecationHandler())) { numReducePhases = parser.intValue(); } else { parser.skipChildren(); @@ -291,24 +291,24 @@ static SearchResponse innerFromXContent(XContentParser parser) throws IOExceptio suggest = Suggest.fromXContent(parser); } else if (SearchProfileShardResults.PROFILE_FIELD.equals(currentFieldName)) { profile = SearchProfileShardResults.fromXContent(parser); - } else if (RestActions._SHARDS_FIELD.match(currentFieldName)) { + } else if (RestActions._SHARDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != Token.END_OBJECT) { if (token == Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (RestActions.FAILED_FIELD.match(currentFieldName)) { + if (RestActions.FAILED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { parser.intValue(); // we don't need it but need to consume it - } else if (RestActions.SUCCESSFUL_FIELD.match(currentFieldName)) { + } else if (RestActions.SUCCESSFUL_FIELD.match(currentFieldName, 
parser.getDeprecationHandler())) { successfulShards = parser.intValue(); - } else if (RestActions.TOTAL_FIELD.match(currentFieldName)) { + } else if (RestActions.TOTAL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { totalShards = parser.intValue(); - } else if (RestActions.SKIPPED_FIELD.match(currentFieldName)) { + } else if (RestActions.SKIPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { skippedShards = parser.intValue(); } else { parser.skipChildren(); } } else if (token == Token.START_ARRAY) { - if (RestActions.FAILURES_FIELD.match(currentFieldName)) { + if (RestActions.FAILURES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { while((token = parser.nextToken()) != Token.END_ARRAY) { failures.add(ShardSearchFailure.fromXContent(parser)); } @@ -319,7 +319,7 @@ static SearchResponse innerFromXContent(XContentParser parser) throws IOExceptio parser.skipChildren(); } } - } else if (Clusters._CLUSTERS_FIELD.match(currentFieldName)) { + } else if (Clusters._CLUSTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { int successful = -1; int total = -1; int skipped = -1; @@ -327,11 +327,11 @@ static SearchResponse innerFromXContent(XContentParser parser) throws IOExceptio if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (Clusters.SUCCESSFUL_FIELD.match(currentFieldName)) { + if (Clusters.SUCCESSFUL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { successful = parser.intValue(); - } else if (Clusters.TOTAL_FIELD.match(currentFieldName)) { + } else if (Clusters.TOTAL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { total = parser.intValue(); - } else if (Clusters.SKIPPED_FIELD.match(currentFieldName)) { + } else if (Clusters.SKIPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { skipped = parser.intValue(); } else { parser.skipChildren(); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 509f89a7542fe..d2bb7e6e50d77 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -95,10 +95,6 @@ public SearchTransportService(Settings settings, TransportService transportServi this.responseWrapper = responseWrapper; } - public Map getClientConnections() { - return Collections.unmodifiableMap(clientConnections); - } - public void sendFreeContext(Transport.Connection connection, final long contextId, OriginalIndices originalIndices) { transportService.sendRequest(connection, FREE_CONTEXT_ACTION_NAME, new SearchFreeContextRequest(originalIndices, contextId), TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(new ActionListener() { diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java index 18cb416a763fe..ea9a373efdd76 100755 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java @@ -24,9 +24,11 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; +import 
org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -34,7 +36,7 @@ * Abstract class that allows marking action responses that support acknowledgements. * Facilitates consistency across different APIs. */ -public abstract class AcknowledgedResponse extends ActionResponse { +public abstract class AcknowledgedResponse extends ActionResponse implements ToXContentObject { private static final ParseField ACKNOWLEDGED = new ParseField("acknowledged"); @@ -62,20 +64,46 @@ public final boolean isAcknowledged() { } /** - * Reads the timeout value + * Reads the acknowledged value */ protected void readAcknowledged(StreamInput in) throws IOException { acknowledged = in.readBoolean(); } /** - * Writes the timeout value + * Writes the acknowledged value */ protected void writeAcknowledged(StreamOutput out) throws IOException { out.writeBoolean(acknowledged); } - protected void addAcknowledgedField(XContentBuilder builder) throws IOException { + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); builder.field(ACKNOWLEDGED.getPreferredName(), isAcknowledged()); + addCustomFields(builder, params); + builder.endObject(); + return builder; + } + + protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { + + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + AcknowledgedResponse that = (AcknowledgedResponse) o; + return isAcknowledged() == that.isAcknowledged(); + } + + @Override + public int hashCode() { + return Objects.hash(isAcknowledged()); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/ShardsAcknowledgedResponse.java b/server/src/main/java/org/elasticsearch/action/support/master/ShardsAcknowledgedResponse.java new file mode 100644 index 0000000000000..21ff8fe644997 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/master/ShardsAcknowledgedResponse.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.action.support.master; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public abstract class ShardsAcknowledgedResponse extends AcknowledgedResponse { + + private static final ParseField SHARDS_ACKNOWLEDGED = new ParseField("shards_acknowledged"); + + protected static void declareAcknowledgedAndShardsAcknowledgedFields( + ConstructingObjectParser objectParser) { + declareAcknowledgedField(objectParser); + objectParser.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SHARDS_ACKNOWLEDGED, + ObjectParser.ValueType.BOOLEAN); + } + + private boolean shardsAcknowledged; + + + protected ShardsAcknowledgedResponse() { + } + + protected ShardsAcknowledgedResponse(boolean acknowledged, boolean shardsAcknowledged) { + super(acknowledged); + assert acknowledged || shardsAcknowledged == false; // if it's not acknowledged, then shards acked should be false too + this.shardsAcknowledged = shardsAcknowledged; + } + + /** + * Returns true if the requisite number of shards were started before + * returning from the index creation operation. If {@link #isAcknowledged()} + * is false, then this also returns false. + */ + public boolean isShardsAcknowledged() { + return shardsAcknowledged; + } + + protected void readShardsAcknowledged(StreamInput in) throws IOException { + shardsAcknowledged = in.readBoolean(); + } + + protected void writeShardsAcknowledged(StreamOutput out) throws IOException { + out.writeBoolean(shardsAcknowledged); + } + + @Override + protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { + builder.field(SHARDS_ACKNOWLEDGED.getPreferredName(), isShardsAcknowledged()); + } + + @Override + public boolean equals(Object o) { + if (super.equals(o)) { + ShardsAcknowledgedResponse that = (ShardsAcknowledgedResponse) o; + return shardsAcknowledged == that.shardsAcknowledged; + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), shardsAcknowledged); + } + +} diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 817be12a8e2b5..c29ca5c1d0853 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -178,25 +178,19 @@ public void onResponse(ReplicaResponse response) { @Override public void onFailure(Exception replicaException) { - logger.trace( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( - "[{}] failure while performing [{}] on replica {}, request [{}]", - shard.shardId(), - opType, - shard, - replicaRequest), - replicaException); - if (TransportActions.isShardNotAvailableException(replicaException)) { - decPendingAndFinishIfNeeded(); - } else { + logger.trace((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + "[{}] failure while performing [{}] on replica {}, request 
[{}]", + shard.shardId(), opType, shard, replicaRequest), replicaException); + // Only report "critical" exceptions - TODO: Reach out to the master node to get the latest shard state then report. + if (TransportActions.isShardNotAvailableException(replicaException) == false) { RestStatus restStatus = ExceptionsHelper.status(replicaException); shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure( shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false)); - String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard); - replicasProxy.failShardIfNeeded(shard, message, - replicaException, ReplicationOperation.this::decPendingAndFinishIfNeeded, - ReplicationOperation.this::onPrimaryDemoted, throwable -> decPendingAndFinishIfNeeded()); } + String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard); + replicasProxy.failShardIfNeeded(shard, message, + replicaException, ReplicationOperation.this::decPendingAndFinishIfNeeded, + ReplicationOperation.this::onPrimaryDemoted, throwable -> decPendingAndFinishIfNeeded()); } }); } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 1a57b6a5d9500..4398c56f26c77 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -202,7 +202,7 @@ protected abstract PrimaryResult shardOperationOnPrima /** * Synchronously execute the specified replica operation. This is done under a permit from - * {@link IndexShard#acquireReplicaOperationPermit(long, long, ActionListener, String)}. + * {@link IndexShard#acquireReplicaOperationPermit(long, long, ActionListener, String, Object)}. * * @param shardRequest the request to the replica shard * @param replica the replica shard to perform the operation on @@ -317,7 +317,7 @@ class AsyncPrimaryAction extends AbstractRunnable implements ActionListener onReferenceAcquired) { + ActionListener onReferenceAcquired, Object debugInfo) { IndexShard indexShard = getIndexShard(shardId); // we may end up here if the cluster state used to route the primary is so stale that the underlying // index shard was replaced with a replica. 
For example - in a two node cluster, if the primary fails @@ -981,7 +981,7 @@ public void onFailure(Exception e) { } }; - indexShard.acquirePrimaryOperationPermit(onAcquired, executor); + indexShard.acquirePrimaryOperationPermit(onAcquired, executor, debugInfo); } class ShardReference implements Releasable { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index 0e87de98049d0..9a26e63f1afd4 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -610,7 +610,7 @@ public static void parseRequest(TermVectorsRequest termVectorsRequest, XContentP if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (currentFieldName != null) { - if (FIELDS.match(currentFieldName)) { + if (FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { if (token == XContentParser.Token.START_ARRAY) { while (parser.nextToken() != XContentParser.Token.END_ARRAY) { fields.add(parser.text()); @@ -618,43 +618,43 @@ public static void parseRequest(TermVectorsRequest termVectorsRequest, XContentP } else { throw new ElasticsearchParseException("failed to parse term vectors request. field [fields] must be an array"); } - } else if (OFFSETS.match(currentFieldName)) { + } else if (OFFSETS.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.offsets(parser.booleanValue()); - } else if (POSITIONS.match(currentFieldName)) { + } else if (POSITIONS.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.positions(parser.booleanValue()); - } else if (PAYLOADS.match(currentFieldName)) { + } else if (PAYLOADS.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.payloads(parser.booleanValue()); } else if (currentFieldName.equals("term_statistics") || currentFieldName.equals("termStatistics")) { termVectorsRequest.termStatistics(parser.booleanValue()); } else if (currentFieldName.equals("field_statistics") || currentFieldName.equals("fieldStatistics")) { termVectorsRequest.fieldStatistics(parser.booleanValue()); - } else if (DFS.match(currentFieldName)) { + } else if (DFS.match(currentFieldName, parser.getDeprecationHandler())) { throw new IllegalArgumentException("distributed frequencies is not supported anymore for term vectors"); } else if (currentFieldName.equals("per_field_analyzer") || currentFieldName.equals("perFieldAnalyzer")) { termVectorsRequest.perFieldAnalyzer(readPerFieldAnalyzer(parser.map())); - } else if (FILTER.match(currentFieldName)) { + } else if (FILTER.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.filterSettings(readFilterSettings(parser)); - } else if (INDEX.match(currentFieldName)) { // the following is important for multi request parsing. + } else if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { // the following is important for multi request parsing. termVectorsRequest.index = parser.text(); - } else if (TYPE.match(currentFieldName)) { + } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.type = parser.text(); - } else if (ID.match(currentFieldName)) { + } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { if (termVectorsRequest.doc != null) { throw new ElasticsearchParseException("failed to parse term vectors request. 
either [id] or [doc] can be specified, but not both!"); } termVectorsRequest.id = parser.text(); - } else if (DOC.match(currentFieldName)) { + } else if (DOC.match(currentFieldName, parser.getDeprecationHandler())) { if (termVectorsRequest.id != null) { throw new ElasticsearchParseException("failed to parse term vectors request. either [id] or [doc] can be specified, but not both!"); } termVectorsRequest.doc(jsonBuilder().copyCurrentStructure(parser)); - } else if (ROUTING.match(currentFieldName)) { + } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.routing = parser.text(); - } else if (PARENT.match(currentFieldName)) { + } else if (PARENT.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.parent = parser.text(); - } else if (VERSION.match(currentFieldName)) { + } else if (VERSION.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.version = parser.longValue(); - } else if (VERSION_TYPE.match(currentFieldName)) { + } else if (VERSION_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.versionType = VersionType.fromString(parser.text()); } else { throw new ElasticsearchParseException("failed to parse term vectors request. unknown field [{}]", currentFieldName); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 0cbbd119e4853..b37999a971d02 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -36,7 +36,6 @@ import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.LogConfigurator; -import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.IfConfig; import org.elasticsearch.common.settings.KeyStoreWrapper; @@ -194,7 +193,8 @@ public void run() { try { // look for jar hell - JarHell.checkJarHell(); + final Logger logger = ESLoggerFactory.getLogger(JarHell.class); + JarHell.checkJarHell(logger::debug); } catch (IOException | URISyntaxException e) { throw new BootstrapException(e); } @@ -301,9 +301,9 @@ static void init( try { if (closeStandardStreams) { final Logger rootLogger = ESLoggerFactory.getRootLogger(); - final Appender maybeConsoleAppender = ServerLoggers.findAppender(rootLogger, ConsoleAppender.class); + final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class); if (maybeConsoleAppender != null) { - ServerLoggers.removeAppender(rootLogger, maybeConsoleAppender); + Loggers.removeAppender(rootLogger, maybeConsoleAppender); } closeSystOut(); } @@ -334,9 +334,9 @@ static void init( } catch (NodeValidationException | RuntimeException e) { // disable console logging, so user does not see the exception twice (jvm will show it already) final Logger rootLogger = ESLoggerFactory.getRootLogger(); - final Appender maybeConsoleAppender = ServerLoggers.findAppender(rootLogger, ConsoleAppender.class); + final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class); if (foreground && maybeConsoleAppender != null) { - ServerLoggers.removeAppender(rootLogger, maybeConsoleAppender); + Loggers.removeAppender(rootLogger, maybeConsoleAppender); } Logger logger = Loggers.getLogger(Bootstrap.class); if (INSTANCE.node != null) { @@ -369,7 
+369,7 @@ static void init( } // re-enable it if appropriate, so they can see any logging during the shutdown process if (foreground && maybeConsoleAppender != null) { - ServerLoggers.addAppender(rootLogger, maybeConsoleAppender); + Loggers.addAppender(rootLogger, maybeConsoleAppender); } throw e; diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Security.java b/server/src/main/java/org/elasticsearch/bootstrap/Security.java index 4ef8ad891a382..57b141383073e 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -27,6 +27,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.plugins.PluginInfo; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.secure_sm.SecureSM; import org.elasticsearch.transport.TcpTransport; @@ -161,7 +162,7 @@ static Map getCodebaseJarMap(Set urls) { static Map getPluginPermissions(Environment environment) throws IOException, NoSuchAlgorithmException { Map map = new HashMap<>(); // collect up set of plugins and modules by listing directories. - Set pluginsAndModules = new LinkedHashSet<>(PluginInfo.extractAllPlugins(environment.pluginsFile())); + Set pluginsAndModules = new LinkedHashSet<>(PluginsService.findPluginDirs(environment.pluginsFile())); if (Files.exists(environment.modulesFile())) { try (DirectoryStream stream = Files.newDirectoryStream(environment.modulesFile())) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java b/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java index d6d66e1828361..dcaad3c39dd96 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java @@ -24,6 +24,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Platforms; import org.elasticsearch.plugins.PluginInfo; +import org.elasticsearch.plugins.PluginsService; import java.io.Closeable; import java.io.IOException; @@ -70,7 +71,7 @@ void spawnNativePluginControllers(final Environment environment) throws IOExcept * For each plugin, attempt to spawn the controller daemon. Silently ignore any plugins that * don't include a controller for the correct platform.
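Both call sites above switch from PluginInfo.extractAllPlugins to PluginsService.findPluginDirs for enumerating installed plugin directories. As a rough, self-contained approximation of what such a helper does (the real method lives in PluginsService and may apply additional filtering; everything below is a sketch under that assumption):

```java
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

// Approximation of a findPluginDirs-style helper: treat every directory directly
// under the plugins root as one installed plugin. The filtering rules of the real
// PluginsService method are not reproduced here.
public class FindPluginDirs {
    public static List<Path> findPluginDirs(Path pluginsRoot) {
        List<Path> plugins = new ArrayList<>();
        if (Files.isDirectory(pluginsRoot)) {
            try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginsRoot)) {
                for (Path candidate : stream) {
                    if (Files.isDirectory(candidate)) {
                        plugins.add(candidate);
                    }
                }
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        }
        return plugins;
    }

    public static void main(String[] args) {
        System.out.println(findPluginDirs(Paths.get("plugins")));
    }
}
```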
*/ - List paths = PluginInfo.extractAllPlugins(pluginsFile); + List paths = PluginsService.findPluginDirs(pluginsFile); for (Path plugin : paths) { final PluginInfo info = PluginInfo.readFromProperties(plugin); final Path spawnPath = Platforms.nativeControllerPath(plugin); diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 726f1da80dc35..f29841e3744a9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -205,7 +205,7 @@ private static class ShardFailedTransportHandler implements TransportRequestHand @Override public void messageReceived(FailedShardEntry request, TransportChannel channel) throws Exception { - logger.warn((Supplier) () -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure); + logger.debug((Supplier) () -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure); clusterService.submitStateUpdateTask( "shard-failed", request, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java index 5792b55eec64f..7f1348dd1587f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; @@ -120,7 +121,8 @@ void validateAliasStandalone(String alias, String indexRouting) { public void validateAliasFilter(String alias, String filter, QueryShardContext queryShardContext, NamedXContentRegistry xContentRegistry) { assert queryShardContext != null; - try (XContentParser parser = XContentFactory.xContent(filter).createParser(xContentRegistry, filter)) { + try (XContentParser parser = XContentFactory.xContent(filter) + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, filter)) { validateAliasFilter(parser, queryShardContext); } catch (Exception e) { throw new IllegalArgumentException("failed to parse filter for alias [" + alias + "]", e); @@ -135,7 +137,8 @@ public void validateAliasFilter(String alias, String filter, QueryShardContext q public void validateAliasFilter(String alias, byte[] filter, QueryShardContext queryShardContext, NamedXContentRegistry xContentRegistry) { assert queryShardContext != null; - try (XContentParser parser = XContentFactory.xContent(filter).createParser(xContentRegistry, filter)) { + try (XContentParser parser = XContentFactory.xContent(filter) + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, filter)) { validateAliasFilter(parser, queryShardContext); } catch (Exception e) { throw new IllegalArgumentException("failed to parse filter for alias [" + alias + "]", e); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 
b532a1ec841bb..414e06a236511 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -1293,7 +1293,7 @@ public static Settings addHumanReadableSettings(Settings settings) { /** * State format for {@link IndexMetaData} to write to and load from disk */ - public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat(XContentType.SMILE, INDEX_STATE_FILE_PREFIX) { + public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat(INDEX_STATE_FILE_PREFIX) { @Override public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 23ed28569d28d..8c6829ca78734 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -1205,7 +1205,7 @@ public static MetaData fromXContent(XContentParser parser) throws IOException { /** * State format for {@link MetaData} to write to and load from disk */ - public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat(XContentType.SMILE, GLOBAL_STATE_FILE_PREFIX) { + public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat(GLOBAL_STATE_FILE_PREFIX) { @Override public void toXContent(XContentBuilder builder, MetaData state) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 37831f977aec7..0c38371bdc9cb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -133,7 +133,7 @@ ClusterState innerExecute(ClusterState currentState, Iterable actio Function indexLookup = name -> metadata.get(name); aliasValidator.validateAlias(alias, action.getIndex(), indexRouting, indexLookup); if (Strings.hasLength(filter)) { - IndexService indexService = indices.get(index.getIndex()); + IndexService indexService = indices.get(index.getIndex().getName()); if (indexService == null) { indexService = indicesService.indexService(index.getIndex()); if (indexService == null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 6dc405d00a3af..e6032c52585ec 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.RestoreInProgress; @@ -160,7 +161,7 @@ public ClusterState applyFailedShards(final ClusterState clusterState, final Lis if (staleShards.isEmpty() && failedShards.isEmpty()) { return clusterState; } - ClusterState tmpState = IndexMetaDataUpdater.removeStaleIdsWithoutRoutings(clusterState, staleShards); + ClusterState tmpState = IndexMetaDataUpdater.removeStaleIdsWithoutRoutings(clusterState, 
staleShards, logger); RoutingNodes routingNodes = getMutableRoutingNodes(tmpState); // shuffle the unassigned nodes, just so we won't have things like poison failed shards @@ -188,6 +189,7 @@ public ClusterState applyFailedShards(final ClusterState clusterState, final Lis if (failedShardEntry.markAsStale()) { allocation.removeAllocationId(failedShard); } + logger.warn(new ParameterizedMessage("failing shard [{}]", failedShardEntry), failedShardEntry.getFailure()); routingNodes.failShard(logger, failedShard, unassignedInfo, indexMetaData, allocation.changes()); } else { logger.trace("{} shard routing failed in an earlier iteration (routing: {})", shardToFail.shardId(), shardToFail); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java index c5cb6d2af5b72..6d4ca7dc77524 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -210,7 +211,7 @@ private IndexMetaData.Builder updateInSyncAllocations(RoutingTable newRoutingTab * Removes allocation ids from the in-sync set for shard copies for which there are no routing entries in the routing table. * This method is called in AllocationService before any changes to the routing table are made. */ - public static ClusterState removeStaleIdsWithoutRoutings(ClusterState clusterState, List staleShards) { + public static ClusterState removeStaleIdsWithoutRoutings(ClusterState clusterState, List staleShards, Logger logger) { MetaData oldMetaData = clusterState.metaData(); RoutingTable oldRoutingTable = clusterState.routingTable(); MetaData.Builder metaDataBuilder = null; @@ -238,6 +239,7 @@ public static ClusterState removeStaleIdsWithoutRoutings(ClusterState clusterSta } indexMetaDataBuilder.putInSyncAllocationIds(shardNumber, remainingInSyncAllocations); } + logger.warn("{} marking unavailable shards as stale: {}", shardEntry.getKey(), idsToRemove); } if (indexMetaDataBuilder != null) { diff --git a/server/src/main/java/org/elasticsearch/common/ParseField.java b/server/src/main/java/org/elasticsearch/common/ParseField.java index 2f85f2dc78b9c..2c68ea7711bb2 100644 --- a/server/src/main/java/org/elasticsearch/common/ParseField.java +++ b/server/src/main/java/org/elasticsearch/common/ParseField.java @@ -18,11 +18,7 @@ */ package org.elasticsearch.common; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.DeprecationHandler; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.XContentParser; import java.util.Collections; import java.util.HashSet; @@ -98,23 +94,6 @@ public ParseField withAllDeprecated(String allReplacedWith) { return parseField; } - /** - * Does {@code fieldName} match this field? Uses {@link LoggingDeprecationHandler} - * to prevent us from having to touch every call to {@code match} in the change - * that introduced {@linkplain LoggingDeprecationHandler}. In a followup this will - * be removed.
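Removing this deprecated one-argument overload is what forces the mechanical change visible throughout this patch: every FIELD.match(name) call becomes FIELD.match(name, parser.getDeprecationHandler()), so the parser, not ParseField, decides how deprecated field names are reported. A small sketch of the resulting calling convention (not runnable standalone; it assumes the Elasticsearch ParseField and XContentParser classes from this change are on the classpath):

```java
import java.io.IOException;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.XContentParser;

// Calling convention after this patch: the DeprecationHandler is taken from the
// parser instead of being hard-wired to LoggingDeprecationHandler inside
// ParseField.match.
class MatchMigration {
    private static final ParseField ROUTING = new ParseField("routing");

    static String parseRouting(XContentParser parser) throws IOException {
        String currentFieldName = parser.currentName();
        // before: ROUTING.match(currentFieldName)
        // after: the parser's own handler decides how deprecated names are reported
        if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) {
            return parser.text();
        }
        return null;
    }
}
```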
- * @param fieldName - * the field name to match against this {@link ParseField} - * @return true if fieldName matches any of the acceptable - * names for this {@link ParseField}. - * @deprecated Use {@link #match(String, DeprecationHandler)} with - * {@link XContentParser#getDeprecationHandler()} instead. - */ - @Deprecated - public boolean match(String fieldName) { - return match(fieldName, LoggingDeprecationHandler.INSTANCE); - } - /** * Does {@code fieldName} match this field? * @param fieldName diff --git a/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java b/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java index f335a754f3771..8cb51f2b06b0e 100644 --- a/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java +++ b/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java @@ -23,7 +23,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.Node; @@ -34,7 +34,7 @@ public abstract class AbstractComponent { protected final Settings settings; public AbstractComponent(Settings settings) { - this.logger = ServerLoggers.getLogger(getClass(), settings); + this.logger = Loggers.getLogger(getClass(), settings); this.deprecationLogger = new DeprecationLogger(logger); this.settings = settings; } diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java index 90145448be326..01f26498e9c69 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java @@ -57,7 +57,7 @@ protected static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper s if (token == XContentParser.Token.FIELD_NAME) { String fieldName = parser.currentName(); - if (ShapeParser.FIELD_TYPE.match(fieldName)) { + if (ShapeParser.FIELD_TYPE.match(fieldName, parser.getDeprecationHandler())) { parser.nextToken(); final GeoShapeType type = GeoShapeType.forName(parser.text()); if (shapeType != null && shapeType.equals(type) == false) { @@ -66,10 +66,10 @@ protected static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper s } else { shapeType = type; } - } else if (ShapeParser.FIELD_COORDINATES.match(fieldName)) { + } else if (ShapeParser.FIELD_COORDINATES.match(fieldName, parser.getDeprecationHandler())) { parser.nextToken(); coordinateNode = parseCoordinates(parser); - } else if (ShapeParser.FIELD_GEOMETRIES.match(fieldName)) { + } else if (ShapeParser.FIELD_GEOMETRIES.match(fieldName, parser.getDeprecationHandler())) { if (shapeType == null) { shapeType = GeoShapeType.GEOMETRYCOLLECTION; } else if (shapeType.equals(GeoShapeType.GEOMETRYCOLLECTION) == false) { @@ -78,7 +78,7 @@ protected static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper s } parser.nextToken(); geometryCollections = parseGeometries(parser, shapeMapper); - } else if (CircleBuilder.FIELD_RADIUS.match(fieldName)) { + } else if (CircleBuilder.FIELD_RADIUS.match(fieldName, parser.getDeprecationHandler())) { if (shapeType == null) { shapeType = GeoShapeType.CIRCLE; } else if (shapeType != null && shapeType.equals(GeoShapeType.CIRCLE) == false) { @@ 
-87,7 +87,7 @@ protected static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper s } parser.nextToken(); radius = DistanceUnit.Distance.parseDistance(parser.text()); - } else if (ShapeParser.FIELD_ORIENTATION.match(fieldName)) { + } else if (ShapeParser.FIELD_ORIENTATION.match(fieldName, parser.getDeprecationHandler())) { if (shapeType != null && (shapeType.equals(GeoShapeType.POLYGON) || shapeType.equals(GeoShapeType.MULTIPOLYGON)) == false) { malformedException = "cannot have [" + ShapeParser.FIELD_ORIENTATION + "] with type set to [" + shapeType + "]"; diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java b/server/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java similarity index 100% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java rename to server/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java diff --git a/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java index b38c3d3bdd78e..01aca53db051d 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java +++ b/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java @@ -177,15 +177,15 @@ private static void configureStatusLogger() { * @param settings the settings from which logger levels will be extracted */ private static void configureLoggerLevels(final Settings settings) { - if (ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) { - final Level level = ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.get(settings); - ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level); + if (Loggers.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) { + final Level level = Loggers.LOG_DEFAULT_LEVEL_SETTING.get(settings); + Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); } - ServerLoggers.LOG_LEVEL_SETTING.getAllConcreteSettings(settings) + Loggers.LOG_LEVEL_SETTING.getAllConcreteSettings(settings) // do not set a log level for a logger named level (from the default log setting) - .filter(s -> s.getKey().equals(ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.getKey()) == false).forEach(s -> { + .filter(s -> s.getKey().equals(Loggers.LOG_DEFAULT_LEVEL_SETTING.getKey()) == false).forEach(s -> { final Level level = s.get(settings); - ServerLoggers.setLevel(ESLoggerFactory.getLogger(s.getKey().substring("logger.".length())), level); + Loggers.setLevel(ESLoggerFactory.getLogger(s.getKey().substring("logger.".length())), level); }); } diff --git a/server/src/main/java/org/elasticsearch/common/logging/ServerLoggers.java b/server/src/main/java/org/elasticsearch/common/logging/Loggers.java similarity index 82% rename from server/src/main/java/org/elasticsearch/common/logging/ServerLoggers.java rename to server/src/main/java/org/elasticsearch/common/logging/Loggers.java index 99049c53d1637..40983c517c72f 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ServerLoggers.java +++ b/server/src/main/java/org/elasticsearch/common/logging/Loggers.java @@ -43,7 +43,9 @@ /** * A set of utilities around Logging. */ -public class ServerLoggers { +public class Loggers { + + public static final String SPACE = " "; public static final Setting LOG_DEFAULT_LEVEL_SETTING = new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Setting.Property.NodeScope); @@ -89,6 +91,48 @@ private static List prefixesList(Settings settings, String... 
prefixes) return prefixesList; } + public static Logger getLogger(Logger parentLogger, String s) { + assert parentLogger instanceof PrefixLogger; + return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s); + } + + public static Logger getLogger(String s) { + return ESLoggerFactory.getLogger(s); + } + + public static Logger getLogger(Class clazz) { + return ESLoggerFactory.getLogger(clazz); + } + + public static Logger getLogger(Class clazz, String... prefixes) { + return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz); + } + + public static Logger getLogger(String name, String... prefixes) { + return ESLoggerFactory.getLogger(formatPrefix(prefixes), name); + } + + private static String formatPrefix(String... prefixes) { + String prefix = null; + if (prefixes != null && prefixes.length > 0) { + StringBuilder sb = new StringBuilder(); + for (String prefixX : prefixes) { + if (prefixX != null) { + if (prefixX.equals(SPACE)) { + sb.append(" "); + } else { + sb.append("[").append(prefixX).append("]"); + } + } + } + if (sb.length() > 0) { + sb.append(" "); + prefix = sb.toString(); + } + } + return prefix; + } + /** * Set the level of the logger. If the new level is null, the logger will inherit its level from its nearest ancestor with a non-null * level. diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java b/server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java similarity index 98% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java rename to server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java index b24e839690366..a78330c3e8564 100644 --- a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java +++ b/server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java @@ -32,7 +32,7 @@ * A logger that prefixes all messages with a fixed prefix specified during construction. The prefix mechanism uses the marker construct, so * for the prefixes to appear, the logging layout pattern must include the marker in its pattern.
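To make the produced prefix concrete, here is a self-contained copy of the formatPrefix logic above with a tiny driver: each non-null prefix is wrapped in brackets and a single trailing space is appended, so a logger obtained via Loggers.getLogger(clazz, "index-1", "0") carries the prefix "[index-1][0] ".

```java
// Self-contained replica of the formatPrefix logic shown in the diff above,
// purely to demonstrate the resulting prefix strings.
public class FormatPrefixDemo {
    static final String SPACE = " ";

    static String formatPrefix(String... prefixes) {
        String prefix = null;
        if (prefixes != null && prefixes.length > 0) {
            StringBuilder sb = new StringBuilder();
            for (String prefixX : prefixes) {
                if (prefixX != null) {
                    if (prefixX.equals(SPACE)) {
                        sb.append(" ");
                    } else {
                        sb.append("[").append(prefixX).append("]");
                    }
                }
            }
            if (sb.length() > 0) {
                sb.append(" ");
                prefix = sb.toString();
            }
        }
        return prefix;
    }

    public static void main(String[] args) {
        System.out.println("'" + formatPrefix("index-1", "0") + "'"); // '[index-1][0] '
        System.out.println(formatPrefix());                           // null (no prefix)
    }
}
```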
*/ -public class PrefixLogger extends ExtendedLoggerWrapper { +class PrefixLogger extends ExtendedLoggerWrapper { /* * We can not use the built-in Marker tracking (MarkerManager) because the MarkerManager holds a permanent reference to the marker; diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 3df2f6037c6e1..804340d63ed11 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -46,7 +46,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting.Property; @@ -111,7 +111,7 @@ public ClusterSettings(Settings nodeSettings, Set> settingsSet) { } private static final class LoggingSettingUpdater implements SettingUpdater { - final Predicate loggerPredicate = ServerLoggers.LOG_LEVEL_SETTING::match; + final Predicate loggerPredicate = Loggers.LOG_LEVEL_SETTING::match; private final Settings settings; LoggingSettingUpdater(Settings settings) { @@ -129,10 +129,10 @@ public Settings getValue(Settings current, Settings previous) { builder.put(current.filter(loggerPredicate)); for (String key : previous.keySet()) { if (loggerPredicate.test(key) && builder.keys().contains(key) == false) { - if (ServerLoggers.LOG_LEVEL_SETTING.getConcreteSetting(key).exists(settings) == false) { + if (Loggers.LOG_LEVEL_SETTING.getConcreteSetting(key).exists(settings) == false) { builder.putNull(key); } else { - builder.put(key, ServerLoggers.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings).toString()); + builder.put(key, Loggers.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings).toString()); } } } @@ -150,12 +150,12 @@ public void apply(Settings value, Settings current, Settings previous) { if ("_root".equals(component)) { final String rootLevel = value.get(key); if (rootLevel == null) { - ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.get(settings)); + Loggers.setLevel(ESLoggerFactory.getRootLogger(), Loggers.LOG_DEFAULT_LEVEL_SETTING.get(settings)); } else { - ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), rootLevel); + Loggers.setLevel(ESLoggerFactory.getRootLogger(), rootLevel); } } else { - ServerLoggers.setLevel(ESLoggerFactory.getLogger(component), value.get(key)); + Loggers.setLevel(ESLoggerFactory.getLogger(component), value.get(key)); } } } @@ -380,8 +380,8 @@ public void apply(Settings value, Settings current, Settings previous) { ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING, EsExecutors.PROCESSORS_SETTING, ThreadContext.DEFAULT_HEADERS_SETTING, - ServerLoggers.LOG_DEFAULT_LEVEL_SETTING, - ServerLoggers.LOG_LEVEL_SETTING, + Loggers.LOG_DEFAULT_LEVEL_SETTING, + Loggers.LOG_LEVEL_SETTING, NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING, NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING, OsService.REFRESH_INTERVAL_SETTING, diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java index 
e832f629fd4cf..844445fa013ed 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.unit.SizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -1116,7 +1117,8 @@ private void processLegacyLists(Map map) { * Loads settings from the actual string content that represents them using {@link #fromXContent(XContentParser)} */ public Builder loadFromSource(String source, XContentType xContentType) { - try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(NamedXContentRegistry.EMPTY, source)) { + try (XContentParser parser = XContentFactory.xContent(xContentType) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, source)) { this.put(fromXContent(parser, true, true)); } catch (Exception e) { throw new SettingsException("Failed to load settings from [" + source + "]", e); diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 20253f7876880..b4d108a1b9acb 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -22,7 +22,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.inject.Binder; import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; @@ -57,7 +57,7 @@ public SettingsModule(Settings settings, Setting... 
additionalSettings) { } public SettingsModule(Settings settings, List<Setting<?>> additionalSettings, List<String> settingsFilter) { - logger = ServerLoggers.getLogger(getClass(), settings); + logger = Loggers.getLogger(getClass(), settings); this.settings = settings; for (Setting<?> setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) { registerSetting(setting); diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java index 8efff4edf5ca7..71b673a257c6e 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java @@ -63,20 +63,52 @@ public Releasable acquire(T key) { while (true) { KeyLock perNodeLock = map.get(key); if (perNodeLock == null) { - KeyLock newLock = new KeyLock(fair); - perNodeLock = map.putIfAbsent(key, newLock); - if (perNodeLock == null) { - newLock.lock(); - return new ReleasableLock(key, newLock); + ReleasableLock newLock = tryCreateNewLock(key); + if (newLock != null) { + return newLock; + } + } else { + assert perNodeLock != null; + int i = perNodeLock.count.get(); + if (i > 0 && perNodeLock.count.compareAndSet(i, i + 1)) { + perNodeLock.lock(); + return new ReleasableLock(key, perNodeLock); + } } } - assert perNodeLock != null; - int i = perNodeLock.count.get(); - if (i > 0 && perNodeLock.count.compareAndSet(i, i + 1)) { - perNodeLock.lock(); - return new ReleasableLock(key, perNodeLock); + } + } + + /** + * Tries to acquire the lock for the given key and returns it. If the lock can't be acquired, null is returned. + */ + public Releasable tryAcquire(T key) { + final KeyLock perNodeLock = map.get(key); + if (perNodeLock == null) { + return tryCreateNewLock(key); + } + if (perNodeLock.tryLock()) { // ok we got it - make sure we increment it accordingly otherwise release it again + int i; + while ((i = perNodeLock.count.get()) > 0) { + // we have to do this in a loop here since even if the count is > 0 + // there could be a concurrent blocking acquire that changes the count and then this CAS fails. Since we already got + // the lock we should retry and see if we can still get it or if the count is 0. If that is the case, we give up.
+ if (perNodeLock.count.compareAndSet(i, i + 1)) { + return new ReleasableLock(key, perNodeLock); + } } + perNodeLock.unlock(); // make sure we unlock and don't leave the lock in a locked state + } + return null; + } + + private ReleasableLock tryCreateNewLock(T key) { + KeyLock newLock = new KeyLock(fair); + newLock.lock(); + KeyLock keyLock = map.putIfAbsent(key, newLock); + if (keyLock == null) { + return new ReleasableLock(key, newLock); } + return null; } /** @@ -92,11 +124,12 @@ public boolean isHeldByCurrentThread(T key) { private void release(T key, KeyLock lock) { assert lock == map.get(key); + final int decrementAndGet = lock.count.decrementAndGet(); lock.unlock(); - int decrementAndGet = lock.count.decrementAndGet(); if (decrementAndGet == 0) { map.remove(key, lock); } + assert decrementAndGet >= 0 : decrementAndGet + " must be >= 0 but wasn't"; } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java index c4142152ccf43..457754d104c87 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java @@ -130,10 +130,14 @@ public void forcePut(E e) throws InterruptedException { @Override public boolean offer(E e) { - int count = size.incrementAndGet(); - if (count > capacity()) { - size.decrementAndGet(); - return false; + while (true) { + final int current = size.get(); + if (current >= capacity()) { + return false; + } + if (size.compareAndSet(current, 1 + current)) { + break; + } } boolean offered = queue.offer(e); if (!offered) { diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index 6427368c4b915..8f950c5434bd7 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -19,9 +19,11 @@ package org.elasticsearch.common.util.concurrent; import org.apache.lucene.util.CloseableThreadLocal; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -33,7 +35,12 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.FutureTask; +import java.util.concurrent.RunnableFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import java.util.function.Supplier; @@ -564,6 +571,36 @@ public void run() { ctx.restore(); whileRunning = true; in.run(); + if (in instanceof RunnableFuture) { + /* + * The wrapped runnable arose from asynchronous submission of a task to an executor. If an uncaught exception was thrown + * during the execution of this task, we need to inspect this runnable and see if it is an error that should be + * propagated to the uncaught exception handler. 
+ */ + try { + ((RunnableFuture) in).get(); + } catch (final Exception e) { + /* + * In theory, Future#get can only throw a cancellation exception, an interrupted exception, or an execution + * exception. We want to ignore cancellation exceptions, restore the interrupt status on interrupted exceptions, and + * inspect the cause of an execution exception. We are going to be extra paranoid here though and completely unwrap the + * exception to ensure that there is not a buried error anywhere. We assume that a general exception has been + * handled by the executed task or the task submitter. + */ + assert e instanceof CancellationException + || e instanceof InterruptedException + || e instanceof ExecutionException : e; + final Optional<Error> maybeError = ExceptionsHelper.maybeError(e, ESLoggerFactory.getLogger(ThreadContext.class)); + if (maybeError.isPresent()) { + // throw this error where it will propagate to the uncaught exception handler + throw maybeError.get(); + } + if (e instanceof InterruptedException) { + // restore the interrupt status + Thread.currentThread().interrupt(); + } + } + } whileRunning = false; } catch (IllegalStateException ex) { if (whileRunning || threadLocal.closed.get() == false) { diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java b/server/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java index f68cf0abe5a5a..0f25231634d07 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java @@ -183,27 +183,35 @@ public void declareBoolean(BiConsumer consumer, ParseField field public void declareObjectArray(BiConsumer> consumer, ContextParser objectParser, ParseField field) { - declareField(consumer, (p, c) -> parseArray(p, () -> objectParser.parse(p, c)), field, ValueType.OBJECT_ARRAY); + declareFieldArray(consumer, (p, c) -> objectParser.parse(p, c), field, ValueType.OBJECT_ARRAY); } public void declareStringArray(BiConsumer> consumer, ParseField field) { - declareField(consumer, (p, c) -> parseArray(p, p::text), field, ValueType.STRING_ARRAY); + declareFieldArray(consumer, (p, c) -> p.text(), field, ValueType.STRING_ARRAY); } public void declareDoubleArray(BiConsumer> consumer, ParseField field) { - declareField(consumer, (p, c) -> parseArray(p, p::doubleValue), field, ValueType.DOUBLE_ARRAY); + declareFieldArray(consumer, (p, c) -> p.doubleValue(), field, ValueType.DOUBLE_ARRAY); } public void declareFloatArray(BiConsumer> consumer, ParseField field) { - declareField(consumer, (p, c) -> parseArray(p, p::floatValue), field, ValueType.FLOAT_ARRAY); + declareFieldArray(consumer, (p, c) -> p.floatValue(), field, ValueType.FLOAT_ARRAY); } public void declareLongArray(BiConsumer> consumer, ParseField field) { - declareField(consumer, (p, c) -> parseArray(p, p::longValue), field, ValueType.LONG_ARRAY); + declareFieldArray(consumer, (p, c) -> p.longValue(), field, ValueType.LONG_ARRAY); } public void declareIntArray(BiConsumer> consumer, ParseField field) { - declareField(consumer, (p, c) -> parseArray(p, p::intValue), field, ValueType.INT_ARRAY); + declareFieldArray(consumer, (p, c) -> p.intValue(), field, ValueType.INT_ARRAY); + } + + /** + * Declares a field that can contain an array of elements of the types listed in the {@link ValueType} enum + */ + public void declareFieldArray(BiConsumer> consumer, ContextParser itemParser, + ParseField field, ValueType type) { + declareField(consumer, (p, c) -> parseArray(p, () ->
itemParser.parse(p, c)), field, type); } public void declareRawObject(BiConsumer consumer, ParseField field) { @@ -220,13 +228,18 @@ public void declareRawObject(BiConsumer consumer, ParseFi private interface IOSupplier { T get() throws IOException; } + private static List parseArray(XContentParser parser, IOSupplier supplier) throws IOException { List list = new ArrayList<>(); - if (parser.currentToken().isValue() || parser.currentToken() == XContentParser.Token.START_OBJECT) { + if (parser.currentToken().isValue() + || parser.currentToken() == XContentParser.Token.VALUE_NULL + || parser.currentToken() == XContentParser.Token.START_OBJECT) { list.add(supplier.get()); // single value } else { while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - if (parser.currentToken().isValue() || parser.currentToken() == XContentParser.Token.START_OBJECT) { + if (parser.currentToken().isValue() + || parser.currentToken() == XContentParser.Token.VALUE_NULL + || parser.currentToken() == XContentParser.Token.START_OBJECT) { list.add(supplier.get()); } else { throw new IllegalStateException("expected value but got [" + parser.currentToken() + "]"); diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java b/server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java index 41593bfe23803..4fb397dbe1751 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java @@ -134,7 +134,7 @@ public T parseNamedObject(Class categoryClass, String name, XContentPa if (entry == null) { throw new UnknownNamedObjectException(parser.getTokenLocation(), categoryClass, name); } - if (false == entry.name.match(name)) { + if (false == entry.name.match(name, parser.getDeprecationHandler())) { /* Note that this shouldn't happen because we already looked up the entry using the names but we need to call `match` anyway * because it is responsible for logging deprecation warnings. 
*/ throw new ParsingException(parser.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java b/server/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java index aa5a7e8391bf9..1a3be1a5a7bdd 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java @@ -363,7 +363,7 @@ private class FieldParser { } void assertSupports(String parserName, XContentParser parser, String currentFieldName) { - if (parseField.match(currentFieldName) == false) { + if (parseField.match(currentFieldName, parser.getDeprecationHandler()) == false) { throw new ParsingException(parser.getTokenLocation(), "[" + parserName + "] parsefield doesn't accept: " + currentFieldName); } @@ -416,7 +416,8 @@ public enum ValueType { OBJECT_ARRAY_BOOLEAN_OR_STRING(START_OBJECT, START_ARRAY, VALUE_BOOLEAN, VALUE_STRING), OBJECT_ARRAY_OR_STRING(START_OBJECT, START_ARRAY, VALUE_STRING), VALUE(VALUE_BOOLEAN, VALUE_NULL, VALUE_EMBEDDED_OBJECT, VALUE_NUMBER, VALUE_STRING), - VALUE_OBJECT_ARRAY(VALUE_BOOLEAN, VALUE_NULL, VALUE_EMBEDDED_OBJECT, VALUE_NUMBER, VALUE_STRING, START_OBJECT, START_ARRAY); + VALUE_OBJECT_ARRAY(VALUE_BOOLEAN, VALUE_NULL, VALUE_EMBEDDED_OBJECT, VALUE_NUMBER, VALUE_STRING, START_OBJECT, START_ARRAY), + VALUE_ARRAY(VALUE_BOOLEAN, VALUE_NULL, VALUE_NUMBER, VALUE_STRING, START_ARRAY); private final EnumSet tokens; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java b/server/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java index 0282fba764621..98ecf52e4814d 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java @@ -77,8 +77,8 @@ public void register(T value, ParseField parseField) { * @return The value being looked up. Never null. * @throws ParsingException if the named thing isn't in the registry or the name was deprecated and deprecated names aren't supported. */ - public T lookup(String name, XContentLocation xContentLocation) { - T value = lookupReturningNullIfNotFound(name); + public T lookup(String name, XContentLocation xContentLocation, DeprecationHandler deprecationHandler) { + T value = lookupReturningNullIfNotFound(name, deprecationHandler); if (value == null) { throw new ParsingException(xContentLocation, "no [" + registryName + "] registered for [" + name + "]"); } @@ -92,14 +92,14 @@ public T lookup(String name, XContentLocation xContentLocation) { * @return The value being looked up or null if it wasn't found. * @throws ParsingException if the named thing isn't in the registry or the name was deprecated and deprecated names aren't supported. */ - public T lookupReturningNullIfNotFound(String name) { + public T lookupReturningNullIfNotFound(String name, DeprecationHandler deprecationHandler) { Tuple parseFieldAndValue = registry.get(name); if (parseFieldAndValue == null) { return null; } ParseField parseField = parseFieldAndValue.v1(); T value = parseFieldAndValue.v2(); - boolean match = parseField.match(name); + boolean match = parseField.match(name, deprecationHandler); //this is always expected to match, ParseField is useful for deprecation warnings etc. 
here assert match : "ParseField did not match registered name [" + name + "][" + registryName + "]"; return value; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContent.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContent.java index f2b487c4daf3e..c7118f025ee04 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContent.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContent.java @@ -106,7 +106,10 @@ XContentParser createParser(NamedXContentRegistry xContentRegistry, /** * Creates a parser over the provided bytes. + * @deprecated use {@link #createParser(NamedXContentRegistry, DeprecationHandler, InputStream)} instead, + * the BytesReference coupling in this class will be removed in a future commit */ + @Deprecated XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, BytesReference bytes) throws IOException; @@ -115,70 +118,4 @@ XContentParser createParser(NamedXContentRegistry xContentRegistry, */ XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, Reader reader) throws IOException; - - /** - * Creates a parser over the provided string content using - * {@link LoggingDeprecationHandler}. - * @deprecated This is a temporary shim so we can migrate all calls to createParser incrementally. - * Use {@link #createParser(NamedXContentRegistry, DeprecationHandler, String)} instead. - */ - @Deprecated - default XContentParser createParser(NamedXContentRegistry xContentRegistry, String content) throws IOException { - return createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, content); - } - - /** - * Creates a parser over the provided input stream using - * {@link LoggingDeprecationHandler}. - * @deprecated This is a temporary shim so we can migrate all calls to createParser incrementally. - * Use {@link #createParser(NamedXContentRegistry, DeprecationHandler, InputStream)} instead. - */ - @Deprecated - default XContentParser createParser(NamedXContentRegistry xContentRegistry, InputStream is) throws IOException { - return createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, is); - } - - /** - * Creates a parser over the provided bytes using - * {@link LoggingDeprecationHandler}. - * @deprecated This is a temporary shim so we can migrate all calls to createParser incrementally. - * Use {@link #createParser(NamedXContentRegistry, DeprecationHandler, byte[])} instead. - */ - @Deprecated - default XContentParser createParser(NamedXContentRegistry xContentRegistry, byte[] data) throws IOException { - return createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, data); - } - - /** - * Creates a parser over the provided bytes using - * {@link LoggingDeprecationHandler}. - * @deprecated This is a temporary shim so we can migrate all calls to createParser incrementally. - * Use {@link #createParser(NamedXContentRegistry, DeprecationHandler, byte[], int, int)} instead. - */ - @Deprecated - default XContentParser createParser(NamedXContentRegistry xContentRegistry, byte[] data, int offset, int length) throws IOException { - return createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, data, offset, length); - } - - /** - * Creates a parser over the provided bytes using - * {@link LoggingDeprecationHandler}. - * @deprecated This is a temporary shim so we can migrate all calls to createParser incrementally. 
- * Use {@link #createParser(NamedXContentRegistry, DeprecationHandler, BytesReference)} instead. - */ - @Deprecated - default XContentParser createParser(NamedXContentRegistry xContentRegistry, BytesReference bytes) throws IOException { - return createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, bytes); - } - - /** - * Creates a parser over the provided reader using - * {@link LoggingDeprecationHandler}. - * @deprecated This is a temporary shim so we can migrate all calls to createParser incrementally. - * Use {@link #createParser(NamedXContentRegistry, DeprecationHandler, Reader)} instead. - */ - @Deprecated - default XContentParser createParser(NamedXContentRegistry xContentRegistry, Reader reader) throws IOException { - return createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, reader); - } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index 76d8aa15f870b..f8b90c934f24a 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -43,7 +43,8 @@ public class XContentHelper { * @deprecated use {@link #createParser(NamedXContentRegistry, DeprecationHandler, BytesReference, XContentType)} to avoid content type auto-detection */ @Deprecated - public static XContentParser createParser(NamedXContentRegistry xContentRegistry, BytesReference bytes) throws IOException { + public static XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, + BytesReference bytes) throws IOException { Compressor compressor = CompressorFactory.compressor(bytes); if (compressor != null) { InputStream compressedInput = compressor.streamInput(bytes.streamInput()); @@ -51,9 +52,9 @@ public static XContentParser createParser(NamedXContentRegistry xContentRegistry compressedInput = new BufferedInputStream(compressedInput); } final XContentType contentType = XContentFactory.xContentType(compressedInput); - return XContentFactory.xContent(contentType).createParser(xContentRegistry, compressedInput); + return XContentFactory.xContent(contentType).createParser(xContentRegistry, deprecationHandler, compressedInput); } else { - return XContentFactory.xContent(bytes).createParser(xContentRegistry, bytes.streamInput()); + return XContentFactory.xContent(bytes).createParser(xContentRegistry, deprecationHandler, bytes.streamInput()); } } @@ -71,7 +72,7 @@ public static XContentParser createParser(NamedXContentRegistry xContentRegistry } return XContentFactory.xContent(xContentType).createParser(xContentRegistry, deprecationHandler, compressedInput); } else { - return xContentType.xContent().createParser(xContentRegistry, bytes.streamInput()); + return xContentType.xContent().createParser(xContentRegistry, deprecationHandler, bytes.streamInput()); } } @@ -117,7 +118,8 @@ public static Tuple> convertToMap(BytesReferen */ public static Map convertToMap(XContent xContent, String string, boolean ordered) throws ElasticsearchParseException { // It is safe to use EMPTY here because this never uses namedObject - try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, string)) { + try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, string)) { return ordered ? 
parser.mapOrdered() : parser.map(); } catch (IOException e) { throw new ElasticsearchParseException("Failed to parse content to map", e); @@ -131,7 +133,8 @@ public static Map convertToMap(XContent xContent, String string, public static Map convertToMap(XContent xContent, InputStream input, boolean ordered) throws ElasticsearchParseException { // It is safe to use EMPTY here because this never uses namedObject - try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, input)) { + try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, input)) { return ordered ? parser.mapOrdered() : parser.map(); } catch (IOException e) { throw new ElasticsearchParseException("Failed to parse content to map", e); @@ -161,7 +164,7 @@ public static String convertToJson(BytesReference bytes, boolean reformatJson, b // It is safe to use EMPTY here because this never uses namedObject try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, bytes.streamInput())) { + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes.streamInput())) { parser.nextToken(); XContentBuilder builder = XContentFactory.jsonBuilder(); if (prettyPrint) { diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java b/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java index 1e09f8334f772..f152fb3cc96ba 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentFactory; @@ -314,7 +315,11 @@ public void writeRawField(String name, InputStream content) throws IOException { public void writeRawField(String name, InputStream content, XContentType contentType) throws IOException { if (mayWriteRawData(contentType) == false) { // EMPTY is safe here because we never call namedObject when writing raw data - try (XContentParser parser = XContentFactory.xContent(contentType).createParser(NamedXContentRegistry.EMPTY, content)) { + try (XContentParser parser = XContentFactory.xContent(contentType) + // It's okay to pass the throwing deprecation handler + // because we should not be writing raw fields when + // generating JSON + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, content)) { parser.nextToken(); writeFieldName(name); copyCurrentStructure(parser); @@ -392,7 +397,10 @@ protected boolean supportsRawWrites() { protected void copyRawValue(BytesReference content, XContent xContent) throws IOException { // EMPTY is safe here because we never call namedObject try (StreamInput input = content.streamInput(); - XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, input)) { + XContentParser parser = xContent + // It's okay to pass the throwing deprecation handler because we + // should not be writing raw fields when generating JSON + .createParser(NamedXContentRegistry.EMPTY, 
DeprecationHandler.THROW_UNSUPPORTED_OPERATION, input)) { copyCurrentStructure(parser); } } diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index b2602e8f2c596..179692cd516c8 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -25,7 +25,7 @@ import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -109,7 +109,7 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic if (discoverySupplier == null) { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); } - ServerLoggers.getLogger(getClass(), settings).info("using discovery type [{}]", discoveryType); + Loggers.getLogger(getClass(), settings).info("using discovery type [{}]", discoveryType); discovery = Objects.requireNonNull(discoverySupplier.get()); } diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 6f2fabd5188a4..2d254df539e60 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -38,7 +38,7 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -182,7 +182,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce locks = null; nodeLockId = -1; nodeMetaData = new NodeMetaData(generateNodeId(settings)); - logger = ServerLoggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId())); + logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId())); return; } final NodePath[] nodePaths = new NodePath[environment.dataWithClusterFiles().length]; @@ -190,7 +190,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce boolean success = false; // trace logger to debug issues before the default node name is derived from the node id - Logger startupTraceLogger = ServerLoggers.getLogger(getClass(), settings); + Logger startupTraceLogger = Loggers.getLogger(getClass(), settings); try { sharedDataPath = environment.sharedDataFile(); @@ -244,7 +244,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce throw new IllegalStateException(message, lastException); } this.nodeMetaData = loadOrCreateNodeMetaData(settings, startupTraceLogger, nodePaths); - this.logger = ServerLoggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId())); + this.logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId())); this.nodeLockId = 
nodeLockId; this.locks = locks; diff --git a/server/src/main/java/org/elasticsearch/env/NodeMetaData.java b/server/src/main/java/org/elasticsearch/env/NodeMetaData.java index 38a4fce9cdc3d..dbea3164c8a44 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeMetaData.java +++ b/server/src/main/java/org/elasticsearch/env/NodeMetaData.java @@ -91,7 +91,7 @@ public NodeMetaData build() { } - public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat(XContentType.SMILE, "node-") { + public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat("node-") { @Override protected XContentBuilder newXContentBuilder(XContentType type, OutputStream stream) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index fb48405b72538..1faa37c6a33a9 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -64,36 +65,28 @@ * @param the type of the XContent base data-structure */ public abstract class MetaDataStateFormat { + public static final XContentType FORMAT = XContentType.SMILE; public static final String STATE_DIR_NAME = "_state"; public static final String STATE_FILE_EXTENSION = ".st"; + private static final String STATE_FILE_CODEC = "state"; private static final int MIN_COMPATIBLE_STATE_FILE_VERSION = 0; private static final int STATE_FILE_VERSION = 1; private static final int STATE_FILE_VERSION_ES_2X_AND_BELOW = 0; private static final int BUFFER_SIZE = 4096; - private final XContentType format; private final String prefix; private final Pattern stateFilePattern; /** * Creates a new {@link MetaDataStateFormat} instance - * @param format the format of the x-content */ - protected MetaDataStateFormat(XContentType format, String prefix) { - this.format = format; + protected MetaDataStateFormat(String prefix) { this.prefix = prefix; this.stateFilePattern = Pattern.compile(Pattern.quote(prefix) + "(\\d+)(" + MetaDataStateFormat.STATE_FILE_EXTENSION + ")?"); } - /** - * Returns the {@link XContentType} used to serialize xcontent on write. - */ - public XContentType format() { - return format; - } - /** * Writes the given state to the given directories. The state is written to a * state directory ({@value #STATE_DIR_NAME}) underneath each of the given file locations and is created if it @@ -123,8 +116,8 @@ public final void write(final T state, final Path... 
locations) throws IOExcepti try (OutputStreamIndexOutput out = new OutputStreamIndexOutput(resourceDesc, fileName, Files.newOutputStream(tmpStatePath), BUFFER_SIZE)) { CodecUtil.writeHeader(out, STATE_FILE_CODEC, STATE_FILE_VERSION); - out.writeInt(format.index()); - try (XContentBuilder builder = newXContentBuilder(format, new IndexOutputOutputStream(out) { + out.writeInt(FORMAT.index()); + try (XContentBuilder builder = newXContentBuilder(FORMAT, new IndexOutputOutputStream(out) { @Override public void close() throws IOException { // this is important since some of the XContentBuilders write bytes on close. @@ -190,6 +183,9 @@ public final T read(NamedXContentRegistry namedXContentRegistry, Path file) thro final int fileVersion = CodecUtil.checkHeader(indexInput, STATE_FILE_CODEC, MIN_COMPATIBLE_STATE_FILE_VERSION, STATE_FILE_VERSION); final XContentType xContentType = XContentType.values()[indexInput.readInt()]; + if (xContentType != FORMAT) { + throw new IllegalStateException("expected state in " + file + " to be " + FORMAT + " format but was " + xContentType); + } if (fileVersion == STATE_FILE_VERSION_ES_2X_AND_BELOW) { // format version 0, wrote a version that always came from the content state file and was never used indexInput.readLong(); // version currently unused @@ -197,8 +193,9 @@ public final T read(NamedXContentRegistry namedXContentRegistry, Path file) thro long filePointer = indexInput.getFilePointer(); long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer; try (IndexInput slice = indexInput.slice("state_xcontent", filePointer, contentSize)) { - try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(namedXContentRegistry, - new InputStreamIndexInput(slice, contentSize))) { + try (XContentParser parser = XContentFactory.xContent(FORMAT) + .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, + new InputStreamIndexInput(slice, contentSize))) { return fromXContent(parser); } } @@ -312,7 +309,8 @@ public T loadLatestState(Logger logger, NamedXContentRegistry namedXContentRegi logger.debug("{}: no data for [{}], ignoring...", prefix, stateFile.toAbsolutePath()); continue; } - try (XContentParser parser = XContentHelper.createParser(namedXContentRegistry, new BytesArray(data))) { + try (XContentParser parser = XContentHelper + .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, new BytesArray(data))) { state = fromXContent(parser); } if (state == null) { diff --git a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java index ce13c12c8496f..25acdd06b44a6 100644 --- a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java +++ b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; public abstract class AbstractIndexComponent implements IndexComponent { @@ -33,7 +33,7 @@ public abstract class AbstractIndexComponent implements IndexComponent { * Constructs a new index component, with the index name and its settings. 
*/ protected AbstractIndexComponent(IndexSettings indexSettings) { - this.logger = ServerLoggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex()); + this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex()); this.deprecationLogger = new DeprecationLogger(logger); this.indexSettings = indexSettings; } diff --git a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index e50ddd8e3966c..90d8a205e8b57 100644 --- a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -24,7 +24,7 @@ import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; @@ -52,7 +52,7 @@ final class CompositeIndexEventListener implements IndexEventListener { } } this.listeners = Collections.unmodifiableList(new ArrayList<>(listeners)); - this.logger = ServerLoggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex()); + this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex()); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 7c1672603aed9..3dc1c97677b8b 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -775,7 +775,7 @@ private void maybeSyncGlobalCheckpoints() { e); } }), - ThreadPool.Names.SAME); + ThreadPool.Names.SAME, "background global checkpoint sync"); } catch (final AlreadyClosedException | IndexShardClosedException e) { // the shard was closed concurrently, continue } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 093e110cb827b..034f34010ede7 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -22,7 +22,7 @@ import org.apache.lucene.index.MergePolicy; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -381,7 +381,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti this.settings = Settings.builder().put(nodeSettings).put(indexMetaData.getSettings()).build(); this.index = indexMetaData.getIndex(); version = Version.indexCreated(settings); - logger = ServerLoggers.getLogger(getClass(), settings, index); + logger = Loggers.getLogger(getClass(), settings, index); nodeName = Node.NODE_NAME_SETTING.get(settings); this.indexMetaData = indexMetaData; numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null); diff --git 
a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 53d63bf64bb6b..94c3892ef361e 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -22,7 +22,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; @@ -87,7 +87,7 @@ public final class IndexingSlowLog implements IndexingOperationListener { }, Property.Dynamic, Property.IndexScope); IndexingSlowLog(IndexSettings indexSettings) { - this.indexLogger = ServerLoggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings()); + this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings()); this.index = indexSettings.getIndex(); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat); @@ -117,7 +117,7 @@ private void setMaxSourceCharsToLog(int maxSourceCharsToLog) { private void setLevel(SlowLogLevel level) { this.level = level; - ServerLoggers.setLevel(this.indexLogger, level.name()); + Loggers.setLevel(this.indexLogger, level.name()); } private void setWarnThreshold(TimeValue warnThreshold) { diff --git a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java index d02d4820fd402..a48e3d7bd72c5 100644 --- a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; @@ -81,8 +81,8 @@ public final class SearchSlowLog implements SearchOperationListener { public SearchSlowLog(IndexSettings indexSettings) { - this.queryLogger = ServerLoggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query", indexSettings.getSettings()); - this.fetchLogger = ServerLoggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch", indexSettings.getSettings()); + this.queryLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query", indexSettings.getSettings()); + this.fetchLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch", indexSettings.getSettings()); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING, this::setQueryWarnThreshold); this.queryWarnThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING).nanos(); @@ -108,8 +108,8 @@ public SearchSlowLog(IndexSettings indexSettings) { private void setLevel(SlowLogLevel level) { this.level = level; - ServerLoggers.setLevel(queryLogger, level.name()); - ServerLoggers.setLevel(fetchLogger, level.name()); + Loggers.setLevel(queryLogger, level.name()); + Loggers.setLevel(fetchLogger, level.name()); } @Override public void onQueryPhase(SearchContext context, long tookInNanos) { 
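Taken together, the IndexingSlowLog and SearchSlowLog hunks above follow a single pattern: each slow log owns a logger built from the index settings, and a dynamic index setting drives that logger's level through Loggers.setLevel. The following minimal sketch shows the pattern, assuming only the Loggers, IndexSettings, Setting, and SlowLogLevel calls that are visible in this diff; the class name ExampleSlowLog and the setting key index.example.slowlog.level are hypothetical and not part of this change:

```java
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.SlowLogLevel;

final class ExampleSlowLog {

    // Hypothetical dynamic, index-scoped level setting, mirroring the slow-log settings above.
    static final Setting<SlowLogLevel> LEVEL_SETTING = new Setting<>("index.example.slowlog.level",
            SlowLogLevel.TRACE.name(), SlowLogLevel::parse, Property.Dynamic, Property.IndexScope);

    private final Logger logger;

    ExampleSlowLog(IndexSettings indexSettings) {
        // The logger is derived from the index settings, as in IndexingSlowLog/SearchSlowLog.
        this.logger = Loggers.getLogger("index.example.slowlog", indexSettings.getSettings());
        // Re-apply the level whenever the dynamic setting changes, then apply the initial value.
        indexSettings.getScopedSettings().addSettingsUpdateConsumer(LEVEL_SETTING, this::setLevel);
        setLevel(indexSettings.getValue(LEVEL_SETTING));
    }

    private void setLevel(SlowLogLevel level) {
        // Same call the slow logs above use after the ServerLoggers -> Loggers rename.
        Loggers.setLevel(logger, level.name());
    }
}
```

Wiring the update consumer in the constructor, as the slow logs do above, guarantees the level tracks the setting for the lifetime of the index service without any polling.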
diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index 2a0d89cb1e2a2..f26a24c47a6f3 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -168,8 +168,10 @@ synchronized IndexCommit acquireIndexCommit(boolean acquiringSafeCommit) { /** * Releases an index commit that was acquired by {@link #acquireIndexCommit(boolean)}. + * + * @return true if the snapshotted commit can be cleaned up. */ - synchronized void releaseCommit(final IndexCommit snapshotCommit) { + synchronized boolean releaseCommit(final IndexCommit snapshotCommit) { final IndexCommit releasingCommit = ((SnapshotIndexCommit) snapshotCommit).delegate; assert snapshottedCommits.containsKey(releasingCommit) : "Release non-snapshotted commit;" + "snapshotted commits [" + snapshottedCommits + "], releasing commit [" + releasingCommit + "]"; @@ -178,6 +180,8 @@ synchronized void releaseCommit(final IndexCommit snapshotCommit) { if (refCount == 0) { snapshottedCommits.remove(releasingCommit); } + // The commit can be cleaned up only if there is no pending snapshot of it and it is neither the safe commit nor the last commit. + return refCount == 0 && releasingCommit.equals(safeCommit) == false && releasingCommit.equals(lastCommit) == false; } /** @@ -186,7 +190,7 @@ synchronized void releaseCommit(final IndexCommit snapshotCommit) { * If an index was created before v6.2, and we haven't retained a safe commit yet, this method will return the oldest commit. * * @param commits a list of existing commit points - * @param globalCheckpoint the persisted global checkpoint from the translog, see {@link Translog#readGlobalCheckpoint(Path)} + * @param globalCheckpoint the persisted global checkpoint from the translog, see {@link Translog#readGlobalCheckpoint(Path, String)} * @return a safe commit or the oldest commit if a safe commit is not found */ public static IndexCommit findSafeCommitPoint(List<IndexCommit> commits, long globalCheckpoint) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java index 871f1f62f41be..f4876149cac13 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java @@ -25,7 +25,7 @@ import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.MergeScheduler; import org.apache.lucene.index.OneMergeHelper; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.Settings; @@ -71,7 +71,7 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler { this.config = indexSettings.getMergeSchedulerConfig(); this.shardId = shardId; this.indexSettings = indexSettings.getSettings(); - this.logger = ServerLoggers.getLogger(getClass(), this.indexSettings, shardId); + this.logger = Loggers.getLogger(getClass(), this.indexSettings, shardId); refreshConfig(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java
b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index eea63dec94bf2..fb937ed4e9302 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -34,7 +34,6 @@ import org.apache.lucene.index.SegmentReader; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; @@ -51,7 +50,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver; @@ -131,7 +130,7 @@ protected Engine(EngineConfig engineConfig) { this.shardId = engineConfig.getShardId(); this.allocationId = engineConfig.getAllocationId(); this.store = engineConfig.getStore(); - this.logger = ServerLoggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name + this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name engineConfig.getIndexSettings().getSettings(), engineConfig.getShardId()); this.eventListener = engineConfig.getEventListener(); } @@ -156,10 +155,6 @@ protected static boolean isMergedSegment(LeafReader reader) { return IndexWriter.SOURCE_MERGE.equals(source); } - protected Searcher newSearcher(String source, IndexSearcher searcher, ReferenceManager manager) { - return new EngineSearcher(source, searcher, manager, store, logger); - } - public final EngineConfig config() { return engineConfig; } @@ -510,38 +505,7 @@ public final Searcher acquireSearcher(String source) throws EngineException { * * @see Searcher#close() */ - public final Searcher acquireSearcher(String source, SearcherScope scope) throws EngineException { - boolean success = false; - /* Acquire order here is store -> manager since we need - * to make sure that the store is not closed before - * the searcher is acquired. */ - store.incRef(); - try { - final ReferenceManager manager = getSearcherManager(source, scope); // can never be null - /* This might throw NPE but that's fine we will run ensureOpen() - * in the catch block and throw the right exception */ - final IndexSearcher searcher = manager.acquire(); - try { - final Searcher retVal = newSearcher(source, searcher, manager); - success = true; - return retVal; - } finally { - if (!success) { - manager.release(searcher); - } - } - } catch (AlreadyClosedException ex) { - throw ex; - } catch (Exception ex) { - ensureOpen(); // throw EngineCloseException here if we are already closed - logger.error((Supplier) () -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex); - throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex); - } finally { - if (!success) { // release the ref in the case of an error... 
- store.decRef(); - } - } - } + public abstract Searcher acquireSearcher(String source, SearcherScope scope) throws EngineException; public enum SearcherScope { EXTERNAL, INTERNAL @@ -557,12 +521,20 @@ public enum SearcherScope { public abstract void syncTranslog() throws IOException; - protected void ensureOpen() { + protected final void ensureOpen(Exception suppressed) { if (isClosed.get()) { - throw new AlreadyClosedException(shardId + " engine is closed", failedEngine.get()); + AlreadyClosedException ace = new AlreadyClosedException(shardId + " engine is closed", failedEngine.get()); + if (suppressed != null) { + ace.addSuppressed(suppressed); + } + throw ace; } } + protected final void ensureOpen() { + ensureOpen(null); + } + /** get commits stats for the last commit */ public CommitStats commitStats() { return new CommitStats(getLastCommittedSegmentInfos()); @@ -785,13 +757,8 @@ public final boolean refreshNeeded() { the store is closed so we need to make sure we increment it here */ try { - ReferenceManager manager = getSearcherManager("refresh_needed", SearcherScope.EXTERNAL); - final IndexSearcher searcher = manager.acquire(); - try { - final IndexReader r = searcher.getIndexReader(); - return ((DirectoryReader) r).isCurrent() == false; - } finally { - manager.release(searcher); + try (Searcher searcher = acquireSearcher("refresh_needed", SearcherScope.EXTERNAL)) { + return searcher.getDirectoryReader().isCurrent() == false; } } catch (IOException e) { logger.error("failed to access searcher manager", e); @@ -868,13 +835,17 @@ public void forceMerge(boolean flush) throws IOException { public abstract void forceMerge(boolean flush, int maxNumSegments, boolean onlyExpungeDeletes, boolean upgrade, boolean upgradeOnlyAncientSegments) throws EngineException, IOException; /** - * Snapshots the index and returns a handle to it. If needed will try and "commit" the + * Snapshots the most recent index and returns a handle to it. If needed will try and "commit" the * lucene index to make sure we have a "fresh" copy of the files to snapshot. * - * @param safeCommit indicates whether the engine should acquire the most recent safe commit, or the most recent commit. * @param flushFirst indicates whether the engine should flush before returning the snapshot */ - public abstract IndexCommitRef acquireIndexCommit(boolean safeCommit, boolean flushFirst) throws EngineException; + public abstract IndexCommitRef acquireLastIndexCommit(boolean flushFirst) throws EngineException; + + /** + * Snapshots the most recent safe index commit from the engine. + */ + public abstract IndexCommitRef acquireSafeIndexCommit() throws EngineException; /** * fail engine due to some error. the engine will also be closed. @@ -1337,8 +1308,6 @@ public void release() { } } - protected abstract ReferenceManager getSearcherManager(String source, SearcherScope scope); - /** * Method to close the engine while the write lock is held. * Must decrement the supplied when closing work is done and resources are @@ -1537,4 +1506,9 @@ public interface Warmer { public boolean isRecovering() { return false; } + + /** + * Tries to prune buffered deletes from the version map. 
+ */ + public abstract void maybePruneDeletes(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java b/server/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java index c72ec543e713c..ac461c1f58da2 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java @@ -26,23 +26,24 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.index.store.Store; +import java.io.Closeable; import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; /** * Searcher for an Engine */ -public class EngineSearcher extends Engine.Searcher { - private final ReferenceManager manager; +final class EngineSearcher extends Engine.Searcher { private final AtomicBoolean released = new AtomicBoolean(false); private final Store store; private final Logger logger; + private final ReferenceManager referenceManager; - public EngineSearcher(String source, IndexSearcher searcher, ReferenceManager manager, Store store, Logger logger) { - super(source, searcher); - this.manager = manager; + EngineSearcher(String source, ReferenceManager searcherReferenceManager, Store store, Logger logger) throws IOException { + super(source, searcherReferenceManager.acquire()); this.store = store; this.logger = logger; + this.referenceManager = searcherReferenceManager; } @Override @@ -56,7 +57,7 @@ public void close() { return; } try { - manager.release(this.searcher()); + referenceManager.release(searcher()); } catch (IOException e) { throw new IllegalStateException("Cannot close", e); } catch (AlreadyClosedException e) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index c2eefb9edd2b3..6b29176ca9fd6 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -20,9 +20,10 @@ package org.elasticsearch.index.engine; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; -import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -50,6 +51,7 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.LoggerInfoStream; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; @@ -481,13 +483,18 @@ private void recoverFromTranslogInternal() throws IOException { private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier) throws IOException { assert openMode != null; final TranslogConfig translogConfig = engineConfig.getTranslogConfig(); - String translogUUID = null; - if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { - translogUUID = loadTranslogUUIDFromLastCommit(); - // We expect that this shard already exists, so it must already have an existing translog else something is badly wrong! 
- if (translogUUID == null) { - throw new IndexFormatTooOldException("translog", "translog has no generation nor a UUID - this might be an index from a previous version consider upgrading to N-1 first"); - } + final String translogUUID; + switch (openMode) { + case CREATE_INDEX_AND_TRANSLOG: + case OPEN_INDEX_CREATE_TRANSLOG: + translogUUID = + Translog.createEmptyTranslog(translogConfig.getTranslogPath(), globalCheckpointSupplier.getAsLong(), shardId); + break; + case OPEN_INDEX_AND_TRANSLOG: + translogUUID = loadTranslogUUIDFromLastCommit(); + break; + default: + throw new AssertionError("Unknown openMode " + openMode); } return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier); } @@ -1022,21 +1029,14 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) */ private boolean mayHaveBeenIndexedBefore(Index index) { assert canOptimizeAddDocument(index); - boolean mayHaveBeenIndexBefore; - long deOptimizeTimestamp = maxUnsafeAutoIdTimestamp.get(); + final boolean mayHaveBeenIndexBefore; if (index.isRetry()) { mayHaveBeenIndexBefore = true; - do { - deOptimizeTimestamp = maxUnsafeAutoIdTimestamp.get(); - if (deOptimizeTimestamp >= index.getAutoGeneratedIdTimestamp()) { - break; - } - } while (maxUnsafeAutoIdTimestamp.compareAndSet(deOptimizeTimestamp, - index.getAutoGeneratedIdTimestamp()) == false); + maxUnsafeAutoIdTimestamp.updateAndGet(curr -> Math.max(index.getAutoGeneratedIdTimestamp(), curr)); assert maxUnsafeAutoIdTimestamp.get() >= index.getAutoGeneratedIdTimestamp(); } else { // in this case we force - mayHaveBeenIndexBefore = deOptimizeTimestamp >= index.getAutoGeneratedIdTimestamp(); + mayHaveBeenIndexBefore = maxUnsafeAutoIdTimestamp.get() >= index.getAutoGeneratedIdTimestamp(); } return mayHaveBeenIndexBefore; } @@ -1181,7 +1181,7 @@ public DeleteResult delete(Delete delete) throws IOException { } throw e; } - maybePruneDeletedTombstones(); + maybePruneDeletes(); return deleteResult; } @@ -1324,7 +1324,8 @@ public static DeletionStrategy processButSkipLucene(boolean currentlyDeleted, lo } } - private void maybePruneDeletedTombstones() { + @Override + public void maybePruneDeletes() { // It's expensive to prune because we walk the deletes map acquiring dirtyLock for each uid so we only do it // every 1/4 of gcDeletesInMillis: if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().relativeTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) { @@ -1377,18 +1378,25 @@ final void refresh(String source, SearcherScope scope) throws EngineException { writingBytes.addAndGet(bytes); try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); - switch (scope) { - case EXTERNAL: - // even though we maintain 2 managers we really do the heavy-lifting only once. - // the second refresh will only do the extra work we have to do for warming caches etc. - externalSearcherManager.maybeRefreshBlocking(); - // the break here is intentional we never refresh both internal / external together - break; - case INTERNAL: - internalSearcherManager.maybeRefreshBlocking(); - break; - default: - throw new IllegalArgumentException("unknown scope: " + scope); + if (store.tryIncRef()) { + // increment the ref just to ensure nobody closes the store during a refresh + try { + switch (scope) { + case EXTERNAL: + // even though we maintain 2 managers we really do the heavy-lifting only once. + // the second refresh will only do the extra work we have to do for warming caches etc. 
+ externalSearcherManager.maybeRefreshBlocking(); + // the break here is intentional we never refresh both internal / external together + break; + case INTERNAL: + internalSearcherManager.maybeRefreshBlocking(); + break; + default: + throw new IllegalArgumentException("unknown scope: " + scope); + } + } finally { + store.decRef(); + } } } catch (AlreadyClosedException e) { failOnTragicEvent(e); @@ -1407,7 +1415,7 @@ final void refresh(String source, SearcherScope scope) throws EngineException { // TODO: maybe we should just put a scheduled job in threadPool? // We check for pruning in each delete request, but we also prune here e.g. in case a delete burst comes in and then no more deletes // for a long time: - maybePruneDeletedTombstones(); + maybePruneDeletes(); mergeScheduler.refreshConfig(); } @@ -1627,32 +1635,15 @@ public void trimTranslog() throws EngineException { } private void pruneDeletedTombstones() { - long timeMSec = engineConfig.getThreadPool().relativeTimeInMillis(); - - // TODO: not good that we reach into LiveVersionMap here; can we move this inside VersionMap instead? problem is the dirtyLock... - - // we only need to prune the deletes map; the current/old version maps are cleared on refresh: - for (Map.Entry entry : versionMap.getAllTombstones()) { - BytesRef uid = entry.getKey(); - try (Releasable ignored = versionMap.acquireLock(uid)) { - // can we do it without this lock on each value? maybe batch to a set and get the lock once per set? - - // Must re-get it here, vs using entry.getValue(), in case the uid was indexed/deleted since we pulled the iterator: - DeleteVersionValue versionValue = versionMap.getTombstoneUnderLock(uid); - if (versionValue != null) { - if (timeMSec - versionValue.time > getGcDeletesInMillis()) { - versionMap.removeTombstoneUnderLock(uid); - } - } - } - } - + final long timeMSec = engineConfig.getThreadPool().relativeTimeInMillis(); + versionMap.pruneTombstones(timeMSec, engineConfig.getIndexSettings().getGcDeletesInMillis()); lastDeleteVersionPruneTimeMSec = timeMSec; } // testing void clearDeletedTombstones() { - versionMap.clearTombstones(); + // clean with current time Long.MAX_VALUE and interval 0 since we use a greater than relationship here. + versionMap.pruneTombstones(Long.MAX_VALUE, 0); } @Override @@ -1705,7 +1696,7 @@ public void forceMerge(final boolean flush, int maxNumSegments, boolean onlyExpu * and expected. We don't hold any locks while we block on forceMerge otherwise it would block * closing the engine as well. 
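The refresh change above wraps the `maybeRefreshBlocking` calls in a `store.tryIncRef()`/`decRef()` pair so that a concurrent store close cannot pull files out from under an in-flight refresh; if the store is already closed, the refresh silently becomes a no-op. A sketch of such a best-effort reference counter, in the spirit of `Store`'s incRef/decRef but not the actual implementation:

```java
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative ref-counted resource: tryIncRef fails once the count has
// dropped to zero, i.e. once the resource has been closed.
final class RefCounted {
    private final AtomicInteger refCount = new AtomicInteger(1); // 1 = the "open" reference

    boolean tryIncRef() {
        while (true) {
            int count = refCount.get();
            if (count <= 0) {
                return false; // already closed, caller must skip the guarded work
            }
            if (refCount.compareAndSet(count, count + 1)) {
                return true;
            }
        }
    }

    void decRef() {
        int count = refCount.decrementAndGet();
        assert count >= 0;
        if (count == 0) {
            // last reference gone: release the underlying resources here
        }
    }
}
```

A caller then guards the work with `if (ref.tryIncRef()) { try { ... } finally { ref.decRef(); } }`, exactly the shape of the refresh hunk.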
If we are not closed we pass it on to failOnTragicEvent which ensures * we are handling a tragic event exception here */ - ensureOpen(); + ensureOpen(ex); failOnTragicEvent(ex); throw ex; } catch (Exception e) { @@ -1725,7 +1716,7 @@ public void forceMerge(final boolean flush, int maxNumSegments, boolean onlyExpu } @Override - public IndexCommitRef acquireIndexCommit(final boolean safeCommit, final boolean flushFirst) throws EngineException { + public IndexCommitRef acquireLastIndexCommit(final boolean flushFirst) throws EngineException { // we have to flush outside of the readlock otherwise we might have a problem upgrading // to a write lock when we fail the engine in this operation if (flushFirst) { @@ -1733,8 +1724,22 @@ public IndexCommitRef acquireIndexCommit(final boolean safeCommit, final boolean flush(false, true); logger.trace("finish flush for snapshot"); } - final IndexCommit snapshotCommit = combinedDeletionPolicy.acquireIndexCommit(safeCommit); - return new Engine.IndexCommitRef(snapshotCommit, () -> combinedDeletionPolicy.releaseCommit(snapshotCommit)); + final IndexCommit lastCommit = combinedDeletionPolicy.acquireIndexCommit(false); + return new Engine.IndexCommitRef(lastCommit, () -> releaseIndexCommit(lastCommit)); + } + + @Override + public IndexCommitRef acquireSafeIndexCommit() throws EngineException { + final IndexCommit safeCommit = combinedDeletionPolicy.acquireIndexCommit(true); + return new Engine.IndexCommitRef(safeCommit, () -> releaseIndexCommit(safeCommit)); + } + + private void releaseIndexCommit(IndexCommit snapshot) throws IOException { + // Revisit the deletion policy if we can clean up the snapshotting commit. + if (combinedDeletionPolicy.releaseCommit(snapshot)) { + ensureOpen(); + indexWriter.deleteUnusedFiles(); + } } private boolean failOnTragicEvent(AlreadyClosedException ex) { @@ -1867,14 +1872,35 @@ protected final void closeNoLock(String reason, CountDownLatch closedLatch) { } @Override - protected ReferenceManager getSearcherManager(String source, SearcherScope scope) { - switch (scope) { - case INTERNAL: - return internalSearcherManager; - case EXTERNAL: - return externalSearcherManager; - default: - throw new IllegalStateException("unknown scope: " + scope); + public Searcher acquireSearcher(String source, SearcherScope scope) { + /* Acquire order here is store -> manager since we need + * to make sure that the store is not closed before + * the searcher is acquired.
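The new `acquireSearcher` body (continuing below) relies on a small idiom worth calling out: take a store reference up front, keep `Releasable releasable = store::decRef` as the failure-path cleanup, and null it out once the `EngineSearcher` has been constructed, because from that point the searcher owns the reference and returns it in its own `close()`. A generic sketch of the idiom, with made-up types standing in for `Store` and the searcher:

```java
import java.io.Closeable;
import java.io.IOException;

// Generic "null out the cleanup on success" sketch; Ref and Searcher are
// stand-ins for Store and EngineSearcher, not the real classes.
final class AcquireWithHandover {
    interface Ref { void incRef(); void decRef(); }
    interface Searcher extends Closeable {}
    interface Factory { Searcher create() throws IOException; }

    static Searcher acquire(Ref store, Factory factory) throws IOException {
        store.incRef();                    // take the reference before anything can fail
        Runnable onFailure = store::decRef;
        try {
            Searcher searcher = factory.create(); // may throw
            onFailure = null;              // success: the searcher now owns the reference
            return searcher;
        } finally {
            if (onFailure != null) {       // failure path only: give the reference back
                onFailure.run();
            }
        }
    }
}
```

In the patch the `finally` block's `Releasables.close(releasable)` plays the role of the failure-path cleanup and does nothing once `releasable` has been nulled out on success.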
*/ + store.incRef(); + Releasable releasable = store::decRef; + try { + final ReferenceManager referenceManager; + switch (scope) { + case INTERNAL: + referenceManager = internalSearcherManager; + break; + case EXTERNAL: + referenceManager = externalSearcherManager; + break; + default: + throw new IllegalStateException("unknown scope: " + scope); + } + EngineSearcher engineSearcher = new EngineSearcher(source, referenceManager, store, logger); + releasable = null; // success - hand over the reference to the engine searcher + return engineSearcher; + } catch (AlreadyClosedException ex) { + throw ex; + } catch (Exception ex) { + ensureOpen(ex); // throw EngineCloseException here if we are already closed + logger.error((Supplier) () -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex); + throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex); + } finally { + Releasables.close(releasable); } } @@ -2181,7 +2207,7 @@ private void ensureCanFlush() { public void onSettingsChanged() { mergeScheduler.refreshConfig(); // config().isEnableGcDeletes() or config.getGcDeletesInMillis() may have changed: - maybePruneDeletedTombstones(); + maybePruneDeletes(); if (engineConfig.isAutoGeneratedIDsOptimizationEnabled() == false) { // this is an anti-viral settings you can only opt out for the entire index // only if a shard starts up again due to relocation or if the index is closed diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index f29d1fe872d6a..9c111ebc645bd 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -40,6 +40,10 @@ final class LiveVersionMap implements ReferenceManager.RefreshListener, Accounta private static final class VersionLookup { + /** Tracks bytes used by current map, i.e. what is freed on refresh. For deletes, which are also added to tombstones, we only account + * for the CHM entry here, and account for BytesRef/VersionValue against the tombstones, since refresh would not clear this RAM. */ + final AtomicLong ramBytesUsed = new AtomicLong(); + private static final VersionLookup EMPTY = new VersionLookup(Collections.emptyMap()); private final Map map; @@ -55,6 +59,10 @@ private static final class VersionLookup { // map reference itself. private boolean unsafe; + // minimum timestamp of delete operations that were made while this map was active. 
this is used to make sure they are kept in + // the tombstone + private final AtomicLong minDeleteTimestamp = new AtomicLong(Long.MAX_VALUE); + private VersionLookup(Map map) { this.map = map; } @@ -71,7 +79,6 @@ boolean isEmpty() { return map.isEmpty(); } - int size() { return map.size(); } @@ -83,6 +90,16 @@ boolean isUnsafe() { void markAsUnsafe() { unsafe = true; } + + public VersionValue remove(BytesRef uid) { + return map.remove(uid); + } + + public void updateMinDeletedTimestamp(DeleteVersionValue delete) { + long time = delete.time; + minDeleteTimestamp.updateAndGet(prev -> Math.min(time, prev)); + } + } private static final class Maps { @@ -98,6 +115,7 @@ private static final class Maps { boolean needsSafeAccess; final boolean previousMapsNeededSafeAccess; + Maps(VersionLookup current, VersionLookup old, boolean previousMapsNeededSafeAccess) { this.current = current; this.old = old; @@ -123,8 +141,8 @@ boolean shouldInheritSafeAccess() { * Builds a new map for the refresh transition this should be called in beforeRefresh() */ Maps buildTransitionMap() { - return new Maps(new VersionLookup(ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(current.size())), - current, shouldInheritSafeAccess()); + return new Maps(new VersionLookup(ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(current.size())), current, + shouldInheritSafeAccess()); } /** @@ -133,6 +151,39 @@ Maps buildTransitionMap() { Maps invalidateOldMap() { return new Maps(current, VersionLookup.EMPTY, previousMapsNeededSafeAccess); } + + void put(BytesRef uid, VersionValue version) { + long uidRAMBytesUsed = BASE_BYTES_PER_BYTESREF + uid.bytes.length; + long ramAccounting = BASE_BYTES_PER_CHM_ENTRY + version.ramBytesUsed() + uidRAMBytesUsed; + VersionValue previousValue = current.put(uid, version); + ramAccounting += previousValue == null ? 0 : -(BASE_BYTES_PER_CHM_ENTRY + previousValue.ramBytesUsed() + uidRAMBytesUsed); + adjustRam(ramAccounting); + } + + void adjustRam(long value) { + if (value != 0) { + long v = current.ramBytesUsed.addAndGet(value); + assert v >= 0 : "bytes=" + v; + } + } + + void remove(BytesRef uid, DeleteVersionValue deleted) { + VersionValue previousValue = current.remove(uid); + current.updateMinDeletedTimestamp(deleted); + if (previousValue != null) { + long uidRAMBytesUsed = BASE_BYTES_PER_BYTESREF + uid.bytes.length; + adjustRam(-(BASE_BYTES_PER_CHM_ENTRY + previousValue.ramBytesUsed() + uidRAMBytesUsed)); + } + if (old != VersionLookup.EMPTY) { + // we also need to remove it from the old map here to make sure we don't read this stale value while + // we are in the middle of a refresh. Most of the time the old map is an empty map so we can skip it there. + old.remove(uid); + } + } + + long getMinDeleteTimestamp() { + return Math.min(current.minDeleteTimestamp.get(), old.minDeleteTimestamp.get()); + } } // All deletes also go here, and delete "tombstones" are retained after refresh: @@ -178,12 +229,6 @@ Maps invalidateOldMap() { BASE_BYTES_PER_CHM_ENTRY = chmEntryShallowSize + 2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF; } - /** - * Tracks bytes used by current map, i.e. what is freed on refresh. For deletes, which are also added to tombstones, we only account - * for the CHM entry here, and account for BytesRef/VersionValue against the tombstones, since refresh would not clear this RAM. 
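The new per-map `ramBytesUsed` counter together with `Maps.put`/`adjustRam`/`remove` above centralizes the memory accounting: every mutation applies one signed delta to an `AtomicLong`, and because the counter lives on the `VersionLookup` itself, dropping the whole map on refresh implicitly resets it (replacing the old global counter that had to be zeroed by hand). A self-contained sketch of the same bookkeeping over a plain `ConcurrentHashMap`; the overhead constant is invented, and the real code holds a per-uid lock so the put and the accounting cannot interleave:

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

// Standalone sketch of delta-based RAM accounting: every put/remove adjusts a
// single AtomicLong by the net change for that entry.
final class AccountingMap {
    private static final long ENTRY_OVERHEAD = 64; // stand-in for BASE_BYTES_PER_CHM_ENTRY etc.
    private final ConcurrentMap<String, byte[]> map = new ConcurrentHashMap<>();
    private final AtomicLong ramBytesUsed = new AtomicLong();

    void put(String key, byte[] value) {
        byte[] previous = map.put(key, value);
        long delta = ENTRY_OVERHEAD + value.length;
        if (previous != null) {
            delta -= ENTRY_OVERHEAD + previous.length; // replaced entry: count only the difference
        }
        adjustRam(delta);
    }

    void remove(String key) {
        byte[] previous = map.remove(key);
        if (previous != null) {
            adjustRam(-(ENTRY_OVERHEAD + previous.length));
        }
    }

    private void adjustRam(long delta) {
        if (delta != 0) {
            long v = ramBytesUsed.addAndGet(delta);
            assert v >= 0 : "bytes=" + v;
        }
    }

    long ramBytesUsed() {
        return ramBytesUsed.get();
    }
}
```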
- */ - final AtomicLong ramBytesUsedCurrent = new AtomicLong(); - /** * Tracks bytes used by tombstones (deletes) */ @@ -199,7 +244,6 @@ public void beforeRefresh() throws IOException { assert (unsafeKeysMap = unsafeKeysMap.buildTransitionMap()) != null; // This is not 100% correct, since concurrent indexing ops can change these counters in between our execution of the previous // line and this one, but that should be minor, and the error won't accumulate over time: - ramBytesUsedCurrent.set(0); } @Override @@ -292,48 +336,28 @@ void putUnderLock(BytesRef uid, VersionValue version) { private void putUnderLock(BytesRef uid, VersionValue version, Maps maps) { assert keyedLock.isHeldByCurrentThread(uid); assert uid.bytes.length == uid.length : "Oversized _uid! UID length: " + uid.length + ", bytes length: " + uid.bytes.length; - long uidRAMBytesUsed = BASE_BYTES_PER_BYTESREF + uid.bytes.length; - final VersionValue prev = maps.current.put(uid, version); - if (prev != null) { - // Deduct RAM for the version we just replaced: - long prevBytes = BASE_BYTES_PER_CHM_ENTRY; - if (prev.isDelete() == false) { - prevBytes += prev.ramBytesUsed() + uidRAMBytesUsed; - } - ramBytesUsedCurrent.addAndGet(-prevBytes); - } - - // Add RAM for the new version: - long newBytes = BASE_BYTES_PER_CHM_ENTRY; if (version.isDelete() == false) { - newBytes += version.ramBytesUsed() + uidRAMBytesUsed; - } - ramBytesUsedCurrent.addAndGet(newBytes); - - final VersionValue prevTombstone; - if (version.isDelete()) { - // Also enroll the delete into tombstones, and account for its RAM too: - prevTombstone = tombstones.put(uid, (DeleteVersionValue) version); - - // We initially account for BytesRef/VersionValue RAM for a delete against the tombstones, because this RAM will not be freed up - // on refresh. 
Later, in removeTombstoneUnderLock, if we clear the tombstone entry but the delete remains in current, we shift - // the accounting to current: - ramBytesUsedTombstones.addAndGet(BASE_BYTES_PER_CHM_ENTRY + version.ramBytesUsed() + uidRAMBytesUsed); - - if (prevTombstone == null && prev != null && prev.isDelete()) { - // If prev was a delete that had already been removed from tombstones, then current was already accounting for the - // BytesRef/VersionValue RAM, so we now deduct that as well: - ramBytesUsedCurrent.addAndGet(-(prev.ramBytesUsed() + uidRAMBytesUsed)); - } + maps.put(uid, version); + removeTombstoneUnderLock(uid); } else { - // UID came back to life so we remove the tombstone: - prevTombstone = tombstones.remove(uid); + DeleteVersionValue versionValue = (DeleteVersionValue) version; + putTombstone(uid, versionValue); + maps.remove(uid, versionValue); } + } + private void putTombstone(BytesRef uid, DeleteVersionValue version) { + long uidRAMBytesUsed = BASE_BYTES_PER_BYTESREF + uid.bytes.length; + // Also enroll the delete into tombstones, and account for its RAM too: + final VersionValue prevTombstone = tombstones.put(uid, version); + long accountRam = (BASE_BYTES_PER_CHM_ENTRY + version.ramBytesUsed() + uidRAMBytesUsed); // Deduct tombstones bytes used for the version we just removed or replaced: if (prevTombstone != null) { - long v = ramBytesUsedTombstones.addAndGet(-(BASE_BYTES_PER_CHM_ENTRY + prevTombstone.ramBytesUsed() + uidRAMBytesUsed)); - assert v >= 0 : "bytes=" + v; + accountRam -= (BASE_BYTES_PER_CHM_ENTRY + prevTombstone.ramBytesUsed() + uidRAMBytesUsed); + } + if (accountRam != 0) { + long v = ramBytesUsedTombstones.addAndGet(accountRam); + assert v >= 0: "bytes=" + v; } } @@ -343,42 +367,40 @@ private void putUnderLock(BytesRef uid, VersionValue version, Maps maps) { void removeTombstoneUnderLock(BytesRef uid) { assert keyedLock.isHeldByCurrentThread(uid); long uidRAMBytesUsed = BASE_BYTES_PER_BYTESREF + uid.bytes.length; - final VersionValue prev = tombstones.remove(uid); if (prev != null) { assert prev.isDelete(); long v = ramBytesUsedTombstones.addAndGet(-(BASE_BYTES_PER_CHM_ENTRY + prev.ramBytesUsed() + uidRAMBytesUsed)); assert v >= 0 : "bytes=" + v; } - final VersionValue curVersion = maps.current.get(uid); - if (curVersion != null && curVersion.isDelete()) { - // We now shift accounting of the BytesRef from tombstones to current, because a refresh would clear this RAM. This should be - // uncommon, because with the default refresh=1s and gc_deletes=60s, deletes should be cleared from current long before we drop - // them from tombstones: - ramBytesUsedCurrent.addAndGet(curVersion.ramBytesUsed() + uidRAMBytesUsed); - } } - /** - * Caller has a lock, so that this uid will not be concurrently added/deleted by another thread. - */ - DeleteVersionValue getTombstoneUnderLock(BytesRef uid) { - assert keyedLock.isHeldByCurrentThread(uid); - return tombstones.get(uid); - } - - /** - * Iterates over all deleted versions, including new ones (not yet exposed via reader) and old ones (exposed via reader but not yet GC'd). 
- */ - Iterable> getAllTombstones() { - return tombstones.entrySet(); - } - - /** - * clears all tombstones ops - */ - void clearTombstones() { - tombstones.clear(); + void pruneTombstones(long currentTime, long pruneInterval) { + for (Map.Entry entry : tombstones.entrySet()) { + final BytesRef uid = entry.getKey(); + try (Releasable lock = keyedLock.tryAcquire(uid)) { + // we use tryAcquire here since this is a best effort and we try to be least disruptive + // this method is also called under lock in the engine in certain situations such that a blocking acquire + // can lead to deadlocks. see #28714 + if (lock != null) { // did we get the lock? + // can we do it without this lock on each value? maybe batch to a set and get the lock once per set? + // Must re-get it here, vs using entry.getValue(), in case the uid was indexed/deleted since we pulled the iterator: + final DeleteVersionValue versionValue = tombstones.get(uid); + if (versionValue != null) { + // check if the value is old enough to be removed + final boolean isTooOld = currentTime - versionValue.time > pruneInterval; + if (isTooOld) { + // the version value can't be removed while it's not yet flushed to lucene, + // i.e. while it's still tracked by the current maps object + final boolean isNotTrackedByCurrentMaps = versionValue.time < maps.getMinDeleteTimestamp(); + if (isNotTrackedByCurrentMaps) { + removeTombstoneUnderLock(uid); + } + } + } + } + } + } } /** @@ -387,8 +409,6 @@ void clearTombstones() { synchronized void clear() { maps = new Maps(); tombstones.clear(); - ramBytesUsedCurrent.set(0); - // NOTE: we can't zero this here, because a refresh thread could be calling InternalEngine.pruneDeletedTombstones at the same time, // and this will lead to an assert trip. Presumably it's fine if our ramBytesUsedTombstones is non-zero after clear since the index // is being closed: @@ -397,7 +417,7 @@ synchronized void clear() { @Override public long ramBytesUsed() { - return ramBytesUsedCurrent.get() + ramBytesUsedTombstones.get(); + return maps.current.ramBytesUsed.get() + ramBytesUsedTombstones.get(); } /** @@ -405,7 +425,7 @@ public long ramBytesUsed() { * don't clear on refresh. */ long ramBytesUsedForRefresh() { - return ramBytesUsedCurrent.get(); + return maps.current.ramBytesUsed.get(); } @Override @@ -421,6 +441,11 @@ Map getAllCurrent() { return maps.current.map; } + /** Iterates over all deleted versions, including new ones (not yet exposed via reader) and old ones (exposed via reader but not yet GC'd). */ + Map getAllTombstones() { + return tombstones; + } + /** * Acquires a releasable lock for the given uId.
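The `pruneTombstones` method above deliberately uses `keyedLock.tryAcquire` rather than a blocking acquire: the engine sometimes calls it while already holding a per-uid lock, so blocking could deadlock (#28714), and skipping a contended entry is acceptable because pruning is periodic and best-effort. A standalone sketch of that pattern with `ReentrantLock.tryLock`; the per-key lock map is a simplification of Elasticsearch's `KeyedLock`, and the `minDeleteTimestamp` guard is omitted:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantLock;

// Best-effort pruning: never block on a contended key, so this loop cannot
// deadlock with writers that hold a per-key lock and call back into us.
final class TombstonePruner {
    private final ConcurrentMap<String, Long> tombstones = new ConcurrentHashMap<>(); // key -> delete time
    private final ConcurrentMap<String, ReentrantLock> locks = new ConcurrentHashMap<>();

    void prune(long currentTime, long pruneInterval) {
        for (Map.Entry<String, Long> entry : tombstones.entrySet()) {
            String key = entry.getKey();
            ReentrantLock lock = locks.computeIfAbsent(key, k -> new ReentrantLock());
            if (lock.tryLock()) { // skip the entry if someone else holds the lock
                try {
                    // re-read under the lock; the iterator's value may be stale
                    Long deleteTime = tombstones.get(key);
                    if (deleteTime != null && currentTime - deleteTime > pruneInterval) {
                        tombstones.remove(key);
                    }
                } finally {
                    lock.unlock();
                }
            }
        }
    }
}
```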
All *UnderLock methods require * this lock to be held by the caller otherwise the visibility guarantees of this version diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java index 0c03e8a551f6f..af50dfbcb4e18 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.NumberType; @@ -128,22 +129,22 @@ public static class TypeParser implements Mapper.TypeParser { if (fieldName.equals("type")) { continue; } - if (Fields.ANALYZER.match(fieldName)) { + if (Fields.ANALYZER.match(fieldName, LoggingDeprecationHandler.INSTANCE)) { indexAnalyzer = getNamedAnalyzer(parserContext, fieldNode.toString()); iterator.remove(); - } else if (Fields.SEARCH_ANALYZER.match(fieldName)) { + } else if (Fields.SEARCH_ANALYZER.match(fieldName, LoggingDeprecationHandler.INSTANCE)) { searchAnalyzer = getNamedAnalyzer(parserContext, fieldNode.toString()); iterator.remove(); - } else if (Fields.PRESERVE_SEPARATORS.match(fieldName)) { + } else if (Fields.PRESERVE_SEPARATORS.match(fieldName, LoggingDeprecationHandler.INSTANCE)) { builder.preserveSeparators(Boolean.parseBoolean(fieldNode.toString())); iterator.remove(); - } else if (Fields.PRESERVE_POSITION_INCREMENTS.match(fieldName)) { + } else if (Fields.PRESERVE_POSITION_INCREMENTS.match(fieldName, LoggingDeprecationHandler.INSTANCE)) { builder.preservePositionIncrements(Boolean.parseBoolean(fieldNode.toString())); iterator.remove(); - } else if (Fields.MAX_INPUT_LENGTH.match(fieldName)) { + } else if (Fields.MAX_INPUT_LENGTH.match(fieldName, LoggingDeprecationHandler.INSTANCE)) { builder.maxInputLength(Integer.parseInt(fieldNode.toString())); iterator.remove(); - } else if (Fields.CONTEXTS.match(fieldName)) { + } else if (Fields.CONTEXTS.match(fieldName, LoggingDeprecationHandler.INSTANCE)) { builder.contextMappings(ContextMappings.load(fieldNode, parserContext.indexVersionCreated())); iterator.remove(); } else if (parseMultiField(builder, name, parserContext, fieldName, fieldNode)) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java index be25775c1353e..15faa70456c2a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; @@ -162,7 +163,8 @@ private static String getRemainingFields(Map map) { private Tuple> extractMapping(String type, String source) throws MapperParsingException { Map root; - try
(XContentParser parser = XContentType.JSON.xContent().createParser(xContentRegistry, source)) { + try (XContentParser parser = XContentType.JSON.xContent() + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, source)) { root = parser.mapOrdered(); } catch (Exception e) { throw new MapperParsingException("failed to parse mapping definition", e); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index a04673eca4c38..dd737af1ddf32 100755 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; @@ -200,7 +201,8 @@ public DocumentMapperParser documentMapperParser() { * Parses the mappings (formatted as JSON) into a map */ public static Map parseMapping(NamedXContentRegistry xContentRegistry, String mappingSource) throws Exception { - try (XContentParser parser = XContentType.JSON.xContent().createParser(xContentRegistry, mappingSource)) { + try (XContentParser parser = XContentType.JSON.xContent() + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, mappingSource)) { return parser.map(); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java index 1d3588ae5a745..9de53b8dec6dd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -116,7 +117,7 @@ public MetadataFieldMapper.Builder parse(String name, Map node, if (fieldName.equals("type")) { builder.type(fieldNode.toString()); iterator.remove(); - } else if (FIELDDATA.match(fieldName)) { + } else if (FIELDDATA.match(fieldName, LoggingDeprecationHandler.INSTANCE)) { // for bw compat only Map fieldDataSettings = nodeMapValue(fieldNode, "fielddata"); if (fieldDataSettings.containsKey("loading")) { diff --git a/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java index ac57c2abd585b..58697d9ada5ac 100644 --- a/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java @@ -333,15 +333,15 @@ public static BoolQueryBuilder fromXContent(XContentParser parser) throws IOExce } } } else if (token.isValue()) { - if (DISABLE_COORD_FIELD.match(currentFieldName)) { + if (DISABLE_COORD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { // ignore - } else if 
(MINIMUM_SHOULD_MATCH.match(currentFieldName)) { + } else if (MINIMUM_SHOULD_MATCH.match(currentFieldName, parser.getDeprecationHandler())) { minimumShouldMatch = parser.textOrNull(); - } else if (BOOST_FIELD.match(currentFieldName)) { + } else if (BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (ADJUST_PURE_NEGATIVE.match(currentFieldName)) { + } else if (ADJUST_PURE_NEGATIVE.match(currentFieldName, parser.getDeprecationHandler())) { adjustPureNegative = parser.booleanValue(); - } else if (NAME_FIELD.match(currentFieldName)) { + } else if (NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "[bool] query does not support [" + currentFieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java index 833e3a2ed0db1..35b0d18b1e88c 100644 --- a/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java @@ -152,21 +152,21 @@ public static BoostingQueryBuilder fromXContent(XContentParser parser) throws IO if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if (POSITIVE_FIELD.match(currentFieldName)) { + if (POSITIVE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { positiveQuery = parseInnerQueryBuilder(parser); positiveQueryFound = true; - } else if (NEGATIVE_FIELD.match(currentFieldName)) { + } else if (NEGATIVE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { negativeQuery = parseInnerQueryBuilder(parser); negativeQueryFound = true; } else { throw new ParsingException(parser.getTokenLocation(), "[boosting] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if (NEGATIVE_BOOST_FIELD.match(currentFieldName)) { + if (NEGATIVE_BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { negativeBoost = parser.floatValue(); - } else if (NAME_FIELD.match(currentFieldName)) { + } else if (NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); - } else if (BOOST_FIELD.match(currentFieldName)) { + } else if (BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); } else { throw new ParsingException(parser.getTokenLocation(), "[boosting] query does not support [" + currentFieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java index dc7c3d92e924c..c0c08e654807f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java @@ -271,15 +271,15 @@ public static CommonTermsQueryBuilder fromXContent(XContentParser parser) throws if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName)) { + if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { String innerFieldName = null; while ((token = parser.nextToken()) != 
XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { innerFieldName = parser.currentName(); } else if (token.isValue()) { - if (LOW_FREQ_FIELD.match(innerFieldName)) { + if (LOW_FREQ_FIELD.match(innerFieldName, parser.getDeprecationHandler())) { lowFreqMinimumShouldMatch = parser.text(); - } else if (HIGH_FREQ_FIELD.match(innerFieldName)) { + } else if (HIGH_FREQ_FIELD.match(innerFieldName, parser.getDeprecationHandler())) { highFreqMinimumShouldMatch = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + @@ -297,23 +297,23 @@ public static CommonTermsQueryBuilder fromXContent(XContentParser parser) throws "] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if (QUERY_FIELD.match(currentFieldName)) { + if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { text = parser.objectText(); - } else if (ANALYZER_FIELD.match(currentFieldName)) { + } else if (ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { analyzer = parser.text(); - } else if (DISABLE_COORD_FIELD.match(currentFieldName)) { + } else if (DISABLE_COORD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { // ignore - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (HIGH_FREQ_OPERATOR_FIELD.match(currentFieldName)) { + } else if (HIGH_FREQ_OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { highFreqOperator = Operator.fromString(parser.text()); - } else if (LOW_FREQ_OPERATOR_FIELD.match(currentFieldName)) { + } else if (LOW_FREQ_OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { lowFreqOperator = Operator.fromString(parser.text()); - } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName)) { + } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { lowFreqMinimumShouldMatch = parser.text(); - } else if (CUTOFF_FREQUENCY_FIELD.match(currentFieldName)) { + } else if (CUTOFF_FREQUENCY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { cutoffFrequency = parser.floatValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + diff --git a/server/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryBuilder.java index df4e31c5955a9..18330f9cb4d9b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryBuilder.java @@ -97,7 +97,7 @@ public static ConstantScoreQueryBuilder fromXContent(XContentParser parser) thro if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if (INNER_QUERY_FIELD.match(currentFieldName)) { + if (INNER_QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { if (queryFound) { throw new ParsingException(parser.getTokenLocation(), "[" + ConstantScoreQueryBuilder.NAME + "]" + " accepts only one 'filter' element."); 
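From here on, the bulk of the diff is mechanical: every `ParseField.match(currentFieldName)` call gains the parser's `DeprecationHandler`, so how deprecated field spellings are reported is decided by whoever created the parser rather than being hard-wired into `ParseField`. A hypothetical `fromXContent`-style fragment showing the migrated shape (the class and field names are invented for illustration):

```java
import java.io.IOException;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.XContentParser;

// Illustrative fragment, not a real query builder: each match() call threads
// through the DeprecationHandler the parser was created with.
final class ExampleParser {
    private static final ParseField BOOST_FIELD = new ParseField("boost");
    private static final ParseField NAME_FIELD = new ParseField("_name");

    static void parseValueField(XContentParser parser, String currentFieldName) throws IOException {
        if (BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
            float boost = parser.floatValue();
            // ... apply boost
        } else if (NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
            String queryName = parser.text();
            // ... apply queryName
        }
    }
}
```

Call sites with no parser in scope, such as `CompletionFieldMapper.TypeParser` earlier in this diff, instead pass `LoggingDeprecationHandler.INSTANCE` explicitly.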
@@ -109,9 +109,9 @@ public static ConstantScoreQueryBuilder fromXContent(XContentParser parser) thro "[constant_score] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java index 516712040897c..0e2a19e2b0754 100644 --- a/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java @@ -136,14 +136,14 @@ public static DisMaxQueryBuilder fromXContent(XContentParser parser) throws IOEx if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if (QUERIES_FIELD.match(currentFieldName)) { + if (QUERIES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queriesFound = true; queries.add(parseInnerQueryBuilder(parser)); } else { throw new ParsingException(parser.getTokenLocation(), "[dis_max] query does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { - if (QUERIES_FIELD.match(currentFieldName)) { + if (QUERIES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queriesFound = true; while (token != XContentParser.Token.END_ARRAY) { queries.add(parseInnerQueryBuilder(parser)); @@ -153,11 +153,11 @@ public static DisMaxQueryBuilder fromXContent(XContentParser parser) throws IOEx throw new ParsingException(parser.getTokenLocation(), "[dis_max] query does not support [" + currentFieldName + "]"); } } else { - if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (TIE_BREAKER_FIELD.match(currentFieldName)) { + } else if (TIE_BREAKER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tieBreaker = parser.floatValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "[dis_max] query does not support [" + currentFieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java index 97378e01236fb..280df7cfa6ad8 100644 --- a/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java @@ -99,11 +99,11 @@ public static ExistsQueryBuilder fromXContent(XContentParser parser) throws IOEx if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (FIELD_FIELD.match(currentFieldName)) { + if (FIELD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { 
fieldPattern = parser.text(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); } else { throw new ParsingException(parser.getTokenLocation(), "[" + ExistsQueryBuilder.NAME + @@ -131,7 +131,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException { } public static Query newFilter(QueryShardContext context, String fieldPattern) { - + final FieldNamesFieldMapper.FieldNamesFieldType fieldNamesFieldType = (FieldNamesFieldMapper.FieldNamesFieldType) context .getMapperService().fullName(FieldNamesFieldMapper.NAME); if (fieldNamesFieldType == null) { @@ -165,7 +165,7 @@ public static Query newFilter(QueryShardContext context, String fieldPattern) { } private static Query newLegacyExistsQuery(Collection fields) { - // We create TermsQuery directly here rather than using FieldNamesFieldType.termsQuery() + // We create TermsQuery directly here rather than using FieldNamesFieldType.termsQuery() // so we don't end up with deprecation warnings if (fields.size() == 1) { Query filter = new TermQuery(new Term(FieldNamesFieldMapper.NAME, fields.iterator().next())); diff --git a/server/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java index 9fd037f561033..9190d4de14aec 100644 --- a/server/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java @@ -113,7 +113,7 @@ public static FieldMaskingSpanQueryBuilder fromXContent(XContentParser parser) t if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if (QUERY_FIELD.match(currentFieldName)) { + if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { QueryBuilder query = parseInnerQueryBuilder(parser); if (query instanceof SpanQueryBuilder == false) { throw new ParsingException(parser.getTokenLocation(), "[field_masking_span] query must be of type span query"); @@ -124,11 +124,11 @@ public static FieldMaskingSpanQueryBuilder fromXContent(XContentParser parser) t + currentFieldName + "]"); } } else { - if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (FIELD_FIELD.match(currentFieldName)) { + } else if (FIELD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { field = parser.text(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java index ba6b4dd0450d6..91843ffbd3c9d 100644 --- a/server/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MappedFieldType; @@ -272,23 +273,23 @@ public static FuzzyQueryBuilder fromXContent(XContentParser parser) throws IOExc if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (TERM_FIELD.match(currentFieldName)) { + if (TERM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { value = parser.objectBytes(); - } else if (VALUE_FIELD.match(currentFieldName)) { + } else if (VALUE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { value = parser.objectBytes(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (Fuzziness.FIELD.match(currentFieldName)) { + } else if (Fuzziness.FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzziness = Fuzziness.parse(parser); - } else if (PREFIX_LENGTH_FIELD.match(currentFieldName)) { + } else if (PREFIX_LENGTH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { prefixLength = parser.intValue(); - } else if (MAX_EXPANSIONS_FIELD.match(currentFieldName)) { + } else if (MAX_EXPANSIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { maxExpansions = parser.intValue(); - } else if (TRANSPOSITIONS_FIELD.match(currentFieldName)) { + } else if (TRANSPOSITIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { transpositions = parser.booleanValue(); - } else if (REWRITE_FIELD.match(currentFieldName)) { + } else if (REWRITE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { rewrite = parser.textOrNull(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), @@ -336,7 +337,8 @@ protected Query doToQuery(QueryShardContext context) throws IOException { query = new FuzzyQuery(new Term(fieldName, BytesRefs.toBytesRef(value)), maxEdits, prefixLength, maxExpansions, transpositions); } if (query instanceof MultiTermQuery) { - MultiTermQuery.RewriteMethod rewriteMethod = QueryParsers.parseRewriteMethod(rewrite, null); + MultiTermQuery.RewriteMethod rewriteMethod = QueryParsers.parseRewriteMethod(rewrite, null, + LoggingDeprecationHandler.INSTANCE); QueryParsers.setRewriteMethod((MultiTermQuery) query, rewriteMethod); } return query; diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index 47dcbaa351454..3fea896342270 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -403,15 +403,15 @@ public static GeoBoundingBoxQueryBuilder fromXContent(XContentParser parser) thr throw new ElasticsearchParseException("failed to 
parse [{}] query. [{}]", NAME, e.getMessage()); } } else if (token.isValue()) { - if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (VALIDATION_METHOD_FIELD.match(currentFieldName)) { + } else if (VALIDATION_METHOD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { validationMethod = GeoValidationMethod.fromString(parser.text()); - } else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName)) { + } else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { ignoreUnmapped = parser.booleanValue(); - } else if (TYPE_FIELD.match(currentFieldName)) { + } else if (TYPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { type = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "failed to parse [{}] query. unexpected field [{}]", @@ -479,30 +479,30 @@ public static Rectangle parseBoundingBox(XContentParser parser) throws IOExcepti if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); token = parser.nextToken(); - if (WKT_FIELD.match(currentFieldName)) { + if (WKT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { envelope = (EnvelopeBuilder)(GeoWKTParser.parseExpectedType(parser, GeoShapeType.ENVELOPE)); - } else if (TOP_FIELD.match(currentFieldName)) { + } else if (TOP_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { top = parser.doubleValue(); - } else if (BOTTOM_FIELD.match(currentFieldName)) { + } else if (BOTTOM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { bottom = parser.doubleValue(); - } else if (LEFT_FIELD.match(currentFieldName)) { + } else if (LEFT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { left = parser.doubleValue(); - } else if (RIGHT_FIELD.match(currentFieldName)) { + } else if (RIGHT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { right = parser.doubleValue(); } else { - if (TOP_LEFT_FIELD.match(currentFieldName)) { + if (TOP_LEFT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { GeoUtils.parseGeoPoint(parser, sparse); top = sparse.getLat(); left = sparse.getLon(); - } else if (BOTTOM_RIGHT_FIELD.match(currentFieldName)) { + } else if (BOTTOM_RIGHT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { GeoUtils.parseGeoPoint(parser, sparse); bottom = sparse.getLat(); right = sparse.getLon(); - } else if (TOP_RIGHT_FIELD.match(currentFieldName)) { + } else if (TOP_RIGHT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { GeoUtils.parseGeoPoint(parser, sparse); top = sparse.getLat(); right = sparse.getLon(); - } else if (BOTTOM_LEFT_FIELD.match(currentFieldName)) { + } else if (BOTTOM_LEFT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { GeoUtils.parseGeoPoint(parser, sparse); bottom = sparse.getLat(); left = sparse.getLon(); diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java index a6ef5fbd69572..5db7516437314 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java @@ -311,15 +311,15 @@ public static GeoDistanceQueryBuilder fromXContent(XContentParser parser) throws } } } else if (token.isValue()) { - if (DISTANCE_FIELD.match(currentFieldName)) { + if (DISTANCE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { if (token == XContentParser.Token.VALUE_STRING) { vDistance = parser.text(); // a String } else { vDistance = parser.numberValue(); // a Number } - } else if (UNIT_FIELD.match(currentFieldName)) { + } else if (UNIT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { unit = DistanceUnit.fromString(parser.text()); - } else if (DISTANCE_TYPE_FIELD.match(currentFieldName)) { + } else if (DISTANCE_TYPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { geoDistance = GeoDistance.fromString(parser.text()); } else if (currentFieldName.endsWith(".lat")) { point.resetLat(parser.doubleValue()); @@ -327,13 +327,13 @@ public static GeoDistanceQueryBuilder fromXContent(XContentParser parser) throws } else if (currentFieldName.endsWith(".lon")) { point.resetLon(parser.doubleValue()); fieldName = currentFieldName.substring(0, currentFieldName.length() - ".lon".length()); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName)) { + } else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { ignoreUnmapped = parser.booleanValue(); - } else if (VALIDATION_METHOD_FIELD.match(currentFieldName)) { + } else if (VALIDATION_METHOD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { validationMethod = GeoValidationMethod.fromString(parser.text()); } else { if (fieldName == null) { diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java index 45e71231ab6d7..34c29ab0f1890 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java @@ -243,7 +243,7 @@ public static GeoPolygonQueryBuilder fromXContent(XContentParser parser) throws if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_ARRAY) { - if (POINTS_FIELD.match(currentFieldName)) { + if (POINTS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { shell = new ArrayList<>(); while ((token = parser.nextToken()) != Token.END_ARRAY) { shell.add(GeoUtils.parseGeoPoint(parser)); @@ -262,9 +262,9 @@ public static GeoPolygonQueryBuilder fromXContent(XContentParser parser) throws queryName = parser.text(); } else if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); - } else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName)) { + } else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { ignoreUnmapped = parser.booleanValue(); - } else if (VALIDATION_METHOD.match(currentFieldName)) { + } else if (VALIDATION_METHOD.match(currentFieldName, parser.getDeprecationHandler())) { validationMethod 
= GeoValidationMethod.fromString(parser.text()); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java index 5fb9776946326..d7d9797dad88b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java @@ -42,6 +42,7 @@ import org.elasticsearch.common.geo.parsers.ShapeParser; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; @@ -401,7 +402,9 @@ public void onResponse(GetResponse response) { int currentPathSlot = 0; // It is safe to use EMPTY here because this never uses namedObject - try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, response.getSourceAsBytesRef())) { + try (XContentParser parser = XContentHelper + .createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, response.getSourceAsBytesRef())) { XContentParser.Token currentToken; while ((currentToken = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (currentToken == XContentParser.Token.FIELD_NAME) { @@ -515,31 +518,31 @@ public static GeoShapeQueryBuilder fromXContent(XContentParser parser) throws IO if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); token = parser.nextToken(); - if (SHAPE_FIELD.match(currentFieldName)) { + if (SHAPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { shape = ShapeParser.parse(parser); - } else if (STRATEGY_FIELD.match(currentFieldName)) { + } else if (STRATEGY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { String strategyName = parser.text(); strategy = SpatialStrategy.fromString(strategyName); if (strategy == null) { throw new ParsingException(parser.getTokenLocation(), "Unknown strategy [" + strategyName + " ]"); } - } else if (RELATION_FIELD.match(currentFieldName)) { + } else if (RELATION_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { shapeRelation = ShapeRelation.getRelationByName(parser.text()); if (shapeRelation == null) { throw new ParsingException(parser.getTokenLocation(), "Unknown shape operation [" + parser.text() + " ]"); } - } else if (INDEXED_SHAPE_FIELD.match(currentFieldName)) { + } else if (INDEXED_SHAPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (SHAPE_ID_FIELD.match(currentFieldName)) { + if (SHAPE_ID_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { id = parser.text(); - } else if (SHAPE_TYPE_FIELD.match(currentFieldName)) { + } else if (SHAPE_TYPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { type = parser.text(); - } else if (SHAPE_INDEX_FIELD.match(currentFieldName)) { + } else if (SHAPE_INDEX_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { index = parser.text(); - } else if (SHAPE_PATH_FIELD.match(currentFieldName)) { + } else if 
(SHAPE_PATH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { shapePath = parser.text(); } } else { @@ -554,11 +557,11 @@ public static GeoShapeQueryBuilder fromXContent(XContentParser parser) throws IO } } } else if (token.isValue()) { - if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); - } else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName)) { + } else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { ignoreUnmapped = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), "[" + GeoShapeQueryBuilder.NAME + diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java index 5049cf5a0e1da..dccc1a5bc086a 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java @@ -66,9 +66,9 @@ public static MatchNoneQueryBuilder fromXContent(XContentParser parser) throws I if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); } else { throw new ParsingException(parser.getTokenLocation(), "["+MatchNoneQueryBuilder.NAME + diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java index 28a77c0566756..0e90ba5ae575b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java @@ -211,17 +211,17 @@ public static MatchPhrasePrefixQueryBuilder fromXContent(XContentParser parser) if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (MatchQueryBuilder.QUERY_FIELD.match(currentFieldName)) { + if (MatchQueryBuilder.QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { value = parser.objectText(); - } else if (MatchQueryBuilder.ANALYZER_FIELD.match(currentFieldName)) { + } else if (MatchQueryBuilder.ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { analyzer = parser.text(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (MatchPhraseQueryBuilder.SLOP_FIELD.match(currentFieldName)) { + } else if (MatchPhraseQueryBuilder.SLOP_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { slop = parser.intValue(); - } else if (MAX_EXPANSIONS_FIELD.match(currentFieldName)) { + } else if 
(MAX_EXPANSIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { maxExpansion = parser.intValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java index 1bdab8d78a81d..03a1f78289409 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java @@ -182,15 +182,15 @@ public static MatchPhraseQueryBuilder fromXContent(XContentParser parser) throws if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (MatchQueryBuilder.QUERY_FIELD.match(currentFieldName)) { + if (MatchQueryBuilder.QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { value = parser.objectText(); - } else if (MatchQueryBuilder.ANALYZER_FIELD.match(currentFieldName)) { + } else if (MatchQueryBuilder.ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { analyzer = parser.text(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (SLOP_FIELD.match(currentFieldName)) { + } else if (SLOP_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { slop = parser.intValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java index cc19603ea64d8..3895aeab0f3da 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.support.QueryParsers; @@ -412,7 +413,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException { matchQuery.setFuzzyPrefixLength(prefixLength); matchQuery.setMaxExpansions(maxExpansions); matchQuery.setTranspositions(fuzzyTranspositions); - matchQuery.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(fuzzyRewrite, null)); + matchQuery.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(fuzzyRewrite, null, LoggingDeprecationHandler.INSTANCE)); matchQuery.setLenient(lenient); matchQuery.setCommonTermsCutoff(cutoffFrequency); matchQuery.setZeroTermsQuery(zeroTermsQuery); @@ -481,31 +482,31 @@ public static MatchQueryBuilder fromXContent(XContentParser parser) throws IOExc if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = 
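
Every `fromXContent` change in this patch follows the same mechanical shape: the one-argument `ParseField.match(String)` becomes a two-argument call that names its `DeprecationHandler`, and inside parsing code that handler is the one the parser was built with. A minimal sketch of the pattern (the field loop and the `boost` local are illustrative):

```java
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
    if (token == XContentParser.Token.FIELD_NAME) {
        currentFieldName = parser.currentName();
    } else if (token.isValue()) {
        // match() now reports deprecated aliases through the parser's own handler
        if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
            boost = parser.floatValue();
        }
        // ... the remaining fields of each builder follow the same shape ...
    }
}
```

The design choice is visible at every call site: parsing code never picks a deprecation policy itself, it defers to whoever constructed the parser.
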
parser.currentName(); } else if (token.isValue()) { - if (QUERY_FIELD.match(currentFieldName)) { + if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { value = parser.objectText(); - } else if (ANALYZER_FIELD.match(currentFieldName)) { + } else if (ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { analyzer = parser.text(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (Fuzziness.FIELD.match(currentFieldName)) { + } else if (Fuzziness.FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzziness = Fuzziness.parse(parser); - } else if (PREFIX_LENGTH_FIELD.match(currentFieldName)) { + } else if (PREFIX_LENGTH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { prefixLength = parser.intValue(); - } else if (MAX_EXPANSIONS_FIELD.match(currentFieldName)) { + } else if (MAX_EXPANSIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { maxExpansion = parser.intValue(); - } else if (OPERATOR_FIELD.match(currentFieldName)) { + } else if (OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { operator = Operator.fromString(parser.text()); - } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName)) { + } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { minimumShouldMatch = parser.textOrNull(); - } else if (FUZZY_REWRITE_FIELD.match(currentFieldName)) { + } else if (FUZZY_REWRITE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzzyRewrite = parser.textOrNull(); - } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName)) { + } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzzyTranspositions = parser.booleanValue(); - } else if (LENIENT_FIELD.match(currentFieldName)) { + } else if (LENIENT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { lenient = parser.booleanValue(); - } else if (CUTOFF_FREQUENCY_FIELD.match(currentFieldName)) { + } else if (CUTOFF_FREQUENCY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { cutOffFrequency = parser.floatValue(); - } else if (ZERO_TERMS_QUERY_FIELD.match(currentFieldName)) { + } else if (ZERO_TERMS_QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { String zeroTermsDocs = parser.text(); if ("none".equalsIgnoreCase(zeroTermsDocs)) { zeroTermsQuery = MatchQuery.ZeroTermsQuery.NONE; @@ -515,9 +516,9 @@ public static MatchQueryBuilder fromXContent(XContentParser parser) throws IOExc throw new ParsingException(parser.getTokenLocation(), "Unsupported zero_terms_docs value [" + zeroTermsDocs + "]"); } - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); - } else if (GENERATE_SYNONYMS_PHRASE_QUERY.match(currentFieldName)) { + } else if (GENERATE_SYNONYMS_PHRASE_QUERY.match(currentFieldName, parser.getDeprecationHandler())) { autoGenerateSynonymsPhraseQuery = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java index 24a4eef9802d5..a94c2dae283a5 100644 --- 
a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java @@ -366,16 +366,16 @@ public static Item parse(XContentParser parser, Item item) throws IOException { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (currentFieldName != null) { - if (INDEX.match(currentFieldName)) { + if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { item.index = parser.text(); - } else if (TYPE.match(currentFieldName)) { + } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { item.type = parser.text(); - } else if (ID.match(currentFieldName)) { + } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { item.id = parser.text(); - } else if (DOC.match(currentFieldName)) { + } else if (DOC.match(currentFieldName, parser.getDeprecationHandler())) { item.doc = jsonBuilder().copyCurrentStructure(parser).bytes(); item.xContentType = XContentType.JSON; - } else if (FIELDS.match(currentFieldName)) { + } else if (FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { if (token == XContentParser.Token.START_ARRAY) { List fields = new ArrayList<>(); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { @@ -386,13 +386,13 @@ public static Item parse(XContentParser parser, Item item) throws IOException { throw new ElasticsearchParseException( "failed to parse More Like This item. field [fields] must be an array"); } - } else if (PER_FIELD_ANALYZER.match(currentFieldName)) { + } else if (PER_FIELD_ANALYZER.match(currentFieldName, parser.getDeprecationHandler())) { item.perFieldAnalyzer(TermVectorsRequest.readPerFieldAnalyzer(parser.map())); - } else if (ROUTING.match(currentFieldName)) { + } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { item.routing = parser.text(); - } else if (VERSION.match(currentFieldName)) { + } else if (VERSION.match(currentFieldName, parser.getDeprecationHandler())) { item.version = parser.longValue(); - } else if (VERSION_TYPE.match(currentFieldName)) { + } else if (VERSION_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { item.versionType = VersionType.fromString(parser.text()); } else { throw new ElasticsearchParseException( @@ -834,31 +834,31 @@ public static MoreLikeThisQueryBuilder fromXContent(XContentParser parser) throw if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (LIKE.match(currentFieldName)) { + if (LIKE.match(currentFieldName, parser.getDeprecationHandler())) { parseLikeField(parser, likeTexts, likeItems); - } else if (UNLIKE.match(currentFieldName)) { + } else if (UNLIKE.match(currentFieldName, parser.getDeprecationHandler())) { parseLikeField(parser, unlikeTexts, unlikeItems); - } else if (MAX_QUERY_TERMS.match(currentFieldName)) { + } else if (MAX_QUERY_TERMS.match(currentFieldName, parser.getDeprecationHandler())) { maxQueryTerms = parser.intValue(); - } else if (MIN_TERM_FREQ.match(currentFieldName)) { + } else if (MIN_TERM_FREQ.match(currentFieldName, parser.getDeprecationHandler())) { minTermFreq =parser.intValue(); - } else if (MIN_DOC_FREQ.match(currentFieldName)) { + } else if (MIN_DOC_FREQ.match(currentFieldName, parser.getDeprecationHandler())) { minDocFreq = parser.intValue(); - } else if (MAX_DOC_FREQ.match(currentFieldName)) { + } else if (MAX_DOC_FREQ.match(currentFieldName, parser.getDeprecationHandler())) { maxDocFreq 
= parser.intValue(); - } else if (MIN_WORD_LENGTH.match(currentFieldName)) { + } else if (MIN_WORD_LENGTH.match(currentFieldName, parser.getDeprecationHandler())) { minWordLength = parser.intValue(); - } else if (MAX_WORD_LENGTH.match(currentFieldName)) { + } else if (MAX_WORD_LENGTH.match(currentFieldName, parser.getDeprecationHandler())) { maxWordLength = parser.intValue(); - } else if (ANALYZER.match(currentFieldName)) { + } else if (ANALYZER.match(currentFieldName, parser.getDeprecationHandler())) { analyzer = parser.text(); - } else if (MINIMUM_SHOULD_MATCH.match(currentFieldName)) { + } else if (MINIMUM_SHOULD_MATCH.match(currentFieldName, parser.getDeprecationHandler())) { minimumShouldMatch = parser.text(); - } else if (BOOST_TERMS.match(currentFieldName)) { + } else if (BOOST_TERMS.match(currentFieldName, parser.getDeprecationHandler())) { boostTerms = parser.floatValue(); - } else if (INCLUDE.match(currentFieldName)) { + } else if (INCLUDE.match(currentFieldName, parser.getDeprecationHandler())) { include = parser.booleanValue(); - } else if (FAIL_ON_UNSUPPORTED_FIELD.match(currentFieldName)) { + } else if (FAIL_ON_UNSUPPORTED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { failOnUnsupportedField = parser.booleanValue(); } else if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); @@ -868,20 +868,20 @@ public static MoreLikeThisQueryBuilder fromXContent(XContentParser parser) throw throw new ParsingException(parser.getTokenLocation(), "[mlt] query does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { - if (FIELDS.match(currentFieldName)) { + if (FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { fields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { fields.add(parser.text()); } - } else if (LIKE.match(currentFieldName)) { + } else if (LIKE.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { parseLikeField(parser, likeTexts, likeItems); } - } else if (UNLIKE.match(currentFieldName)) { + } else if (UNLIKE.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { parseLikeField(parser, unlikeTexts, unlikeItems); } - } else if (STOP_WORDS.match(currentFieldName)) { + } else if (STOP_WORDS.match(currentFieldName, parser.getDeprecationHandler())) { stopWords = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { stopWords.add(parser.text()); @@ -890,9 +890,9 @@ public static MoreLikeThisQueryBuilder fromXContent(XContentParser parser) throw throw new ParsingException(parser.getTokenLocation(), "[mlt] query does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_OBJECT) { - if (LIKE.match(currentFieldName)) { + if (LIKE.match(currentFieldName, parser.getDeprecationHandler())) { parseLikeField(parser, likeTexts, likeItems); - } else if (UNLIKE.match(currentFieldName)) { + } else if (UNLIKE.match(currentFieldName, parser.getDeprecationHandler())) { parseLikeField(parser, unlikeTexts, unlikeItems); } else { throw new ParsingException(parser.getTokenLocation(), "[mlt] query does not support [" + currentFieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index 
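
For `more_like_this`, the same two-argument `match` appears at three structurally different places because `like`/`unlike` accept a bare value, an array, or an object. A condensed restatement of the dispatch in the hunks above:

```java
// All three token shapes funnel into the same helper; Item.parse then inherits
// the parser's DeprecationHandler through its own two-argument match() calls.
if (token.isValue() || token == XContentParser.Token.START_OBJECT) {
    parseLikeField(parser, likeTexts, likeItems);
} else if (token == XContentParser.Token.START_ARRAY) {
    while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
        parseLikeField(parser, likeTexts, likeItems);
    }
}
```
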
0411b955b6547..e56fd44f5b856 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -31,6 +31,8 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.support.QueryParsers; @@ -154,11 +156,11 @@ public ParseField parseField() { return parseField; } - public static Type parse(String value) { + public static Type parse(String value, DeprecationHandler deprecationHandler) { MultiMatchQueryBuilder.Type[] values = MultiMatchQueryBuilder.Type.values(); Type type = null; for (MultiMatchQueryBuilder.Type t : values) { - if (t.parseField().match(value)) { + if (t.parseField().match(value, deprecationHandler)) { type = t; break; } @@ -326,7 +328,7 @@ public MultiMatchQueryBuilder type(Object type) { if (type == null) { throw new IllegalArgumentException("[" + NAME + "] requires type to be non-null"); } - this.type = Type.parse(type.toString().toLowerCase(Locale.ROOT)); + this.type = Type.parse(type.toString().toLowerCase(Locale.ROOT), LoggingDeprecationHandler.INSTANCE); return this; } @@ -639,7 +641,7 @@ public static MultiMatchQueryBuilder fromXContent(XContentParser parser) throws while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); - } else if (FIELDS_FIELD.match(currentFieldName)) { + } else if (FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { if (token == XContentParser.Token.START_ARRAY) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { parseFieldAndBoost(parser, fieldsBoosts); @@ -651,37 +653,37 @@ public static MultiMatchQueryBuilder fromXContent(XContentParser parser) throws "[" + NAME + "] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if (QUERY_FIELD.match(currentFieldName)) { + if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { value = parser.objectText(); - } else if (TYPE_FIELD.match(currentFieldName)) { - type = MultiMatchQueryBuilder.Type.parse(parser.text()); - } else if (ANALYZER_FIELD.match(currentFieldName)) { + } else if (TYPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + type = MultiMatchQueryBuilder.Type.parse(parser.text(), parser.getDeprecationHandler()); + } else if (ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { analyzer = parser.text(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (SLOP_FIELD.match(currentFieldName)) { + } else if (SLOP_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { slop = parser.intValue(); - } else if (Fuzziness.FIELD.match(currentFieldName)) { + } else if (Fuzziness.FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzziness = Fuzziness.parse(parser); - } else if (PREFIX_LENGTH_FIELD.match(currentFieldName)) { + } else if (PREFIX_LENGTH_FIELD.match(currentFieldName, 
parser.getDeprecationHandler())) { prefixLength = parser.intValue(); - } else if (MAX_EXPANSIONS_FIELD.match(currentFieldName)) { + } else if (MAX_EXPANSIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { maxExpansions = parser.intValue(); - } else if (OPERATOR_FIELD.match(currentFieldName)) { + } else if (OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { operator = Operator.fromString(parser.text()); - } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName)) { + } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { minimumShouldMatch = parser.textOrNull(); - } else if (FUZZY_REWRITE_FIELD.match(currentFieldName)) { + } else if (FUZZY_REWRITE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzzyRewrite = parser.textOrNull(); - } else if (USE_DIS_MAX_FIELD.match(currentFieldName)) { + } else if (USE_DIS_MAX_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { useDisMax = parser.booleanValue(); - } else if (TIE_BREAKER_FIELD.match(currentFieldName)) { + } else if (TIE_BREAKER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tieBreaker = parser.floatValue(); - } else if (CUTOFF_FREQUENCY_FIELD.match(currentFieldName)) { + } else if (CUTOFF_FREQUENCY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { cutoffFrequency = parser.floatValue(); - } else if (LENIENT_FIELD.match(currentFieldName)) { + } else if (LENIENT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { lenient = parser.booleanValue(); - } else if (ZERO_TERMS_QUERY_FIELD.match(currentFieldName)) { + } else if (ZERO_TERMS_QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { String zeroTermsDocs = parser.text(); if ("none".equalsIgnoreCase(zeroTermsDocs)) { zeroTermsQuery = MatchQuery.ZeroTermsQuery.NONE; @@ -690,11 +692,11 @@ public static MultiMatchQueryBuilder fromXContent(XContentParser parser) throws } else { throw new ParsingException(parser.getTokenLocation(), "Unsupported zero_terms_docs value [" + zeroTermsDocs + "]"); } - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); - } else if (GENERATE_SYNONYMS_PHRASE_QUERY.match(currentFieldName)) { + } else if (GENERATE_SYNONYMS_PHRASE_QUERY.match(currentFieldName, parser.getDeprecationHandler())) { autoGenerateSynonymsPhraseQuery = parser.booleanValue(); - } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName)) { + } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzzyTranspositions = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), @@ -781,7 +783,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException { multiMatchQuery.setMaxExpansions(maxExpansions); multiMatchQuery.setOccur(operator.toBooleanClauseOccur()); if (fuzzyRewrite != null) { - multiMatchQuery.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(fuzzyRewrite, null)); + multiMatchQuery.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(fuzzyRewrite, null, LoggingDeprecationHandler.INSTANCE)); } if (tieBreaker != null) { multiMatchQuery.setTieBreaker(tieBreaker); diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 95e9e3a1869b0..9ebd548cae1f0 100644 --- 
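
`MultiMatchQueryBuilder.Type.parse` is the one signature change in this file that ripples outward: callers that hold a parser pass `parser.getDeprecationHandler()`, while the `type(Object)` setter, which has no parser in scope, falls back to `LoggingDeprecationHandler.INSTANCE`. A condensed sketch of the new method (early return instead of the original's `break`, same result):

```java
public static Type parse(String value, DeprecationHandler deprecationHandler) {
    for (Type t : Type.values()) {
        // each candidate's ParseField may carry deprecated aliases; matching one
        // is reported through the supplied handler
        if (t.parseField().match(value, deprecationHandler)) {
            return t;
        }
    }
    return null;
}
```
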
a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -192,23 +192,23 @@ public static NestedQueryBuilder fromXContent(XContentParser parser) throws IOEx if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if (QUERY_FIELD.match(currentFieldName)) { + if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { query = parseInnerQueryBuilder(parser); - } else if (INNER_HITS_FIELD.match(currentFieldName)) { + } else if (INNER_HITS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { innerHitBuilder = InnerHitBuilder.fromXContent(parser); } else { throw new ParsingException(parser.getTokenLocation(), "[nested] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if (PATH_FIELD.match(currentFieldName)) { + if (PATH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { path = parser.text(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName)) { + } else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { ignoreUnmapped = parser.booleanValue(); - } else if (SCORE_MODE_FIELD.match(currentFieldName)) { + } else if (SCORE_MODE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { scoreMode = parseScoreMode(parser.text()); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "[nested] query does not support [" + currentFieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java index fcc688d191a36..c1cd99d712a5a 100644 --- a/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MappedFieldType; @@ -135,13 +136,13 @@ public static PrefixQueryBuilder fromXContent(XContentParser parser) throws IOEx if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else { - if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); - } else if (PREFIX_FIELD.match(currentFieldName)) { + } else if (PREFIX_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { value = parser.textOrNull(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if 
(REWRITE_FIELD.match(currentFieldName)) { + } else if (REWRITE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { rewrite = parser.textOrNull(); } else { throw new ParsingException(parser.getTokenLocation(), @@ -169,7 +170,7 @@ public String getWriteableName() { @Override protected Query doToQuery(QueryShardContext context) throws IOException { - MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewrite, null); + MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewrite, null, LoggingDeprecationHandler.INSTANCE); Query query = null; MappedFieldType fieldType = context.fieldMapper(fieldName); diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index 154060ec1a5b0..56c49b7f2c1bf 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -768,7 +769,7 @@ public static QueryStringQueryBuilder fromXContent(XContentParser parser) throws if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_ARRAY) { - if (FIELDS_FIELD.match(currentFieldName)) { + if (FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { List fields = new ArrayList<>(); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { fields.add(parser.text()); @@ -779,76 +780,76 @@ public static QueryStringQueryBuilder fromXContent(XContentParser parser) throws "] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if (QUERY_FIELD.match(currentFieldName)) { + if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryString = parser.text(); - } else if (DEFAULT_FIELD_FIELD.match(currentFieldName)) { + } else if (DEFAULT_FIELD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { defaultField = parser.text(); - } else if (DEFAULT_OPERATOR_FIELD.match(currentFieldName)) { + } else if (DEFAULT_OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { defaultOperator = Operator.fromString(parser.text()); - } else if (ANALYZER_FIELD.match(currentFieldName)) { + } else if (ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { analyzer = parser.text(); - } else if (QUOTE_ANALYZER_FIELD.match(currentFieldName)) { + } else if (QUOTE_ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { quoteAnalyzer = parser.text(); - } else if (ALLOW_LEADING_WILDCARD_FIELD.match(currentFieldName)) { + } else if (ALLOW_LEADING_WILDCARD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { allowLeadingWildcard = parser.booleanValue(); - } else if (MAX_DETERMINIZED_STATES_FIELD.match(currentFieldName)) { + } else if (MAX_DETERMINIZED_STATES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { maxDeterminizedStates = parser.intValue(); - } else if (ENABLE_POSITION_INCREMENTS_FIELD.match(currentFieldName)) { + } else if 
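
Where there is no `XContentParser` in scope, the handler has to be chosen explicitly. The `doToQuery` call sites (here `prefix`; `match` and `multi_match` above, `regexp` below) all pick the logging handler, which keeps runtime behavior the same while making the dependency visible:

```java
// No parser exists at query-building time, so the logging policy is opted into
// explicitly rather than inherited from a parser.
MultiTermQuery.RewriteMethod method =
        QueryParsers.parseRewriteMethod(rewrite, null, LoggingDeprecationHandler.INSTANCE);
```
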
(ENABLE_POSITION_INCREMENTS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { enablePositionIncrements = parser.booleanValue(); - } else if (ESCAPE_FIELD.match(currentFieldName)) { + } else if (ESCAPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { escape = parser.booleanValue(); - } else if (FUZZY_PREFIX_LENGTH_FIELD.match(currentFieldName)) { + } else if (FUZZY_PREFIX_LENGTH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzzyPrefixLength = parser.intValue(); - } else if (FUZZY_MAX_EXPANSIONS_FIELD.match(currentFieldName)) { + } else if (FUZZY_MAX_EXPANSIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzzyMaxExpansions = parser.intValue(); - } else if (FUZZY_REWRITE_FIELD.match(currentFieldName)) { + } else if (FUZZY_REWRITE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzzyRewrite = parser.textOrNull(); - } else if (PHRASE_SLOP_FIELD.match(currentFieldName)) { + } else if (PHRASE_SLOP_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { phraseSlop = parser.intValue(); - } else if (Fuzziness.FIELD.match(currentFieldName)) { + } else if (Fuzziness.FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzziness = Fuzziness.parse(parser); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (TYPE_FIELD.match(currentFieldName)) { - type = MultiMatchQueryBuilder.Type.parse(parser.text()); - } else if (TIE_BREAKER_FIELD.match(currentFieldName)) { + } else if (TYPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + type = MultiMatchQueryBuilder.Type.parse(parser.text(), parser.getDeprecationHandler()); + } else if (TIE_BREAKER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tieBreaker = parser.floatValue(); - } else if (ANALYZE_WILDCARD_FIELD.match(currentFieldName)) { + } else if (ANALYZE_WILDCARD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { analyzeWildcard = parser.booleanValue(); - } else if (REWRITE_FIELD.match(currentFieldName)) { + } else if (REWRITE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { rewrite = parser.textOrNull(); - } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName)) { + } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { minimumShouldMatch = parser.textOrNull(); - } else if (QUOTE_FIELD_SUFFIX_FIELD.match(currentFieldName)) { + } else if (QUOTE_FIELD_SUFFIX_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { quoteFieldSuffix = parser.textOrNull(); - } else if (LENIENT_FIELD.match(currentFieldName)) { + } else if (LENIENT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { lenient = parser.booleanValue(); - } else if (ALL_FIELDS_FIELD.match(currentFieldName)) { + } else if (ALL_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { defaultField = "*"; - } else if (MAX_DETERMINIZED_STATES_FIELD.match(currentFieldName)) { + } else if (MAX_DETERMINIZED_STATES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { maxDeterminizedStates = parser.intValue(); - } else if (TIME_ZONE_FIELD.match(currentFieldName)) { + } else if (TIME_ZONE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { try { timeZone = parser.text(); } catch (IllegalArgumentException e) { throw new ParsingException(parser.getTokenLocation(), "[" + 
QueryStringQueryBuilder.NAME + "] time_zone [" + parser.text() + "] is unknown"); } - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); - } else if (GENERATE_SYNONYMS_PHRASE_QUERY.match(currentFieldName)) { + } else if (GENERATE_SYNONYMS_PHRASE_QUERY.match(currentFieldName, parser.getDeprecationHandler())) { autoGenerateSynonymsPhraseQuery = parser.booleanValue(); - } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName)) { + } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzzyTranspositions = parser.booleanValue(); - } else if (AUTO_GENERATE_PHRASE_QUERIES_FIELD.match(currentFieldName)) { + } else if (AUTO_GENERATE_PHRASE_QUERIES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { // ignore, deprecated setting - } else if (LOWERCASE_EXPANDED_TERMS_FIELD.match(currentFieldName)) { + } else if (LOWERCASE_EXPANDED_TERMS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { // ignore, deprecated setting - } else if (LOCALE_FIELD.match(currentFieldName)) { + } else if (LOCALE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { // ignore, deprecated setting - } else if (USE_DIS_MAX_FIELD.match(currentFieldName)) { + } else if (USE_DIS_MAX_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { // ignore, deprecated setting - } else if (SPLIT_ON_WHITESPACE.match(currentFieldName)) { + } else if (SPLIT_ON_WHITESPACE.match(currentFieldName, parser.getDeprecationHandler())) { // ignore, deprecated setting } else { throw new ParsingException(parser.getTokenLocation(), "[" + QueryStringQueryBuilder.NAME + @@ -1004,8 +1005,8 @@ protected Query doToQuery(QueryShardContext context) throws IOException { queryParser.setFuzziness(fuzziness); queryParser.setFuzzyPrefixLength(fuzzyPrefixLength); queryParser.setFuzzyMaxExpansions(fuzzyMaxExpansions); - queryParser.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(this.fuzzyRewrite)); - queryParser.setMultiTermRewriteMethod(QueryParsers.parseRewriteMethod(this.rewrite)); + queryParser.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(this.fuzzyRewrite, LoggingDeprecationHandler.INSTANCE)); + queryParser.setMultiTermRewriteMethod(QueryParsers.parseRewriteMethod(this.rewrite, LoggingDeprecationHandler.INSTANCE)); queryParser.setTimeZone(timeZone); queryParser.setMaxDeterminizedStates(maxDeterminizedStates); queryParser.setAutoGenerateMultiTermSynonymsPhraseQuery(autoGenerateSynonymsPhraseQuery); diff --git a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index 14f1e16f39cbf..9cf008e1b1b2e 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -376,35 +376,35 @@ public static RangeQueryBuilder fromXContent(XContentParser parser) throws IOExc if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else { - if (FROM_FIELD.match(currentFieldName)) { + if (FROM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { from = parser.objectBytes(); - } else if (TO_FIELD.match(currentFieldName)) { + } else if (TO_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { to = parser.objectBytes(); - } else if 
(INCLUDE_LOWER_FIELD.match(currentFieldName)) { + } else if (INCLUDE_LOWER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { includeLower = parser.booleanValue(); - } else if (INCLUDE_UPPER_FIELD.match(currentFieldName)) { + } else if (INCLUDE_UPPER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { includeUpper = parser.booleanValue(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (GT_FIELD.match(currentFieldName)) { + } else if (GT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { from = parser.objectBytes(); includeLower = false; - } else if (GTE_FIELD.match(currentFieldName)) { + } else if (GTE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { from = parser.objectBytes(); includeLower = true; - } else if (LT_FIELD.match(currentFieldName)) { + } else if (LT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { to = parser.objectBytes(); includeUpper = false; - } else if (LTE_FIELD.match(currentFieldName)) { + } else if (LTE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { to = parser.objectBytes(); includeUpper = true; - } else if (TIME_ZONE_FIELD.match(currentFieldName)) { + } else if (TIME_ZONE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { timeZone = parser.text(); - } else if (FORMAT_FIELD.match(currentFieldName)) { + } else if (FORMAT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { format = parser.text(); - } else if (RELATION_FIELD.match(currentFieldName)) { + } else if (RELATION_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { relation = parser.text(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java index 96290d9291259..7f697eb20e477 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MappedFieldType; @@ -195,20 +196,20 @@ public static RegexpQueryBuilder fromXContent(XContentParser parser) throws IOEx if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else { - if (VALUE_FIELD.match(currentFieldName)) { + if (VALUE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { value = parser.textOrNull(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (REWRITE_FIELD.match(currentFieldName)) { + } else if (REWRITE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { 
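
The `range` hunk above shows the shorthand keys being normalized onto the same internal state. A condensed view of the bounds handling, in which only the second `match` argument is new:

```java
if (GT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
    from = parser.objectBytes();
    includeLower = false;          // strict lower bound
} else if (GTE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
    from = parser.objectBytes();
    includeLower = true;           // inclusive lower bound
} else if (LT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
    to = parser.objectBytes();
    includeUpper = false;          // strict upper bound
} else if (LTE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
    to = parser.objectBytes();
    includeUpper = true;           // inclusive upper bound
}
```
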
rewrite = parser.textOrNull(); - } else if (FLAGS_FIELD.match(currentFieldName)) { + } else if (FLAGS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { String flags = parser.textOrNull(); flagsValue = RegexpFlag.resolveValue(flags); - } else if (MAX_DETERMINIZED_STATES_FIELD.match(currentFieldName)) { + } else if (MAX_DETERMINIZED_STATES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { maxDeterminizedStates = parser.intValue(); - } else if (FLAGS_VALUE_FIELD.match(currentFieldName)) { + } else if (FLAGS_VALUE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { flagsValue = parser.intValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), @@ -238,7 +239,7 @@ public String getWriteableName() { @Override protected Query doToQuery(QueryShardContext context) throws QueryShardException, IOException { - MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewrite, null); + MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewrite, null, LoggingDeprecationHandler.INSTANCE); Query query = null; MappedFieldType fieldType = context.fieldMapper(fieldName); diff --git a/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java index 3d217ab36a243..9cae2f3e061da 100644 --- a/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java @@ -96,17 +96,17 @@ public static ScriptQueryBuilder fromXContent(XContentParser parser) throws IOEx if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { + if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { script = Script.parse(parser); } else { throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { + } else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { script = Script.parse(parser); } else { throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index 3a9a0c3736b93..e51722195399c 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -539,7 +539,7 @@ public static SimpleQueryStringBuilder fromXContent(XContentParser parser) throw if (token == 
XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_ARRAY) { - if (FIELDS_FIELD.match(currentFieldName)) { + if (FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { List fields = new ArrayList<>(); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { fields.add(parser.text()); @@ -550,15 +550,15 @@ public static SimpleQueryStringBuilder fromXContent(XContentParser parser) throw "] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if (QUERY_FIELD.match(currentFieldName)) { + if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryBody = parser.text(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (ANALYZER_FIELD.match(currentFieldName)) { + } else if (ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { analyzerName = parser.text(); - } else if (DEFAULT_OPERATOR_FIELD.match(currentFieldName)) { + } else if (DEFAULT_OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { defaultOperator = Operator.fromString(parser.text()); - } else if (FLAGS_FIELD.match(currentFieldName)) { + } else if (FLAGS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { if (parser.currentToken() != XContentParser.Token.VALUE_NUMBER) { // Possible options are: // ALL, NONE, AND, OR, PREFIX, PHRASE, PRECEDENCE, ESCAPE, WHITESPACE, FUZZY, NEAR, SLOP @@ -569,29 +569,29 @@ public static SimpleQueryStringBuilder fromXContent(XContentParser parser) throw flags = SimpleQueryStringFlag.ALL.value(); } } - } else if (LOCALE_FIELD.match(currentFieldName)) { + } else if (LOCALE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { // ignore, deprecated setting - } else if (LOWERCASE_EXPANDED_TERMS_FIELD.match(currentFieldName)) { + } else if (LOWERCASE_EXPANDED_TERMS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { // ignore, deprecated setting - } else if (LENIENT_FIELD.match(currentFieldName)) { + } else if (LENIENT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { lenient = parser.booleanValue(); - } else if (ANALYZE_WILDCARD_FIELD.match(currentFieldName)) { + } else if (ANALYZE_WILDCARD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { analyzeWildcard = parser.booleanValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); - } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName)) { + } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { minimumShouldMatch = parser.textOrNull(); - } else if (QUOTE_FIELD_SUFFIX_FIELD.match(currentFieldName)) { + } else if (QUOTE_FIELD_SUFFIX_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { quoteFieldSuffix = parser.textOrNull(); - } else if (ALL_FIELDS_FIELD.match(currentFieldName)) { + } else if (ALL_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { // Ignore deprecated option - } else if (GENERATE_SYNONYMS_PHRASE_QUERY.match(currentFieldName)) { + } else if (GENERATE_SYNONYMS_PHRASE_QUERY.match(currentFieldName, parser.getDeprecationHandler())) { autoGenerateSynonymsPhraseQuery = parser.booleanValue(); - } else if 
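
Both `query_string` (above) and `simple_query_string` keep matching options they no longer honor. That appears deliberate: the two-argument `match` is what gives the `DeprecationHandler` a chance to warn (or, for a stricter handler, reject) before the value is dropped. Sketch, assuming the usual single-value token:

```java
if (LOCALE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
    // ignore, deprecated setting: the value is a single token, so the next
    // parser.nextToken() in the loop simply steps over it
}
```
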
(FUZZY_PREFIX_LENGTH_FIELD.match(currentFieldName)) { + } else if (FUZZY_PREFIX_LENGTH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzzyPrefixLenght = parser.intValue(); - } else if (FUZZY_MAX_EXPANSIONS_FIELD.match(currentFieldName)) { + } else if (FUZZY_MAX_EXPANSIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzzyMaxExpansions = parser.intValue(); - } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName)) { + } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzzyTranspositions = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), "[" + SimpleQueryStringBuilder.NAME + diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanContainingQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SpanContainingQueryBuilder.java index 264a8c559c16f..2842b84fa1ce1 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SpanContainingQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SpanContainingQueryBuilder.java @@ -111,13 +111,13 @@ public static SpanContainingQueryBuilder fromXContent(XContentParser parser) thr if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if (BIG_FIELD.match(currentFieldName)) { + if (BIG_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { QueryBuilder query = parseInnerQueryBuilder(parser); if (query instanceof SpanQueryBuilder == false) { throw new ParsingException(parser.getTokenLocation(), "span_containing [big] must be of type span query"); } big = (SpanQueryBuilder) query; - } else if (LITTLE_FIELD.match(currentFieldName)) { + } else if (LITTLE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { QueryBuilder query = parseInnerQueryBuilder(parser); if (query instanceof SpanQueryBuilder == false) { throw new ParsingException(parser.getTokenLocation(), "span_containing [little] must be of type span query"); @@ -127,9 +127,9 @@ public static SpanContainingQueryBuilder fromXContent(XContentParser parser) thr throw new ParsingException(parser.getTokenLocation(), "[span_containing] query does not support [" + currentFieldName + "]"); } - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanFirstQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SpanFirstQueryBuilder.java index 135f4086020e8..376e87424da59 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SpanFirstQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SpanFirstQueryBuilder.java @@ -112,7 +112,7 @@ public static SpanFirstQueryBuilder fromXContent(XContentParser parser) throws I if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if (MATCH_FIELD.match(currentFieldName)) { + if (MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { QueryBuilder query = 
parseInnerQueryBuilder(parser); if (query instanceof SpanQueryBuilder == false) { throw new ParsingException(parser.getTokenLocation(), "spanFirst [match] must be of type span query"); @@ -122,11 +122,11 @@ public static SpanFirstQueryBuilder fromXContent(XContentParser parser) throws I throw new ParsingException(parser.getTokenLocation(), "[span_first] query does not support [" + currentFieldName + "]"); } } else { - if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (END_FIELD.match(currentFieldName)) { + } else if (END_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { end = parser.intValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "[span_first] query does not support [" + currentFieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java index 7b469b3ef4639..4f102b58616f6 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java @@ -91,7 +91,7 @@ public static SpanMultiTermQueryBuilder fromXContent(XContentParser parser) thro if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if (MATCH_FIELD.match(currentFieldName)) { + if (MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { QueryBuilder query = parseInnerQueryBuilder(parser); if (query instanceof MultiTermQueryBuilder == false) { throw new ParsingException(parser.getTokenLocation(), @@ -102,9 +102,9 @@ public static SpanMultiTermQueryBuilder fromXContent(XContentParser parser) thro throw new ParsingException(parser.getTokenLocation(), "[span_multi] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); } else { throw new ParsingException(parser.getTokenLocation(), "[span_multi] query does not support [" + currentFieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java index 63c886431b369..7ff181acb9033 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java @@ -159,7 +159,7 @@ public static SpanNearQueryBuilder fromXContent(XContentParser parser) throws IO if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_ARRAY) { - if (CLAUSES_FIELD.match(currentFieldName)) { + if (CLAUSES_FIELD.match(currentFieldName, 
parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { QueryBuilder query = parseInnerQueryBuilder(parser); if (query instanceof SpanQueryBuilder == false) { @@ -171,13 +171,13 @@ public static SpanNearQueryBuilder fromXContent(XContentParser parser) throws IO throw new ParsingException(parser.getTokenLocation(), "[span_near] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if (IN_ORDER_FIELD.match(currentFieldName)) { + if (IN_ORDER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { inOrder = parser.booleanValue(); - } else if (SLOP_FIELD.match(currentFieldName)) { + } else if (SLOP_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { slop = parser.intValue(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "[span_near] query does not support [" + currentFieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java index ca1d30ccbd6a6..e65310d84a4c7 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java @@ -178,13 +178,13 @@ public static SpanNotQueryBuilder fromXContent(XContentParser parser) throws IOE if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if (INCLUDE_FIELD.match(currentFieldName)) { + if (INCLUDE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { QueryBuilder query = parseInnerQueryBuilder(parser); if (query instanceof SpanQueryBuilder == false) { throw new ParsingException(parser.getTokenLocation(), "spanNot [include] must be of type span query"); } include = (SpanQueryBuilder) query; - } else if (EXCLUDE_FIELD.match(currentFieldName)) { + } else if (EXCLUDE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { QueryBuilder query = parseInnerQueryBuilder(parser); if (query instanceof SpanQueryBuilder == false) { throw new ParsingException(parser.getTokenLocation(), "spanNot [exclude] must be of type span query"); @@ -194,15 +194,15 @@ public static SpanNotQueryBuilder fromXContent(XContentParser parser) throws IOE throw new ParsingException(parser.getTokenLocation(), "[span_not] query does not support [" + currentFieldName + "]"); } } else { - if (DIST_FIELD.match(currentFieldName)) { + if (DIST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { dist = parser.intValue(); - } else if (PRE_FIELD.match(currentFieldName)) { + } else if (PRE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { pre = parser.intValue(); - } else if (POST_FIELD.match(currentFieldName)) { + } else if (POST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { post = parser.intValue(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = 
parser.floatValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "[span_not] query does not support [" + currentFieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java index 2ed46c7f5ee10..3a44a8d2c1598 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java @@ -109,7 +109,7 @@ public static SpanOrQueryBuilder fromXContent(XContentParser parser) throws IOEx if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_ARRAY) { - if (CLAUSES_FIELD.match(currentFieldName)) { + if (CLAUSES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { QueryBuilder query = parseInnerQueryBuilder(parser); if (query instanceof SpanQueryBuilder == false) { @@ -121,9 +121,9 @@ public static SpanOrQueryBuilder fromXContent(XContentParser parser) throws IOEx throw new ParsingException(parser.getTokenLocation(), "[span_or] query does not support [" + currentFieldName + "]"); } } else { - if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "[span_or] query does not support [" + currentFieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java index 8c0e56266fbca..f5b286451863f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java @@ -108,13 +108,13 @@ public static SpanTermQueryBuilder fromXContent(XContentParser parser) throws IO if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else { - if (TERM_FIELD.match(currentFieldName)) { + if (TERM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { value = parser.objectBytes(); - } else if (BaseTermQueryBuilder.VALUE_FIELD.match(currentFieldName)) { + } else if (BaseTermQueryBuilder.VALUE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { value = parser.objectBytes(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanWithinQueryBuilder.java 
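
Every parser in the span family (`span_containing`, `span_first`, `span_multi`, `span_near`, `span_not`, `span_or`, `span_within`) pairs the renamed `match` calls with the same pre-existing guard: inner clauses are parsed generically and then type-checked (`SpanQueryBuilder`, or `MultiTermQueryBuilder` for `span_multi`). Condensed from the hunks above:

```java
QueryBuilder query = parseInnerQueryBuilder(parser);
if (query instanceof SpanQueryBuilder == false) {
    // each parser names its own offending key in the message, e.g. [little]
    throw new ParsingException(parser.getTokenLocation(),
            "span_within [little] must be of type span query");
}
SpanQueryBuilder little = (SpanQueryBuilder) query;
```
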
diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanWithinQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SpanWithinQueryBuilder.java
index 714ad02c18cf0..a454dd0fb521b 100644
--- a/server/src/main/java/org/elasticsearch/index/query/SpanWithinQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/SpanWithinQueryBuilder.java
@@ -116,13 +116,13 @@ public static SpanWithinQueryBuilder fromXContent(XContentParser parser) throws
         if (token == XContentParser.Token.FIELD_NAME) {
             currentFieldName = parser.currentName();
         } else if (token == XContentParser.Token.START_OBJECT) {
-            if (BIG_FIELD.match(currentFieldName)) {
+            if (BIG_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                 QueryBuilder query = parseInnerQueryBuilder(parser);
                 if (query instanceof SpanQueryBuilder == false) {
                     throw new ParsingException(parser.getTokenLocation(), "span_within [big] must be of type span query");
                 }
                 big = (SpanQueryBuilder) query;
-            } else if (LITTLE_FIELD.match(currentFieldName)) {
+            } else if (LITTLE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                 QueryBuilder query = parseInnerQueryBuilder(parser);
                 if (query instanceof SpanQueryBuilder == false) {
                     throw new ParsingException(parser.getTokenLocation(), "span_within [little] must be of type span query");
@@ -132,9 +132,9 @@ public static SpanWithinQueryBuilder fromXContent(XContentParser parser) throws
                 throw new ParsingException(parser.getTokenLocation(), "[span_within] query does not support [" + currentFieldName + "]");
             }
-        } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) {
+        } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
             boost = parser.floatValue();
-        } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) {
+        } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
             queryName = parser.text();
         } else {
             throw new ParsingException(parser.getTokenLocation(), "[span_within] query does not support [" + currentFieldName + "]");
diff --git a/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java
index 8ee7c699c0071..0df4973329d36 100644
--- a/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java
@@ -99,13 +99,13 @@ public static TermQueryBuilder fromXContent(XContentParser parser) throws IOExce
             if (token == XContentParser.Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
             } else {
-                if (TERM_FIELD.match(currentFieldName)) {
+                if (TERM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     value = parser.objectBytes();
-                } else if (VALUE_FIELD.match(currentFieldName)) {
+                } else if (VALUE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     value = parser.objectBytes();
-                } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) {
+                } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     queryName = parser.text();
-                } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) {
+                } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     boost = parser.floatValue();
                 } else {
                     throw new ParsingException(parser.getTokenLocation(),
diff --git a/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java
index 478e583b936a6..f235c785cb195 100644
--- a/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java
@@ -369,9 +369,9 @@ public static TermsQueryBuilder fromXContent(XContentParser parser) throws IOExc
             fieldName = currentFieldName;
             termsLookup = TermsLookup.parseTermsLookup(parser);
         } else if (token.isValue()) {
-            if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) {
+            if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                 boost = parser.floatValue();
-            } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) {
+            } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                 queryName = parser.text();
             } else {
                 throw new ParsingException(parser.getTokenLocation(),
diff --git a/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java
index b704c89819fa8..b8afe967b05ff 100644
--- a/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java
@@ -170,25 +170,25 @@ public static TermsSetQueryBuilder fromXContent(XContentParser parser) throws IO
             if (token == XContentParser.Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
             } else if (token == XContentParser.Token.START_ARRAY) {
-                if (TERMS_FIELD.match(currentFieldName)) {
+                if (TERMS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     values = TermsQueryBuilder.parseValues(parser);
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query does not support [" + currentFieldName + "]");
                 }
             } else if (token == XContentParser.Token.START_OBJECT) {
-                if (MINIMUM_SHOULD_MATCH_SCRIPT.match(currentFieldName)) {
+                if (MINIMUM_SHOULD_MATCH_SCRIPT.match(currentFieldName, parser.getDeprecationHandler())) {
                     minimumShouldMatchScript = Script.parse(parser);
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query does not support [" + currentFieldName + "]");
                 }
             } else if (token.isValue()) {
-                if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName)) {
+                if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     minimumShouldMatchField = parser.text();
-                } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) {
+                } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     boost = parser.floatValue();
-                } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) {
+                } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     queryName = parser.text();
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query does not support ["
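
Every hunk above makes the same mechanical change: `ParseField.match(String)` becomes `ParseField.match(String, DeprecationHandler)`, so a match against a deprecated field name is reported through whatever handler the parser carries rather than through implicit global state. A minimal standalone sketch of that pattern, using hypothetical stand-in types rather than the real Elasticsearch classes:

import java.util.Set;

// Illustrative stand-ins only; the real classes are org.elasticsearch.common.ParseField
// and org.elasticsearch.common.xcontent.DeprecationHandler.
interface DeprecationCallback {
    void usedDeprecatedName(String usedName, String currentName);
}

final class SimpleParseField {
    private final String name;
    private final Set<String> deprecatedNames;

    SimpleParseField(String name, String... deprecatedNames) {
        this.name = name;
        this.deprecatedNames = Set.of(deprecatedNames);
    }

    /** Matches the preferred name silently; routes deprecated matches through the callback. */
    boolean match(String fieldName, DeprecationCallback callback) {
        if (name.equals(fieldName)) {
            return true;
        }
        if (deprecatedNames.contains(fieldName)) {
            callback.usedDeprecatedName(fieldName, name); // the caller decides: log, throw, or ignore
            return true;
        }
        return false;
    }
}

public class ParseFieldDemo {
    public static void main(String[] args) {
        SimpleParseField boost = new SimpleParseField("boost", "boost_factor");
        DeprecationCallback logging = (used, current) ->
                System.err.println("Deprecated field [" + used + "] used, expected [" + current + "]");
        System.out.println(boost.match("boost", logging));        // true, silent
        System.out.println(boost.match("boost_factor", logging)); // true, warning printed
    }
}

Passing the handler at each call site is what lets request-scoped parsers report deprecations per request instead of through a shared logger.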
diff --git a/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java
index d53736a38363d..c6df4f59e7eeb 100644
--- a/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java
@@ -90,11 +90,11 @@ public static TypeQueryBuilder fromXContent(XContentParser parser) throws IOExce
             if (token == XContentParser.Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
             } else if (token.isValue()) {
-                if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) {
+                if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     queryName = parser.text();
-                } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) {
+                } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     boost = parser.floatValue();
-                } else if (VALUE_FIELD.match(currentFieldName)) {
+                } else if (VALUE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     type = parser.utf8Bytes();
                 } else {
                     throw new ParsingException(parser.getTokenLocation(),
@@ -142,4 +142,4 @@ protected int doHashCode() {
     protected boolean doEquals(TypeQueryBuilder other) {
         return Objects.equals(type, other.type);
     }
-}
\ No newline at end of file
+}
diff --git a/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java
index 8303f8e1e9436..351cddd59004f 100644
--- a/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java
@@ -31,6 +31,7 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.mapper.IndexFieldMapper;
@@ -153,15 +154,15 @@ public static WildcardQueryBuilder fromXContent(XContentParser parser) throws IO
             if (token == XContentParser.Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
             } else {
-                if (WILDCARD_FIELD.match(currentFieldName)) {
+                if (WILDCARD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     value = parser.text();
-                } else if (VALUE_FIELD.match(currentFieldName)) {
+                } else if (VALUE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     value = parser.text();
-                } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) {
+                } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     boost = parser.floatValue();
-                } else if (REWRITE_FIELD.match(currentFieldName)) {
+                } else if (REWRITE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     rewrite = parser.textOrNull();
-                } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) {
+                } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     queryName = parser.text();
                 } else {
                     throw new ParsingException(parser.getTokenLocation(),
@@ -197,7 +198,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException {
         }

         WildcardQuery query = new WildcardQuery(term);
-        MultiTermQuery.RewriteMethod rewriteMethod = QueryParsers.parseRewriteMethod(rewrite, null);
+        MultiTermQuery.RewriteMethod rewriteMethod = QueryParsers.parseRewriteMethod(rewrite, null, LoggingDeprecationHandler.INSTANCE);
         QueryParsers.setRewriteMethod(query, rewriteMethod);
         return query;
     }
diff --git a/server/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java
index df765568eae87..712d701967418 100644
--- a/server/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java
@@ -27,6 +27,7 @@
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -121,7 +122,7 @@ public static WrapperQueryBuilder fromXContent(XContentParser parser) throws IOE
             throw new ParsingException(parser.getTokenLocation(), "[wrapper] query malformed");
         }
         String fieldName = parser.currentName();
-        if (! QUERY_FIELD.match(fieldName)) {
+        if (! QUERY_FIELD.match(fieldName, parser.getDeprecationHandler())) {
             throw new ParsingException(parser.getTokenLocation(), "[wrapper] query malformed, expected `query` but was " + fieldName);
         }
         parser.nextToken();
@@ -158,7 +159,8 @@ protected boolean doEquals(WrapperQueryBuilder other) {

     @Override
     protected QueryBuilder doRewrite(QueryRewriteContext context) throws IOException {
-        try (XContentParser qSourceParser = XContentFactory.xContent(source).createParser(context.getXContentRegistry(), source)) {
+        try (XContentParser qSourceParser = XContentFactory.xContent(source)
+                .createParser(context.getXContentRegistry(), LoggingDeprecationHandler.INSTANCE, source)) {
             final QueryBuilder queryBuilder = parseInnerQueryBuilder(qSourceParser).rewrite(context);

             if (boost() != DEFAULT_BOOST || queryName() != null) {
diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java
index fe7d097638f31..9bf9b2897397c 100644
--- a/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java
@@ -34,6 +34,7 @@
 import org.elasticsearch.common.lucene.search.function.ScoreFunction;
 import org.elasticsearch.common.unit.DistanceUnit;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
@@ -181,7 +182,8 @@ protected int doHashCode() {
     protected ScoreFunction doToFunction(QueryShardContext context) throws IOException {
         AbstractDistanceScoreFunction scoreFunction;
         // EMPTY is safe because parseVariable doesn't use namedObject
-        try (XContentParser parser = XContentFactory.xContent(functionBytes).createParser(NamedXContentRegistry.EMPTY, functionBytes)) {
+        try (XContentParser parser = XContentFactory.xContent(functionBytes)
+                .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, functionBytes.streamInput())) {
             scoreFunction = parseVariable(fieldName, parser, context, multiValueMode);
         }
         return scoreFunction;
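
The `WrapperQueryBuilder` and `DecayFunctionBuilder` hunks cover the other half of the migration: stored bytes are re-parsed outside of any live request, so there is no incoming parser to inherit a handler from and one has to be supplied explicitly (`LoggingDeprecationHandler.INSTANCE`, which warns and continues). A self-contained sketch of the same idea, using a plain callback as a hypothetical stand-in for `DeprecationHandler`:

import java.util.function.BiConsumer;

// Hypothetical sketch: when re-parsing stored content there is no request-scoped
// handler to inherit, so the deprecation policy must be passed in explicitly.
public class ReparseDemo {

    /** Re-parses stored JSON; the caller chooses how deprecated names are reported. */
    static void reparse(String storedJson, BiConsumer<String, String> deprecationHandler) {
        // A real implementation would walk the JSON with an XContentParser; this only
        // shows where the handler plugs in.
        if (storedJson.contains("\"boost_factor\"")) {          // a deprecated field name
            deprecationHandler.accept("boost_factor", "boost"); // report through the handler
        }
        // ... parse the rest of storedJson ...
    }

    public static void main(String[] args) {
        // Analogous to LoggingDeprecationHandler.INSTANCE: warn but keep going.
        BiConsumer<String, String> logging = (used, current) ->
                System.err.println("Deprecated [" + used + "], use [" + current + "] instead");
        reparse("{\"boost_factor\": 2.0}", logging);
    }
}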
diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java
index 6fe41471e2f22..989c52d8fd46e 100644
--- a/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java
+++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java
@@ -110,7 +110,7 @@ public DFB fromXContent(XContentParser parser) throws IOException, ParsingExcept
                 XContentBuilder builder = XContentFactory.jsonBuilder();
                 builder.copyCurrentStructure(parser);
                 functionBytes = builder.bytes();
-            } else if (MULTI_VALUE_MODE.match(currentFieldName)) {
+            } else if (MULTI_VALUE_MODE.match(currentFieldName, parser.getDeprecationHandler())) {
                 multiValueMode = MultiValueMode.fromString(parser.text());
             } else {
                 throw new ParsingException(parser.getTokenLocation(), "malformed score function score parameters.");
diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java
index 437d49ae37f7a..b14fab84130b1 100644
--- a/server/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java
@@ -454,7 +454,7 @@ public static FunctionScoreQueryBuilder fromXContent(XContentParser parser) thro
             if (token == XContentParser.Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
             } else if (token == XContentParser.Token.START_OBJECT) {
-                if (QUERY_FIELD.match(currentFieldName)) {
+                if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     if (query != null) {
                         throw new ParsingException(parser.getTokenLocation(), "failed to parse [{}] query. [query] is already defined.",
                                 NAME);
@@ -479,7 +479,7 @@ public static FunctionScoreQueryBuilder fromXContent(XContentParser parser) thro
                         filterFunctionBuilders.add(new FunctionScoreQueryBuilder.FilterFunctionBuilder(scoreFunction));
                     }
                 } else if (token == XContentParser.Token.START_ARRAY) {
-                    if (FUNCTIONS_FIELD.match(currentFieldName)) {
+                    if (FUNCTIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                         if (singleFunctionFound) {
                             String errorString = "already found [" + singleFunctionName + "], now encountering [functions].";
                             handleMisplacedFunctionsDeclaration(parser.getTokenLocation(), errorString);
@@ -492,17 +492,17 @@ public static FunctionScoreQueryBuilder fromXContent(XContentParser parser) thro
                 }
             } else if (token.isValue()) {
-                if (SCORE_MODE_FIELD.match(currentFieldName)) {
+                if (SCORE_MODE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     scoreMode = FunctionScoreQuery.ScoreMode.fromString(parser.text());
-                } else if (BOOST_MODE_FIELD.match(currentFieldName)) {
+                } else if (BOOST_MODE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     combineFunction = CombineFunction.fromString(parser.text());
-                } else if (MAX_BOOST_FIELD.match(currentFieldName)) {
+                } else if (MAX_BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     maxBoost = parser.floatValue();
-                } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) {
+                } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     boost = parser.floatValue();
-                } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) {
+                } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     queryName = parser.text();
-                } else if (MIN_SCORE_FIELD.match(currentFieldName)) {
+                } else if (MIN_SCORE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     minScore = parser.floatValue();
                 } else {
                     if (singleFunctionFound) {
@@ -515,7 +515,7 @@ public static FunctionScoreQueryBuilder fromXContent(XContentParser parser) thro
                         String errorString = "already found [functions] array, now encountering [" + currentFieldName + "].";
                         handleMisplacedFunctionsDeclaration(parser.getTokenLocation(), errorString);
                     }
-                    if (WEIGHT_FIELD.match(currentFieldName)) {
+                    if (WEIGHT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                         filterFunctionBuilders.add(
                                 new FunctionScoreQueryBuilder.FilterFunctionBuilder(new WeightBuilder().setWeight(parser.floatValue())));
                         singleFunctionFound = true;
@@ -569,7 +569,7 @@ private static String parseFiltersAndFunctions(XContentParser parser,
             if (token == XContentParser.Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
             } else if (token == XContentParser.Token.START_OBJECT) {
-                if (FILTER_FIELD.match(currentFieldName)) {
+                if (FILTER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     filter = parseInnerQueryBuilder(parser);
                 } else {
                     if (scoreFunction != null) {
@@ -580,7 +580,7 @@ private static String parseFiltersAndFunctions(XContentParser parser,
                     scoreFunction = parser.namedObject(ScoreFunctionBuilder.class, currentFieldName, null);
                 }
             } else if (token.isValue()) {
-                if (WEIGHT_FIELD.match(currentFieldName)) {
+                if (WEIGHT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     functionWeight = parser.floatValue();
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "failed to parse [{}] query. field [{}] is not supported",
diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java
index 60f3918863969..cc89518154d12 100644
--- a/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java
@@ -109,7 +109,7 @@ public static ScriptScoreFunctionBuilder fromXContent(XContentParser parser)
             if (token == XContentParser.Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
             } else {
-                if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) {
+                if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                     script = Script.parse(parser);
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), NAME + " query does not support [" + currentFieldName + "]");
diff --git a/server/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java b/server/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java
index 036efa75bdaa3..f880913747307 100644
--- a/server/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java
+++ b/server/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java
@@ -22,6 +22,7 @@
 import org.apache.lucene.search.MultiTermQuery;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.DeprecationHandler;

 public final class QueryParsers {

@@ -43,22 +44,23 @@ public static void setRewriteMethod(MultiTermQuery query, @Nullable MultiTermQue
         query.setRewriteMethod(rewriteMethod);
     }

-    public static MultiTermQuery.RewriteMethod parseRewriteMethod(@Nullable String rewriteMethod) {
-        return parseRewriteMethod(rewriteMethod, MultiTermQuery.CONSTANT_SCORE_REWRITE);
+    public static MultiTermQuery.RewriteMethod parseRewriteMethod(@Nullable String rewriteMethod, DeprecationHandler deprecationHandler) {
+        return parseRewriteMethod(rewriteMethod, MultiTermQuery.CONSTANT_SCORE_REWRITE, deprecationHandler);
     }

     public static MultiTermQuery.RewriteMethod parseRewriteMethod(@Nullable String rewriteMethod,
-                                                                  @Nullable MultiTermQuery.RewriteMethod defaultRewriteMethod) {
+                                                                  @Nullable MultiTermQuery.RewriteMethod defaultRewriteMethod,
+                                                                  DeprecationHandler deprecationHandler) {
         if (rewriteMethod == null) {
             return defaultRewriteMethod;
         }
-        if (CONSTANT_SCORE.match(rewriteMethod)) {
+        if (CONSTANT_SCORE.match(rewriteMethod, deprecationHandler)) {
             return MultiTermQuery.CONSTANT_SCORE_REWRITE;
         }
-        if (SCORING_BOOLEAN.match(rewriteMethod)) {
+        if (SCORING_BOOLEAN.match(rewriteMethod, deprecationHandler)) {
             return MultiTermQuery.SCORING_BOOLEAN_REWRITE;
         }
-        if (CONSTANT_SCORE_BOOLEAN.match(rewriteMethod)) {
+        if (CONSTANT_SCORE_BOOLEAN.match(rewriteMethod, deprecationHandler)) {
             return MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE;
         }

@@ -74,13 +76,13 @@ public static MultiTermQuery.RewriteMethod parseRewriteMethod(@Nullable String r
             final int size = Integer.parseInt(rewriteMethod.substring(firstDigit));
             String rewriteMethodName = rewriteMethod.substring(0, firstDigit);
-            if (TOP_TERMS.match(rewriteMethodName)) {
+            if (TOP_TERMS.match(rewriteMethodName, deprecationHandler)) {
                 return new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(size);
             }
-            if (TOP_TERMS_BOOST.match(rewriteMethodName)) {
+            if (TOP_TERMS_BOOST.match(rewriteMethodName, deprecationHandler)) {
                 return new MultiTermQuery.TopTermsBoostOnlyBooleanQueryRewrite(size);
             }
-            if (TOP_TERMS_BLENDED_FREQS.match(rewriteMethodName)) {
+            if (TOP_TERMS_BLENDED_FREQS.match(rewriteMethodName, deprecationHandler)) {
                 return new MultiTermQuery.TopTermsBlendedFreqScoringRewrite(size);
             }
         }
diff --git a/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java b/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java
index 1d02c33dd3e1b..0e46a562488d3 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java
@@ -21,7 +21,7 @@
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.ServerLoggers;
+import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.index.IndexSettings;

 public abstract class AbstractIndexShardComponent implements IndexShardComponent {
@@ -34,7 +34,7 @@ public abstract class AbstractIndexShardComponent implements IndexShardComponent
     protected AbstractIndexShardComponent(ShardId shardId, IndexSettings indexSettings) {
         this.shardId = shardId;
         this.indexSettings = indexSettings;
-        this.logger = ServerLoggers.getLogger(getClass(), this.indexSettings.getSettings(), shardId);
+        this.logger = Loggers.getLogger(getClass(), this.indexSettings.getSettings(), shardId);
         this.deprecationLogger = new DeprecationLogger(logger);
     }
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index 5a4c47642a103..24ac81c0bfe5c 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -1076,22 +1076,34 @@ public org.apache.lucene.util.Version minimumCompatibleVersion() {
     }

     /**
-     * Creates a new {@link IndexCommit} snapshot form the currently running engine. All resources referenced by this
+     * Creates a new {@link IndexCommit} snapshot from the currently running engine. All resources referenced by this
      * commit won't be freed until the commit / snapshot is closed.
      *
-     * @param safeCommit true capture the most recent safe commit point; otherwise the most recent commit point.
      * @param flushFirst true if the index should first be flushed to disk / a low level lucene commit should be executed
      */
-    public Engine.IndexCommitRef acquireIndexCommit(boolean safeCommit, boolean flushFirst) throws EngineException {
-        IndexShardState state = this.state; // one time volatile read
+    public Engine.IndexCommitRef acquireLastIndexCommit(boolean flushFirst) throws EngineException {
+        final IndexShardState state = this.state; // one time volatile read
         // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
         if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) {
-            return getEngine().acquireIndexCommit(safeCommit, flushFirst);
+            return getEngine().acquireLastIndexCommit(flushFirst);
         } else {
             throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
         }
     }

+    /**
+     * Snapshots the most recent safe index commit from the currently running engine.
+     * All index files referenced by this index commit won't be freed until the commit/snapshot is closed.
+     */
+    public Engine.IndexCommitRef acquireSafeIndexCommit() throws EngineException {
+        final IndexShardState state = this.state; // one time volatile read
+        // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
+        if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) {
+            return getEngine().acquireSafeIndexCommit();
+        } else {
+            throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
+        }
+    }

     /**
      * gets a {@link Store.MetadataSnapshot} for the current directory. This method is safe to call in all lifecycle of the index shard,
@@ -1120,7 +1132,7 @@ public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException {
                     return store.getMetadata(null, true);
                 }
             }
-            indexCommit = engine.acquireIndexCommit(false, false);
+            indexCommit = engine.acquireLastIndexCommit(false);
             return store.getMetadata(indexCommit.getIndexCommit());
         } finally {
             store.decRef();
@@ -1348,8 +1360,9 @@ private void innerOpenEngineAndTranslog(final EngineConfig.OpenMode openMode, fi
             // we have to set it before we open an engine and recover from the translog because
             // acquiring a snapshot from the translog causes a sync which causes the global checkpoint to be pulled in,
             // and an engine can be forced to close in ctor which also causes the global checkpoint to be pulled in.
-            replicationTracker.updateGlobalCheckpointOnReplica(Translog.readGlobalCheckpoint(translogConfig.getTranslogPath()),
-                "read from translog checkpoint");
+            final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY);
+            final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID);
+            replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "read from translog checkpoint");
         }
         createNewEngine(config);
         verifyNotClosed();
@@ -2191,19 +2204,23 @@ private EngineConfig newEngineConfig(EngineConfig.OpenMode openMode, final boole
     /**
      * Acquire a primary operation permit whenever the shard is ready for indexing. If a permit is directly available, the provided
      * ActionListener will be called on the calling thread. During relocation hand-off, permit acquisition can be delayed. The provided
      * ActionListener will then be called using the provided executor.
+     *
+     * @param debugInfo extra information that can be useful when tracing an unreleased permit. When assertions are enabled
+     *                  the tracing will capture the supplied object's {@link Object#toString()} value. Otherwise the object
+     *                  isn't used
      */
-    public void acquirePrimaryOperationPermit(ActionListener<Releasable> onPermitAcquired, String executorOnDelay) {
+    public void acquirePrimaryOperationPermit(ActionListener<Releasable> onPermitAcquired, String executorOnDelay, Object debugInfo) {
         verifyNotClosed();
         verifyPrimary();

-        indexShardOperationPermits.acquire(onPermitAcquired, executorOnDelay, false);
+        indexShardOperationPermits.acquire(onPermitAcquired, executorOnDelay, false, debugInfo);
     }

     private final Object primaryTermMutex = new Object();

     /**
      * Acquire a replica operation permit whenever the shard is ready for indexing (see
-     * {@link #acquirePrimaryOperationPermit(ActionListener, String)}). If the given primary term is lower than then one in
+     * {@link #acquirePrimaryOperationPermit(ActionListener, String, Object)}). If the given primary term is lower than the one in
      * {@link #shardRouting}, the {@link ActionListener#onFailure(Exception)} method of the provided listener is invoked with an
      * {@link IllegalStateException}. If permit acquisition is delayed, the listener will be invoked on the executor with the specified
      * name.
@@ -2212,9 +2229,13 @@ public void acquirePrimaryOperationPermit(ActionListener onPermitAcq
      * @param globalCheckpoint the global checkpoint associated with the request
      * @param onPermitAcquired the listener for permit acquisition
      * @param executorOnDelay  the name of the executor to invoke the listener on if permit acquisition is delayed
+     * @param debugInfo        extra information that can be useful when tracing an unreleased permit. When assertions are enabled
+     *                         the tracing will capture the supplied object's {@link Object#toString()} value. Otherwise the object
+     *                         isn't used
      */
     public void acquireReplicaOperationPermit(final long operationPrimaryTerm, final long globalCheckpoint,
-                                              final ActionListener<Releasable> onPermitAcquired, final String executorOnDelay) {
+                                              final ActionListener<Releasable> onPermitAcquired, final String executorOnDelay,
+                                              final Object debugInfo) {
         verifyNotClosed();
         verifyReplicationTarget();
         final boolean globalCheckpointUpdated;
@@ -2250,7 +2271,7 @@ public void acquireReplicaOperationPermit(final long operationPrimaryTerm, final
                             getLocalCheckpoint(), localCheckpoint);
                     getEngine().getLocalCheckpointTracker().resetCheckpoint(localCheckpoint);
-                    getEngine().getTranslog().rollGeneration();
+                    getEngine().rollTranslogGeneration();
                 });
                 globalCheckpointUpdated = true;
             } catch (final Exception e) {
@@ -2300,13 +2321,21 @@ public void onFailure(final Exception e) {
                 }
             },
             executorOnDelay,
-            true);
+            true, debugInfo);
     }

     public int getActiveOperationsCount() {
         return indexShardOperationPermits.getActiveOperationsCount(); // refCount is incremented on successful acquire and decremented on close
     }

+    /**
+     * @return a list describing each permit that wasn't released yet. The description consists of the debugInfo supplied
+     *         when the permit was acquired plus a stack trace that was captured when the permit was requested.
+     */
+    public List<String> getActiveOperations() {
+        return indexShardOperationPermits.getActiveOperations();
+    }
+
     private final AsyncIOProcessor<Translog.Location> translogSyncProcessor = new AsyncIOProcessor<Translog.Location>(logger, 1024) {
         @Override
         protected void write(List<Tuple<Translog.Location, Consumer<Exception>>> candidates) throws IOException {
@@ -2470,13 +2499,17 @@ && isSearchIdle()
                 // lets skip this refresh since we are search idle and
                 // don't necessarily need to refresh. the next searcher access will register a refreshListener and that will
                 // cause the next schedule to refresh.
-                setRefreshPending();
+                final Engine engine = getEngine();
+                engine.maybePruneDeletes(); // try to prune the deletes in the engine if we accumulated some
+                setRefreshPending(engine);
                 return false;
             } else {
                 refresh("schedule");
                 return true;
             }
         }
+        final Engine engine = getEngine();
+        engine.maybePruneDeletes(); // try to prune the deletes in the engine if we accumulated some
         return false;
     }

@@ -2494,8 +2527,7 @@ final long getLastSearcherAccess() {
         return lastSearcherAccess.get();
     }

-    private void setRefreshPending() {
-        Engine engine = getEngine();
+    private void setRefreshPending(Engine engine) {
         Translog.Location lastWriteLocation = engine.getTranslog().getLastWriteLocation();
         Translog.Location location;
         do {
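
The `IndexShard` hunks split the old boolean-flag `acquireIndexCommit(safeCommit, flushFirst)` into two intention-revealing methods, `acquireLastIndexCommit(flushFirst)` and `acquireSafeIndexCommit()`; in Elasticsearch a "safe" commit is one whose operations are all confirmed up to the global checkpoint. Because `Engine.IndexCommitRef` keeps the commit's files pinned until it is closed, callers are expected to scope it with try-with-resources. A hypothetical caller (not code from this patch) might look like:

import java.io.IOException;
import org.apache.lucene.index.IndexCommit;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;

// Hypothetical caller sketch: copies the files of the most recent commit point
// while the IndexCommitRef keeps Lucene from deleting them.
class CommitSnapshotter {
    void snapshotShard(IndexShard shard) throws IOException {
        // flushFirst == true forces a Lucene commit first, so the snapshot is current
        try (Engine.IndexCommitRef commitRef = shard.acquireLastIndexCommit(true)) {
            IndexCommit commit = commitRef.getIndexCommit();
            for (String fileName : commit.getFileNames()) {
                // ... copy fileName to the repository here ...
            }
        }
    }
}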
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java
index 75645198f5b6a..3855a3292a345 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java
@@ -19,12 +19,13 @@
 package org.elasticsearch.index.shard;

-import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.Assertions;
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ContextPreservingActionListener;
 import org.elasticsearch.common.CheckedRunnable;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext;
@@ -33,12 +34,15 @@
 import java.io.Closeable;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Consumer;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;

 /**
  * Tracks shard operation permits. Each operation on the shard obtains a permit. When we need to block operations (e.g., to transition
@@ -53,10 +57,14 @@ final class IndexShardOperationPermits implements Closeable {

     static final int TOTAL_PERMITS = Integer.MAX_VALUE;
     final Semaphore semaphore = new Semaphore(TOTAL_PERMITS, true); // fair to ensure a blocking thread is not starved
-    private final List<ActionListener<Releasable>> delayedOperations = new ArrayList<>(); // operations that are delayed
+    private final List<DelayedOperation> delayedOperations = new ArrayList<>(); // operations that are delayed
     private volatile boolean closed;
     private boolean delayed; // does not need to be volatile as all accesses are done under a lock on this

+    // only valid when assertions are enabled. Key is the AtomicBoolean associated with each permit to ensure close-once semantics.
+    // Value is a tuple, with some debug information supplied by the caller and a stack trace of the acquiring thread
+    private final Map<AtomicBoolean, Tuple<String, StackTraceElement[]>> issuedPermits;
+
     /**
      * Construct operation permits for the specified shards.
      *
@@ -66,6 +74,11 @@ final class IndexShardOperationPermits implements Closeable {
     IndexShardOperationPermits(final ShardId shardId, final ThreadPool threadPool) {
         this.shardId = shardId;
         this.threadPool = threadPool;
+        if (Assertions.ENABLED) {
+            issuedPermits = new ConcurrentHashMap<>();
+        } else {
+            issuedPermits = null;
+        }
     }

     @Override
@@ -167,7 +180,7 @@ private void doBlockOperations(
     }

     private void releaseDelayedOperations() {
-        final List<ActionListener<Releasable>> queuedActions;
+        final List<DelayedOperation> queuedActions;
         synchronized (this) {
             assert delayed;
             queuedActions = new ArrayList<>(delayedOperations);
@@ -185,8 +198,8 @@ private void releaseDelayedOperations() {
              * recovery
              */
             threadPool.executor(ThreadPool.Names.GENERIC).execute(() -> {
-                for (ActionListener<Releasable> queuedAction : queuedActions) {
-                    acquire(queuedAction, null, false);
+                for (DelayedOperation queuedAction : queuedActions) {
+                    acquire(queuedAction.listener, null, false, queuedAction.debugInfo, queuedAction.stackTrace);
                 }
             });
     }
@@ -204,8 +217,24 @@ private void releaseDelayedOperations() {
      * @param onAcquired      {@link ActionListener} that is invoked once acquisition is successful or failed
      * @param executorOnDelay executor to use for the possibly delayed {@link ActionListener#onResponse(Object)} call
      * @param forceExecution  whether the runnable should force its execution in case it gets rejected
+     * @param debugInfo       extra information that can be useful when tracing an unreleased permit. When assertions are enabled
+     *                        the tracing will capture the supplied object's {@link Object#toString()} value. Otherwise the object
+     *                        isn't used
+     *
      */
-    public void acquire(final ActionListener<Releasable> onAcquired, final String executorOnDelay, final boolean forceExecution) {
+    public void acquire(final ActionListener<Releasable> onAcquired, final String executorOnDelay, final boolean forceExecution,
+                        final Object debugInfo) {
+        final StackTraceElement[] stackTrace;
+        if (Assertions.ENABLED) {
+            stackTrace = Thread.currentThread().getStackTrace();
+        } else {
+            stackTrace = null;
+        }
+        acquire(onAcquired, executorOnDelay, forceExecution, debugInfo, stackTrace);
+    }
+
+    private void acquire(final ActionListener<Releasable> onAcquired, final String executorOnDelay, final boolean forceExecution,
+                         final Object debugInfo, final StackTraceElement[] stackTrace) {
         if (closed) {
             onAcquired.onFailure(new IndexShardClosedException(shardId));
             return;
@@ -215,16 +244,18 @@ public void acquire(final ActionListener onAcquired, final String ex
             synchronized (this) {
                 if (delayed) {
                     final Supplier<StoredContext> contextSupplier = threadPool.getThreadContext().newRestorableContext(false);
+                    final ActionListener<Releasable> wrappedListener;
                     if (executorOnDelay != null) {
-                        delayedOperations.add(
-                                new PermitAwareThreadedActionListener(threadPool, executorOnDelay,
-                                        new ContextPreservingActionListener<>(contextSupplier, onAcquired), forceExecution));
+                        wrappedListener =
+                            new PermitAwareThreadedActionListener(threadPool, executorOnDelay,
+                                    new ContextPreservingActionListener<>(contextSupplier, onAcquired), forceExecution);
                     } else {
-                        delayedOperations.add(new ContextPreservingActionListener<>(contextSupplier, onAcquired));
+                        wrappedListener = new ContextPreservingActionListener<>(contextSupplier, onAcquired);
                     }
+                    delayedOperations.add(new DelayedOperation(wrappedListener, debugInfo, stackTrace));
                     return;
                 } else {
-                    releasable = acquire();
+                    releasable = acquire(debugInfo, stackTrace);
                 }
             }
         } catch (final InterruptedException e) {
@@ -235,15 +266,23 @@ public void acquire(final ActionListener onAcquired, final String ex
         onAcquired.onResponse(releasable);
     }

-    private Releasable acquire() throws InterruptedException {
+    private Releasable acquire(Object debugInfo, StackTraceElement[] stackTrace) throws InterruptedException {
         assert Thread.holdsLock(this);
         if (semaphore.tryAcquire(1, 0, TimeUnit.SECONDS)) { // the un-timed tryAcquire methods do not honor the fairness setting
             final AtomicBoolean closed = new AtomicBoolean();
-            return () -> {
+            final Releasable releasable = () -> {
                 if (closed.compareAndSet(false, true)) {
+                    if (Assertions.ENABLED) {
+                        Tuple<String, StackTraceElement[]> existing = issuedPermits.remove(closed);
+                        assert existing != null;
+                    }
                     semaphore.release(1);
                 }
             };
+            if (Assertions.ENABLED) {
+                issuedPermits.put(closed, new Tuple<>(debugInfo.toString(), stackTrace));
+            }
+            return releasable;
         } else {
             // this should never happen, if it does something is deeply wrong
             throw new IllegalStateException("failed to obtain permit but operations are not delayed");
@@ -269,6 +308,33 @@ int getActiveOperationsCount() {
         }
     }

+    /**
+     * @return a list describing each permit that wasn't released yet. The description consists of the debugInfo supplied
+     *         when the permit was acquired plus a stack trace that was captured when the permit was requested.
+     */
+    List<String> getActiveOperations() {
+        return issuedPermits.values().stream().map(
+                t -> t.v1() + "\n" + ExceptionsHelper.formatStackTrace(t.v2()))
+            .collect(Collectors.toList());
+    }
+
+    private static class DelayedOperation {
+        private final ActionListener<Releasable> listener;
+        private final String debugInfo;
+        private final StackTraceElement[] stackTrace;

+        private DelayedOperation(ActionListener<Releasable> listener, Object debugInfo, StackTraceElement[] stackTrace) {
+            this.listener = listener;
+            if (Assertions.ENABLED) {
+                this.debugInfo = "[delayed] " + debugInfo;
+                this.stackTrace = stackTrace;
+            } else {
+                this.debugInfo = null;
+                this.stackTrace = null;
+            }
+        }
+    }
+
     /**
      * A permit-aware action listener wrapper that spawns onResponse listener invocations off on a configurable thread-pool.
      * Being permit-aware, it also releases the permit when hitting thread-pool rejections and falls back to the
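
The permit bookkeeping above only runs when the JVM has assertions enabled (`Assertions.ENABLED`), so production clusters pay no tracing cost. A condensed standalone sketch of the same trick, with a hypothetical `PermitTracker` class rather than the real `IndexShardOperationPermits`:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical condensed version of the assertion-gated permit tracing in the patch.
public class PermitTracker {
    private static final boolean ASSERTIONS_ENABLED;
    static {
        boolean enabled = false;
        assert enabled = true; // intentional side effect: true only when running with -ea
        ASSERTIONS_ENABLED = enabled;
    }

    private final Semaphore semaphore = new Semaphore(Integer.MAX_VALUE, true);
    // null when assertions are off, so there is zero bookkeeping cost in production
    private final Map<AtomicBoolean, String> issuedPermits =
            ASSERTIONS_ENABLED ? new ConcurrentHashMap<>() : null;

    public Runnable acquire(Object debugInfo) throws InterruptedException {
        semaphore.acquire();
        final AtomicBoolean closed = new AtomicBoolean(); // one close-marker per permit
        if (ASSERTIONS_ENABLED) {
            // remember who asked for the permit and where, keyed by the close-marker
            StringBuilder description = new StringBuilder(String.valueOf(debugInfo));
            for (StackTraceElement frame : Thread.currentThread().getStackTrace()) {
                description.append("\n\tat ").append(frame);
            }
            issuedPermits.put(closed, description.toString());
        }
        return () -> {
            if (closed.compareAndSet(false, true)) { // close-once semantics
                if (ASSERTIONS_ENABLED) {
                    issuedPermits.remove(closed);
                }
                semaphore.release();
            }
        };
    }

    /** Descriptions of permits acquired but not yet released; always empty without -ea. */
    public List<String> getActiveOperations() {
        return ASSERTIONS_ENABLED ? new ArrayList<>(issuedPermits.values()) : Collections.emptyList();
    }
}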
diff --git a/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java
index f8f92fbb5fa8b..d7105c0c14d38 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java
@@ -48,7 +48,7 @@ final class LocalShardSnapshot implements Closeable {
         store.incRef();
         boolean success = false;
         try {
-            indexCommit = shard.acquireIndexCommit(false, true);
+            indexCommit = shard.acquireLastIndexCommit(true);
             success = true;
         } finally {
             if (success == false) {
diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java b/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java
index a1133d565f754..3f3f2a78100af 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java
@@ -88,7 +88,7 @@ public String toString() {
         return "primary [" + primary + "], allocation [" + allocationId + "]";
     }

-    public static final MetaDataStateFormat<ShardStateMetaData> FORMAT = new MetaDataStateFormat<ShardStateMetaData>(XContentType.SMILE, SHARD_STATE_FILE_PREFIX) {
+    public static final MetaDataStateFormat<ShardStateMetaData> FORMAT = new MetaDataStateFormat<ShardStateMetaData>(SHARD_STATE_FILE_PREFIX) {

         @Override
         protected XContentBuilder newXContentBuilder(XContentType type, OutputStream stream) throws IOException {
diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
index b0767a7c512ec..124b538d3facf 100644
--- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
+++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
@@ -516,24 +516,24 @@ public static BlobStoreIndexShardSnapshot fromXContent(XContentParser parser) th
                 String currentFieldName = parser.currentName();
                 token = parser.nextToken();
                 if (token.isValue()) {
-                    if (PARSE_NAME.match(currentFieldName)) {
+                    if (PARSE_NAME.match(currentFieldName, parser.getDeprecationHandler())) {
                         snapshot = parser.text();
-                    } else if (PARSE_INDEX_VERSION.match(currentFieldName)) {
+                    } else if (PARSE_INDEX_VERSION.match(currentFieldName, parser.getDeprecationHandler())) {
                         // The index-version is needed for backward compatibility with v 1.0
                         indexVersion = parser.longValue();
-                    } else if (PARSE_START_TIME.match(currentFieldName)) {
+                    } else if (PARSE_START_TIME.match(currentFieldName, parser.getDeprecationHandler())) {
                         startTime = parser.longValue();
-                    } else if (PARSE_TIME.match(currentFieldName)) {
+                    } else if (PARSE_TIME.match(currentFieldName, parser.getDeprecationHandler())) {
                         time = parser.longValue();
-                    } else if (PARSE_NUMBER_OF_FILES.match(currentFieldName)) {
+                    } else if (PARSE_NUMBER_OF_FILES.match(currentFieldName, parser.getDeprecationHandler())) {
                         numberOfFiles = parser.intValue();
-                    } else if (PARSE_TOTAL_SIZE.match(currentFieldName)) {
+                    } else if (PARSE_TOTAL_SIZE.match(currentFieldName, parser.getDeprecationHandler())) {
                         totalSize = parser.longValue();
                     } else {
                         throw new ElasticsearchParseException("unknown parameter [{}]", currentFieldName);
                     }
                 } else if (token == XContentParser.Token.START_ARRAY) {
-                    if (PARSE_FILES.match(currentFieldName)) {
+                    if (PARSE_FILES.match(currentFieldName, parser.getDeprecationHandler())) {
                         while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                             indexFiles.add(FileInfo.fromXContent(parser));
                         }
diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java
index 7ecf92d3bf11f..d25b1eb04866d 100644
--- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java
+++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java
@@ -243,7 +243,7 @@ public static BlobStoreIndexShardSnapshots fromXContent(XContentParser parser) t
                 String currentFieldName = parser.currentName();
                 token = parser.nextToken();
                 if (token == XContentParser.Token.START_ARRAY) {
-                    if (ParseFields.FILES.match(currentFieldName) == false) {
+                    if (ParseFields.FILES.match(currentFieldName, parser.getDeprecationHandler()) == false) {
                         throw new ElasticsearchParseException("unknown array [{}]", currentFieldName);
                     }
                     while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
@@ -251,7 +251,7 @@ public static BlobStoreIndexShardSnapshots fromXContent(XContentParser parser) t
                         files.put(fileInfo.name(), fileInfo);
                     }
                 } else if (token == XContentParser.Token.START_OBJECT) {
-                    if (ParseFields.SNAPSHOTS.match(currentFieldName) == false) {
+                    if (ParseFields.SNAPSHOTS.match(currentFieldName, parser.getDeprecationHandler()) == false) {
                         throw new ElasticsearchParseException("unknown object [{}]", currentFieldName);
                     }
                     while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
@@ -266,7 +266,7 @@ public static BlobStoreIndexShardSnapshots fromXContent(XContentParser parser) t
                         if (token == XContentParser.Token.FIELD_NAME) {
                             currentFieldName = parser.currentName();
                             if (parser.nextToken() == XContentParser.Token.START_ARRAY) {
-                                if (ParseFields.FILES.match(currentFieldName) == false) {
+                                if (ParseFields.FILES.match(currentFieldName, parser.getDeprecationHandler()) == false) {
                                     throw new ElasticsearchParseException("unknown array [{}]", currentFieldName);
                                 }
                                 List<String> fileNames = new ArrayList<>();
diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java
index 7aab2c750d139..02713348b86b8 100644
--- a/server/src/main/java/org/elasticsearch/index/store/Store.java
+++ b/server/src/main/java/org/elasticsearch/index/store/Store.java
@@ -58,7 +58,7 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.logging.ServerLoggers;
+import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.lucene.store.ByteArrayIndexInput;
 import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
@@ -159,7 +159,7 @@ public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService dire
     public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock,
                  OnClose onClose) throws IOException {
         super(shardId, indexSettings);
         final Settings settings = indexSettings.getSettings();
-        this.directory = new StoreDirectory(directoryService.newDirectory(), ServerLoggers.getLogger("index.store.deletes", settings, shardId));
+        this.directory = new StoreDirectory(directoryService.newDirectory(), Loggers.getLogger("index.store.deletes", settings, shardId));
         this.shardLock = shardLock;
         this.onClose = onClose;

         final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING);
@@ -238,7 +238,7 @@ final void ensureOpen() {
      *
      * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, Logger)} to read a meta data while locking
      * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard
-     * {@link IndexShard#acquireIndexCommit(boolean, boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed
+     * {@link IndexShard#acquireLastIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed
      * @param commit the index commit to read the snapshot from or null if the latest snapshot should be read from the
      *               directory
      * @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an
@@ -262,7 +262,7 @@ public MetadataSnapshot getMetadata(IndexCommit commit) throws IOException {
      *
      * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, Logger)} to read a meta data while locking
      * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard
-     * {@link IndexShard#acquireIndexCommit(boolean, boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed
+     * {@link IndexShard#acquireLastIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed
      *
      * @param commit the index commit to read the snapshot from or null if the latest snapshot should be read from the
      *               directory
@@ -661,17 +661,17 @@ private static void failIfCorrupted(Directory directory, ShardId shardId) throws
     public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) throws IOException {
         metadataLock.writeLock().lock();
         try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
-            final StoreDirectory dir = directory;
-            for (String existingFile : dir.listAll()) {
+            for (String existingFile : directory.listAll()) {
                 if (Store.isAutogenerated(existingFile) || sourceMetaData.contains(existingFile)) {
                     continue; // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete checksum)
                 }
                 try {
-                    dir.deleteFile(reason, existingFile);
+                    directory.deleteFile(reason, existingFile);
                     // FNF should not happen since we hold a write lock?
                 } catch (IOException ex) {
                     if (existingFile.startsWith(IndexFileNames.SEGMENTS)
-                            || existingFile.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
+                            || existingFile.equals(IndexFileNames.OLD_SEGMENTS_GEN)
+                            || existingFile.startsWith(CORRUPTED)) {
                         // TODO do we need to also fail this if we can't delete the pending commit file?
                         // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around?
                         throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex);
@@ -680,6 +680,7 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) thr
                     // ignore, we don't really care, will get deleted later on
                 }
             }
+            directory.syncMetaData();
             final Store.MetadataSnapshot metadataOrEmpty = getMetadata(null);
             verifyAfterCleanup(sourceMetaData, metadataOrEmpty);
         } finally {
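
The `cleanupAndVerify` hunk tightens recovery cleanup in two ways: an undeletable corruption marker (`CORRUPTED` prefix) now fails the cleanup just like an undeletable segments file, and the directory metadata is synced before the store is re-read. A rough standalone sketch of that policy, using hypothetical names and plain `java.nio` in place of Lucene's `Directory`:

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Set;

// Hypothetical sketch of the cleanup policy: best-effort deletes, except for files
// whose survival would change what a later recovery sees (commit points, corruption markers).
class CleanupSketch {
    private static final String SEGMENTS = "segments";
    private static final String CORRUPTED = "corrupted_";

    static void cleanup(Path dir, Set<String> filesToKeep) throws IOException {
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
            for (Path file : stream) {
                String name = file.getFileName().toString();
                if (filesToKeep.contains(name)) {
                    continue;
                }
                try {
                    Files.delete(file);
                } catch (IOException ex) {
                    if (name.startsWith(SEGMENTS) || name.startsWith(CORRUPTED)) {
                        // leaving a stale commit point or corruption marker behind is not safe
                        throw new IllegalStateException("Can't delete " + name + " - cleanup failed", ex);
                    }
                    // anything else will be retried / deleted later on
                }
            }
        }
        // the real patch calls directory.syncMetaData() here so the deletes are durable
    }
}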
This is still a valid state - // we just need to cleanup before we continue - // we hit this before and then blindly deleted the new generation even though we managed to bake it in and then hit this: - // https://discuss.elastic.co/t/cannot-recover-index-because-of-missing-tanslog-files/38336 as an example - // - // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that file exists - // if not we don't even try to clean it up and wait until we fail creating it - assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogWriter.getHeaderLength(expectedTranslogUUID) : "unexpected translog file: [" + nextTranslogFile + "]"; - if (Files.exists(currentCheckpointFile) // current checkpoint is already copied - && Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning - logger.warn("deleted previously created, but not yet committed, next generation [{}]. This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName()); - } - this.readers.addAll(recoverFromFiles(checkpoint)); - if (readers.isEmpty()) { - throw new IllegalStateException("at least one reader must be recovered"); - } - boolean success = false; - current = null; - try { - current = createWriter(checkpoint.generation + 1, getMinFileGeneration(), checkpoint.globalCheckpoint); - success = true; - } finally { - // we have to close all the recovered ones otherwise we leak file handles here - // for instance if we have a lot of tlog and we can't create the writer we keep on holding - // on to all the uncommitted tlog files if we don't close - if (success == false) { - IOUtils.closeWhileHandlingException(readers); - } + final Checkpoint checkpoint = readCheckpoint(location); + final Path nextTranslogFile = location.resolve(getFilename(checkpoint.generation + 1)); + final Path currentCheckpointFile = location.resolve(getCommitCheckpointFileName(checkpoint.generation)); + // this is special handling for error condition when we create a new writer but we fail to bake + // the newly written file (generation+1) into the checkpoint. This is still a valid state + // we just need to cleanup before we continue + // we hit this before and then blindly deleted the new generation even though we managed to bake it in and then hit this: + // https://discuss.elastic.co/t/cannot-recover-index-because-of-missing-tanslog-files/38336 as an example + // + // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that file exists + // if not we don't even try to clean it up and wait until we fail creating it + assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogWriter.getHeaderLength(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]"; + if (Files.exists(currentCheckpointFile) // current checkpoint is already copied + && Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning + logger.warn("deleted previously created, but not yet committed, next generation [{}]. 
This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName()); + } + this.readers.addAll(recoverFromFiles(checkpoint)); + if (readers.isEmpty()) { + throw new IllegalStateException("at least one reader must be recovered"); + } + boolean success = false; + current = null; + try { + current = createWriter(checkpoint.generation + 1, getMinFileGeneration(), checkpoint.globalCheckpoint); + success = true; + } finally { + // we have to close all the recovered ones otherwise we leak file handles here + // for instance if we have a lot of tlog and we can't create the writer we keep on holding + // on to all the uncommitted tlog files if we don't close + if (success == false) { + IOUtils.closeWhileHandlingException(readers); } - } else { - IOUtils.rm(location); - // start from whatever generation lucene points to - final long generation = deletionPolicy.getMinTranslogGenerationForRecovery(); - logger.debug("wipe translog location - creating new translog, starting generation [{}]", generation); - Files.createDirectories(location); - final long initialGlobalCheckpoint = globalCheckpointSupplier.getAsLong(); - final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, generation, initialGlobalCheckpoint, generation); - final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME); - Checkpoint.write(getChannelFactory(), checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); - IOUtils.fsync(checkpointFile, false); - current = createWriter(generation, generation, initialGlobalCheckpoint); - readers.clear(); } } catch (Exception e) { // close the opened translog files if we fail to create a new translog... @@ -287,6 +270,10 @@ private ArrayList<TranslogReader> recoverFromFiles(Checkpoint checkpoint) throws } TranslogReader openReader(Path path, Checkpoint checkpoint) throws IOException { + return openReader(path, checkpoint, translogUUID); + } + + private static TranslogReader openReader(Path path, Checkpoint checkpoint, String translogUUID) throws IOException { FileChannel channel = FileChannel.open(path, StandardOpenOption.READ); try { assert Translog.parseIdFromFileName(path) == checkpoint.generation : "expected generation: " + Translog.parseIdFromFileName(path) + " but got: " + checkpoint.generation; @@ -398,6 +385,26 @@ public long sizeInBytes() { return sizeInBytesByMinGen(-1); } + long earliestLastModifiedAge() { + try (ReleasableLock ignored = readLock.acquire()) { + ensureOpen(); + return findEarliestLastModifiedAge(System.currentTimeMillis(), readers, current); + } catch (IOException e) { + throw new TranslogException(shardId, "Unable to get the earliest last modified time for the transaction log", e); + } + } + + /** + * Returns the age of the oldest entry in the translog files in milliseconds + */ + static long findEarliestLastModifiedAge(long currentTime, Iterable<TranslogReader> readers, TranslogWriter writer) throws IOException { + long earliestTime = currentTime; + for (BaseTranslogReader r : readers) { + earliestTime = Math.min(r.getLastModifiedTime(), earliestTime); + } + return Math.max(0, currentTime - Math.min(earliestTime, writer.getLastModifiedTime())); + } + /** * Returns the number of operations in the transaction files that aren't committed to lucene.
*/ @@ -405,9 +412,9 @@ private int totalOperations(long minGeneration) { try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); return Stream.concat(readers.stream(), Stream.of(current)) - .filter(r -> r.getGeneration() >= minGeneration) - .mapToInt(BaseTranslogReader::totalOperations) - .sum(); + .filter(r -> r.getGeneration() >= minGeneration) + .mapToInt(BaseTranslogReader::totalOperations) + .sum(); } } @@ -430,9 +437,9 @@ private long sizeInBytesByMinGen(long minGeneration) { try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); return Stream.concat(readers.stream(), Stream.of(current)) - .filter(r -> r.getGeneration() >= minGeneration) - .mapToLong(BaseTranslogReader::sizeInBytes) - .sum(); + .filter(r -> r.getGeneration() >= minGeneration) + .mapToLong(BaseTranslogReader::sizeInBytes) + .sum(); } } @@ -586,8 +593,8 @@ public Snapshot newSnapshotFromGen(long minGeneration) throws IOException { "Min referenced generation is [" + getMinFileGeneration() + "]"); } TranslogSnapshot[] snapshots = Stream.concat(readers.stream(), Stream.of(current)) - .filter(reader -> reader.getGeneration() >= minGeneration) - .map(BaseTranslogReader::newSnapshot).toArray(TranslogSnapshot[]::new); + .filter(reader -> reader.getGeneration() >= minGeneration) + .map(BaseTranslogReader::newSnapshot).toArray(TranslogSnapshot[]::new); return newMultiSnapshot(snapshots); } } @@ -635,7 +642,7 @@ private Snapshot newMultiSnapshot(TranslogSnapshot[] snapshots) throws IOExcepti } private Stream readersBetweenMinAndMaxSeqNo(long minSeqNo, long maxSeqNo) { - assert readLock.isHeldByCurrentThread() || writeLock.isHeldByCurrentThread(); + assert readLock.isHeldByCurrentThread() || writeLock.isHeldByCurrentThread() ; return Stream.concat(readers.stream(), Stream.of(current)) .filter(reader -> { @@ -765,7 +772,7 @@ private void closeOnTragicEvent(Exception ex) { public TranslogStats stats() { // acquire lock to make the two numbers roughly consistent (no file change half way) try (ReleasableLock lock = readLock.acquire()) { - return new TranslogStats(totalOperations(), sizeInBytes(), uncommittedOperations(), uncommittedSizeInBytes()); + return new TranslogStats(totalOperations(), sizeInBytes(), uncommittedOperations(), uncommittedSizeInBytes(), earliestLastModifiedAge()); } } @@ -1123,14 +1130,14 @@ public boolean equals(Object o) { Index index = (Index) o; if (version != index.version || - seqNo != index.seqNo || - primaryTerm != index.primaryTerm || - id.equals(index.id) == false || - type.equals(index.type) == false || - versionType != index.versionType || - autoGeneratedIdTimestamp != index.autoGeneratedIdTimestamp || - source.equals(index.source) == false) { - return false; + seqNo != index.seqNo || + primaryTerm != index.primaryTerm || + id.equals(index.id) == false || + type.equals(index.type) == false || + versionType != index.versionType || + autoGeneratedIdTimestamp != index.autoGeneratedIdTimestamp || + source.equals(index.source) == false) { + return false; } if (routing != null ? 
!routing.equals(index.routing) : index.routing != null) { return false; @@ -1303,10 +1310,10 @@ public boolean equals(Object o) { Delete delete = (Delete) o; return version == delete.version && - seqNo == delete.seqNo && - primaryTerm == delete.primaryTerm && - uid.equals(delete.uid) && - versionType == delete.versionType; + seqNo == delete.seqNo && + primaryTerm == delete.primaryTerm && + uid.equals(delete.uid) && + versionType == delete.versionType; } @Override @@ -1431,7 +1438,7 @@ private static void verifyChecksum(BufferedChecksumStreamInput in) throws IOExce long readChecksum = in.readInt() & 0xFFFF_FFFFL; if (readChecksum != expectedChecksum) { throw new TranslogCorruptedException("translog stream is corrupted, expected: 0x" + - Long.toHexString(expectedChecksum) + ", got: 0x" + Long.toHexString(readChecksum)); + Long.toHexString(expectedChecksum) + ", got: 0x" + Long.toHexString(readChecksum)); } } @@ -1553,7 +1560,7 @@ public void rollGeneration() throws IOException { final Path checkpoint = location.resolve(CHECKPOINT_FILE_NAME); assert Checkpoint.read(checkpoint).generation == current.getGeneration(); final Path generationCheckpoint = - location.resolve(getCommitCheckpointFileName(current.getGeneration())); + location.resolve(getCommitCheckpointFileName(current.getGeneration())); Files.copy(checkpoint, generationCheckpoint); IOUtils.fsync(generationCheckpoint, false); IOUtils.fsync(generationCheckpoint.getParent(), true); @@ -1696,19 +1703,31 @@ public Exception getTragicException() { } /** Reads and returns the current checkpoint */ - static final Checkpoint readCheckpoint(final Path location) throws IOException { + static Checkpoint readCheckpoint(final Path location) throws IOException { return Checkpoint.read(location.resolve(CHECKPOINT_FILE_NAME)); } /** * Reads the sequence numbers global checkpoint from the translog checkpoint. + * This ensures that the translogUUID from this translog matches the provided translogUUID. * * @param location the location of the translog * @return the global checkpoint - * @throws IOException if an I/O exception occurred reading the checkpoint + * @throws IOException if an I/O exception occurred reading the checkpoint + * @throws TranslogCorruptedException if the translog is corrupted or mismatched with the given uuid */ - public static final long readGlobalCheckpoint(final Path location) throws IOException { - return readCheckpoint(location).globalCheckpoint; + public static long readGlobalCheckpoint(final Path location, final String expectedTranslogUUID) throws IOException { + final Checkpoint checkpoint = readCheckpoint(location); + // We need to open at least one translog reader to validate the translogUUID. + final Path translogFile = location.resolve(getFilename(checkpoint.generation)); + try (TranslogReader reader = openReader(translogFile, checkpoint, expectedTranslogUUID)) { + + } catch (TranslogCorruptedException ex) { + throw ex; // just bubble up.
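+ // any other failure to open the reader (e.g. a missing file or a header written by a different translog) is reported as corruption below, so callers only need to handle TranslogCorruptedException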
+ } catch (Exception ex) { + throw new TranslogCorruptedException("Translog at [" + location + "] is corrupted", ex); + } + return checkpoint.globalCheckpoint; } /** @@ -1726,4 +1745,26 @@ TranslogWriter getCurrent() { List getReaders() { return readers; } + + public static String createEmptyTranslog(final Path location, final long initialGlobalCheckpoint, final ShardId shardId) + throws IOException { + final ChannelFactory channelFactory = FileChannel::open; + return createEmptyTranslog(location, initialGlobalCheckpoint, shardId, channelFactory); + } + + static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, ShardId shardId, ChannelFactory channelFactory) throws IOException { + IOUtils.rm(location); + Files.createDirectories(location); + final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, 1, initialGlobalCheckpoint, 1); + final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME); + Checkpoint.write(channelFactory, checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); + IOUtils.fsync(checkpointFile, false); + final String translogUUID = UUIDs.randomBase64UUID(); + TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1, location.resolve(getFilename(1)), channelFactory, + new ByteSizeValue(10), 1, initialGlobalCheckpoint, + () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); } + ); + writer.close(); + return translogUUID; + } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java index c49de67f56aea..c90e79eeba371 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java @@ -34,11 +34,13 @@ public class TranslogStats implements Streamable, ToXContentFragment { private int numberOfOperations; private long uncommittedSizeInBytes; private int uncommittedOperations; + private long earliestLastModifiedAge; public TranslogStats() { } - public TranslogStats(int numberOfOperations, long translogSizeInBytes, int uncommittedOperations, long uncommittedSizeInBytes) { + public TranslogStats(int numberOfOperations, long translogSizeInBytes, int uncommittedOperations, long uncommittedSizeInBytes, + long earliestLastModifiedAge) { if (numberOfOperations < 0) { throw new IllegalArgumentException("numberOfOperations must be >= 0"); } @@ -51,10 +53,14 @@ public TranslogStats(int numberOfOperations, long translogSizeInBytes, int uncom if (uncommittedSizeInBytes < 0) { throw new IllegalArgumentException("uncommittedSizeInBytes must be >= 0"); } + if (earliestLastModifiedAge < 0) { + throw new IllegalArgumentException("earliestLastModifiedAge must be >= 0"); + } this.numberOfOperations = numberOfOperations; this.translogSizeInBytes = translogSizeInBytes; this.uncommittedSizeInBytes = uncommittedSizeInBytes; this.uncommittedOperations = uncommittedOperations; + this.earliestLastModifiedAge = earliestLastModifiedAge; } public void add(TranslogStats translogStats) { @@ -66,6 +72,8 @@ public void add(TranslogStats translogStats) { this.translogSizeInBytes += translogStats.translogSizeInBytes; this.uncommittedOperations += translogStats.uncommittedOperations; this.uncommittedSizeInBytes += translogStats.uncommittedSizeInBytes; + this.earliestLastModifiedAge = + Math.min(this.earliestLastModifiedAge, translogStats.earliestLastModifiedAge); } public long 
getTranslogSizeInBytes() { @@ -86,6 +94,8 @@ public int getUncommittedOperations() { return uncommittedOperations; } + public long getEarliestLastModifiedAge() { return earliestLastModifiedAge; } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("translog"); @@ -93,6 +103,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.byteSizeField("size_in_bytes", "size", translogSizeInBytes); builder.field("uncommitted_operations", uncommittedOperations); builder.byteSizeField("uncommitted_size_in_bytes", "uncommitted_size", uncommittedSizeInBytes); + builder.field("earliest_last_modified_age", earliestLastModifiedAge); builder.endObject(); return builder; } @@ -113,6 +124,9 @@ public void readFrom(StreamInput in) throws IOException { uncommittedOperations = numberOfOperations; uncommittedSizeInBytes = translogSizeInBytes; } + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + earliestLastModifiedAge = in.readVLong(); + } } @Override @@ -123,5 +137,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(uncommittedOperations); out.writeVLong(uncommittedSizeInBytes); } + if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + out.writeVLong(earliestLastModifiedAge); + } } } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 1cd2073e1f299..a7f789817dabe 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -63,6 +63,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.iterable.Iterables; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; @@ -1275,7 +1276,8 @@ public AliasFilter buildAliasFilter(ClusterState state, String index, String... /* Being static, parseAliasFilter doesn't have access to whatever guts it needs to parse a query. Instead of passing in a bunch * of dependencies we pass in a function that can perform the parsing. 
*/ CheckedFunction filterParser = bytes -> { - try (XContentParser parser = XContentFactory.xContent(bytes).createParser(xContentRegistry, bytes)) { + try (XContentParser parser = XContentFactory.xContent(bytes) + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, bytes)) { return parseInnerQueryBuilder(parser); } }; diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index eff6d7ab3b93e..6a3618e668950 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -529,11 +529,19 @@ int numDocs() { return numDocs; } + boolean includeNumDocs(Version version) { + if (version.major == Version.V_5_6_8.major) { + return version.onOrAfter(Version.V_5_6_8); + } else { + return version.onOrAfter(Version.V_6_2_2); + } + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); commitId = new Engine.CommitId(in); - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + if (includeNumDocs(in.getVersion())) { numDocs = in.readInt(); } else { numDocs = UNKNOWN_NUM_DOCS; @@ -544,7 +552,7 @@ public void readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); commitId.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + if (includeNumDocs(out.getVersion())) { out.writeInt(numDocs); } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 9b4131239abe1..57aa4cf140392 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -53,6 +53,7 @@ import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogCorruptedException; import org.elasticsearch.indices.recovery.RecoveriesCollection.RecoveryRef; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.threadpool.ThreadPool; @@ -358,10 +359,12 @@ private StartRecoveryRequest getStartRecoveryRequest(final RecoveryTarget recove */ public static long getStartingSeqNo(final Logger logger, final RecoveryTarget recoveryTarget) { try { - final long globalCheckpoint = Translog.readGlobalCheckpoint(recoveryTarget.translogLocation()); - final List existingCommits = DirectoryReader.listCommits(recoveryTarget.store().directory()); + final Store store = recoveryTarget.store(); + final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); + final long globalCheckpoint = Translog.readGlobalCheckpoint(recoveryTarget.translogLocation(), translogUUID); + final List existingCommits = DirectoryReader.listCommits(store.directory()); final IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, globalCheckpoint); - final SequenceNumbers.CommitInfo seqNoStats = recoveryTarget.store().loadSeqNoInfo(safeCommit); + final SequenceNumbers.CommitInfo seqNoStats = store.loadSeqNoInfo(safeCommit); if (logger.isTraceEnabled()) { final StringJoiner descriptionOfExistingCommits = new StringJoiner(","); for (IndexCommit 
commit : existingCommits) { @@ -381,7 +384,7 @@ public static long getStartingSeqNo(final Logger logger, final RecoveryTarget re } else { return SequenceNumbers.UNASSIGNED_SEQ_NO; } - } catch (final IOException e) { + } catch (final TranslogCorruptedException | IOException e) { /* * This can happen, for example, if a phase one of the recovery completed successfully, a network partition happens before the * translog on the recovery target is opened, the recovery enters a retry loop seeing now that the index files are on disk and diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 9876f224e9b61..137878450ea86 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -40,7 +40,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -120,7 +120,7 @@ public RecoverySourceHandler(final IndexShard shard, RecoveryTargetHandler recov this.recoveryTarget = recoveryTarget; this.request = request; this.shardId = this.request.shardId().id(); - this.logger = ServerLoggers.getLogger(getClass(), nodeSettings, request.shardId(), "recover to " + request.targetNode().getName()); + this.logger = Loggers.getLogger(getClass(), nodeSettings, request.shardId(), "recover to " + request.targetNode().getName()); this.chunkSizeInBytes = fileChunkSizeInBytes; this.response = new RecoveryResponse(); } @@ -142,7 +142,7 @@ public RecoveryResponse recoverToTarget() throws IOException { throw new DelayRecoveryException("source node does not have the shard listed in its state as allocated on the node"); } assert targetShardRouting.initializing() : "expected recovery target to be initializing but was " + targetShardRouting; - }); + }, shardId + " validating recovery target ["+ request.targetAllocationId() + "] registered "); try (Closeable ignored = shard.acquireTranslogRetentionLock()) { @@ -159,7 +159,7 @@ public RecoveryResponse recoverToTarget() throws IOException { } else { final Engine.IndexCommitRef phase1Snapshot; try { - phase1Snapshot = shard.acquireIndexCommit(true, false); + phase1Snapshot = shard.acquireSafeIndexCommit(); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "snapshot failed", e); } @@ -198,7 +198,8 @@ public RecoveryResponse recoverToTarget() throws IOException { * make sure to do this before sampling the max sequence number in the next step, to ensure that we send * all documents up to maxSeqNo in phase2. 
*/ - runUnderPrimaryPermit(() -> shard.initiateTracking(request.targetAllocationId())); + runUnderPrimaryPermit(() -> shard.initiateTracking(request.targetAllocationId()), + shardId + " initiating tracking of " + request.targetAllocationId()); final long endingSeqNo = shard.seqNoStats().getMaxSeqNo(); /* @@ -229,10 +230,10 @@ private boolean isTargetSameHistory() { return targetHistoryUUID != null && targetHistoryUUID.equals(shard.getHistoryUUID()); } - private void runUnderPrimaryPermit(CancellableThreads.Interruptable runnable) { + private void runUnderPrimaryPermit(CancellableThreads.Interruptable runnable, String reason) { cancellableThreads.execute(() -> { final PlainActionFuture onAcquired = new PlainActionFuture<>(); - shard.acquirePrimaryOperationPermit(onAcquired, ThreadPool.Names.SAME); + shard.acquirePrimaryOperationPermit(onAcquired, ThreadPool.Names.SAME, reason); try (Releasable ignored = onAcquired.actionGet()) { // check that the IndexShard still has the primary authority. This needs to be checked under operation permit to prevent // races, as IndexShard will change to RELOCATED only when it holds all operation permits, see IndexShard.relocated() @@ -493,10 +494,12 @@ public void finalizeRecovery(final long targetLocalCheckpoint) throws IOExceptio * marking the shard as in-sync. If the relocation handoff holds all the permits then after the handoff completes and we acquire * the permit then the state of the shard will be relocated and this recovery will fail. */ - runUnderPrimaryPermit(() -> shard.markAllocationIdAsInSync(request.targetAllocationId(), targetLocalCheckpoint)); + runUnderPrimaryPermit(() -> shard.markAllocationIdAsInSync(request.targetAllocationId(), targetLocalCheckpoint), + shardId + " marking " + request.targetAllocationId() + " as in sync"); final long globalCheckpoint = shard.getGlobalCheckpoint(); cancellableThreads.executeIO(() -> recoveryTarget.finalizeRecovery(globalCheckpoint)); - runUnderPrimaryPermit(() -> shard.updateGlobalCheckpointForShard(request.targetAllocationId(), globalCheckpoint)); + runUnderPrimaryPermit(() -> shard.updateGlobalCheckpointForShard(request.targetAllocationId(), globalCheckpoint), + shardId + " updating " + request.targetAllocationId() + "'s global checkpoint"); if (request.isPrimaryRelocation()) { logger.trace("performing relocation hand-off"); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 75afaa140f21d..41df6ec73e020 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -34,7 +34,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.AbstractRefCounted; @@ -117,7 +117,7 @@ public RecoveryTarget(final IndexShard indexShard, this.cancellableThreads = new CancellableThreads(); this.recoveryId = idGenerator.incrementAndGet(); this.listener = listener; - this.logger = ServerLoggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); + this.logger = Loggers.getLogger(getClass(), 
indexShard.indexSettings().getSettings(), indexShard.shardId()); this.indexShard = indexShard; this.sourceNode = sourceNode; this.shardId = indexShard.shardId(); diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java index b35ac567f9e25..89e945780c8f5 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -20,14 +20,14 @@ package org.elasticsearch.ingest; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.IndexFieldMapper; import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.script.TemplateScript; import java.time.ZoneOffset; @@ -56,7 +56,8 @@ public final class IngestDocument { private final Map sourceAndMetadata; private final Map ingestMetadata; - public IngestDocument(String index, String type, String id, String routing, String parent, Map source) { + public IngestDocument(String index, String type, String id, String routing, String parent, + Long version, VersionType versionType, Map source) { this.sourceAndMetadata = new HashMap<>(); this.sourceAndMetadata.putAll(source); this.sourceAndMetadata.put(MetaData.INDEX.getFieldName(), index); @@ -68,6 +69,12 @@ public IngestDocument(String index, String type, String id, String routing, Stri if (parent != null) { this.sourceAndMetadata.put(MetaData.PARENT.getFieldName(), parent); } + if (version != null) { + sourceAndMetadata.put(MetaData.VERSION.getFieldName(), version); + } + if (versionType != null) { + sourceAndMetadata.put(MetaData.VERSION_TYPE.getFieldName(), VersionType.toString(versionType)); + } this.ingestMetadata = new HashMap<>(); this.ingestMetadata.put(TIMESTAMP, ZonedDateTime.now(ZoneOffset.UTC)); @@ -559,10 +566,10 @@ private Map createTemplateModel() { * one time operation that extracts the metadata fields from the ingest document and returns them. * Metadata fields that used to be accessible as ordinary top level fields will be removed as part of this call. 
*/ - public Map<MetaData, String> extractMetadata() { - Map<MetaData, String> metadataMap = new EnumMap<>(MetaData.class); + public Map<MetaData, Object> extractMetadata() { + Map<MetaData, Object> metadataMap = new EnumMap<>(MetaData.class); for (MetaData metaData : MetaData.values()) { - metadataMap.put(metaData, cast(metaData.getFieldName(), sourceAndMetadata.remove(metaData.getFieldName()), String.class)); + metadataMap.put(metaData, sourceAndMetadata.remove(metaData.getFieldName())); } return metadataMap; } @@ -649,7 +656,9 @@ public enum MetaData { TYPE(TypeFieldMapper.NAME), ID(IdFieldMapper.NAME), ROUTING(RoutingFieldMapper.NAME), - PARENT(ParentFieldMapper.NAME); + PARENT(ParentFieldMapper.NAME), + VERSION(VersionFieldMapper.NAME), + VERSION_TYPE("_version_type"); private final String fieldName; diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index cec622f4a2587..31bedd4ee1777 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.index.VersionType; import org.elasticsearch.threadpool.ThreadPool; import java.util.Collections; @@ -164,18 +165,24 @@ private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws E String id = indexRequest.id(); String routing = indexRequest.routing(); String parent = indexRequest.parent(); + Long version = indexRequest.version(); + VersionType versionType = indexRequest.versionType(); Map<String, Object> sourceAsMap = indexRequest.sourceAsMap(); - IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, parent, sourceAsMap); + IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, parent, version, versionType, sourceAsMap); pipeline.execute(ingestDocument); - Map<IngestDocument.MetaData, String> metadataMap = ingestDocument.extractMetadata(); + Map<IngestDocument.MetaData, Object> metadataMap = ingestDocument.extractMetadata(); //it's fine to set all metadata fields all the time, as ingest document holds their starting values //before ingestion, which might also get modified during ingestion.
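//note that _version is read back unconditionally below (the request always carries a version), whereas //_version_type is only restored when a processor has not removed it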
- indexRequest.index(metadataMap.get(IngestDocument.MetaData.INDEX)); - indexRequest.type(metadataMap.get(IngestDocument.MetaData.TYPE)); - indexRequest.id(metadataMap.get(IngestDocument.MetaData.ID)); - indexRequest.routing(metadataMap.get(IngestDocument.MetaData.ROUTING)); - indexRequest.parent(metadataMap.get(IngestDocument.MetaData.PARENT)); + indexRequest.index((String) metadataMap.get(IngestDocument.MetaData.INDEX)); + indexRequest.type((String) metadataMap.get(IngestDocument.MetaData.TYPE)); + indexRequest.id((String) metadataMap.get(IngestDocument.MetaData.ID)); + indexRequest.routing((String) metadataMap.get(IngestDocument.MetaData.ROUTING)); + indexRequest.parent((String) metadataMap.get(IngestDocument.MetaData.PARENT)); + indexRequest.version(((Number) metadataMap.get(IngestDocument.MetaData.VERSION)).longValue()); + if (metadataMap.get(IngestDocument.MetaData.VERSION_TYPE) != null) { + indexRequest.versionType(VersionType.fromString((String) metadataMap.get(IngestDocument.MetaData.VERSION_TYPE))); + } indexRequest.source(ingestDocument.getSourceAndMetadata()); } catch (Exception e) { totalStats.ingestFailed(); diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java b/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java index d19c13cc3a185..21372e46e5f3d 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java @@ -35,9 +35,9 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.gateway.GatewayService; import java.util.ArrayList; import java.util.Collections; @@ -70,6 +70,10 @@ public void applyClusterState(ClusterChangedEvent event) { } void innerUpdatePipelines(ClusterState previousState, ClusterState state) { + if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { + return; + } + IngestMetadata ingestMetadata = state.getMetaData().custom(IngestMetadata.TYPE); IngestMetadata previousIngestMetadata = previousState.getMetaData().custom(IngestMetadata.TYPE); if (Objects.equals(ingestMetadata, previousIngestMetadata)) { diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java b/server/src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java index 1d4234d689ea5..19253c52e3007 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java @@ -137,9 +137,7 @@ public Deadlock(ThreadInfo[] members) { for (int x = 0; x < members.length; x++) { ThreadInfo ti = members[x]; sb.append(ti.getThreadName()); - if (x < members.length) { - sb.append(" > "); - } + sb.append(" > "); if (x == members.length - 1) { sb.append(ti.getLockOwnerName()); } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 481bae1181d8c..c42d1c6e8ec49 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -67,7 +67,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import 
org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; @@ -273,7 +272,7 @@ protected Node(final Environment environment, Collection throw new IllegalStateException("Failed to create node environment", ex); } final boolean hadPredefinedNodeName = NODE_NAME_SETTING.exists(tmpSettings); - Logger logger = ServerLoggers.getLogger(Node.class, tmpSettings); + Logger logger = Loggers.getLogger(Node.class, tmpSettings); final String nodeId = nodeEnvironment.nodeId(); tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId); // this must be captured after the node name is possibly added to the settings @@ -343,7 +342,7 @@ protected Node(final Environment environment, Collection List clusterPlugins = pluginsService.filterPlugins(ClusterPlugin.class); final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool, ClusterModule.getClusterStateCustomSuppliers(clusterPlugins)); - clusterService.addListener(scriptModule.getScriptService()); + clusterService.addStateApplier(scriptModule.getScriptService()); resourcesToClose.add(clusterService); final IngestService ingestService = new IngestService(settings, threadPool, this.environment, scriptModule.getScriptService(), analysisModule.getAnalysisRegistry(), pluginsService.filterPlugins(IngestPlugin.class)); diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index 5fd4599df941f..11106d41beaf1 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -121,7 +121,7 @@ public NodeStats stats(CommonStatsFlags indices, boolean os, boolean process, bo script ? scriptService.stats() : null, discoveryStats ? discovery.stats() : null, ingest ? ingestService.getPipelineExecutionService().stats() : null, - adaptiveSelection ? responseCollectorService.getAdaptiveStats(searchTransportService.getClientConnections()) : null + adaptiveSelection ? 
responseCollectorService.getAdaptiveStats(searchTransportService.getPendingSearchRequests()) : null ); } diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java b/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java index 11cd251486905..9f33b0d975fd1 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java @@ -22,7 +22,6 @@ import org.elasticsearch.Version; import org.elasticsearch.bootstrap.JarHell; import org.elasticsearch.common.Booleans; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.StreamInput; @@ -36,7 +35,6 @@ import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -60,6 +58,8 @@ public class PluginInfo implements Writeable, ToXContentObject { private final String name; private final String description; private final String version; + private final Version elasticsearchVersion; + private final String javaVersion; private final String classname; private final List extendedPlugins; private final boolean hasNativeController; @@ -68,19 +68,23 @@ public class PluginInfo implements Writeable, ToXContentObject { /** * Construct plugin info. * - * @param name the name of the plugin - * @param description a description of the plugin - * @param version the version of Elasticsearch the plugin is built for - * @param classname the entry point to the plugin - * @param extendedPlugins other plugins this plugin extends through SPI - * @param hasNativeController whether or not the plugin has a native controller - * @param requiresKeystore whether or not the plugin requires the elasticsearch keystore to be created + * @param name the name of the plugin + * @param description a description of the plugin + * @param version an opaque version identifier for the plugin + * @param elasticsearchVersion the version of Elasticsearch the plugin was built for + * @param javaVersion the version of Java the plugin was built with + * @param classname the entry point to the plugin + * @param extendedPlugins other plugins this plugin extends through SPI + * @param hasNativeController whether or not the plugin has a native controller + * @param requiresKeystore whether or not the plugin requires the elasticsearch keystore to be created */ - public PluginInfo(String name, String description, String version, String classname, - List extendedPlugins, boolean hasNativeController, boolean requiresKeystore) { + public PluginInfo(String name, String description, String version, Version elasticsearchVersion, String javaVersion, + String classname, List extendedPlugins, boolean hasNativeController, boolean requiresKeystore) { this.name = name; this.description = description; this.version = version; + this.elasticsearchVersion = elasticsearchVersion; + this.javaVersion = javaVersion; this.classname = classname; this.extendedPlugins = Collections.unmodifiableList(extendedPlugins); this.hasNativeController = hasNativeController; @@ -97,6 +101,15 @@ public PluginInfo(final StreamInput in) throws IOException { this.name = in.readString(); this.description = in.readString(); this.version = in.readString(); + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + elasticsearchVersion = Version.readVersion(in); + javaVersion = in.readString(); 
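+ // nodes before 6.3.0 do not send these fields; fall back to the values derived below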
+ } else { + // the plugin must have the version of whichever node we are talking to, since this is enforced on startup + elasticsearchVersion = in.getVersion(); + // this might not be true, but it is not important, we just need something here for bwc that is a valid java version string + javaVersion = "1.8"; + } this.classname = in.readString(); if (in.getVersion().onOrAfter(Version.V_6_2_0)) { extendedPlugins = in.readList(StreamInput::readString); @@ -120,6 +133,10 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeString(name); out.writeString(description); out.writeString(version); + if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + Version.writeVersion(elasticsearchVersion, out); + out.writeString(javaVersion); + } out.writeString(classname); if (out.getVersion().onOrAfter(Version.V_6_2_0)) { out.writeStringList(extendedPlugins); @@ -133,67 +150,13 @@ public void writeTo(final StreamOutput out) throws IOException { } /** - * Extracts all {@link PluginInfo} from the provided {@code rootPath} expanding meta plugins if needed. - * @param rootPath the path where the plugins are installed - * @return A list of all plugin paths installed in the {@code rootPath} - * @throws IOException if an I/O exception occurred reading the plugin descriptors - */ - public static List extractAllPlugins(final Path rootPath) throws IOException { - final List plugins = new LinkedList<>(); // order is already lost, but some filesystems have it - final Set seen = new HashSet<>(); - if (Files.exists(rootPath)) { - try (DirectoryStream stream = Files.newDirectoryStream(rootPath)) { - for (Path plugin : stream) { - if (FileSystemUtils.isDesktopServicesStore(plugin) || - plugin.getFileName().toString().startsWith(".removing-")) { - continue; - } - if (seen.add(plugin.getFileName().toString()) == false) { - throw new IllegalStateException("duplicate plugin: " + plugin); - } - if (MetaPluginInfo.isMetaPlugin(plugin)) { - try (DirectoryStream subStream = Files.newDirectoryStream(plugin)) { - for (Path subPlugin : subStream) { - if (MetaPluginInfo.isPropertiesFile(subPlugin) || - FileSystemUtils.isDesktopServicesStore(subPlugin)) { - continue; - } - if (seen.add(subPlugin.getFileName().toString()) == false) { - throw new IllegalStateException("duplicate plugin: " + subPlugin); - } - plugins.add(subPlugin); - } - } - } else { - plugins.add(plugin); - } - } - } - } - return plugins; - } - - /** - * Reads and validates the plugin descriptor file. - * - * @param path the path to the root directory for the plugin - * @return the plugin info - * @throws IOException if an I/O exception occurred reading the plugin descriptor - */ - public static PluginInfo readFromProperties(final Path path) throws IOException { - return readFromProperties(path, true); - } - - /** - * Reads and validates the plugin descriptor file. If {@code enforceVersion} is false then version enforcement for the plugin descriptor - * is skipped. + * Reads the plugin descriptor file. 
* * @param path the path to the root directory for the plugin - * @param enforceVersion whether or not to enforce the version when reading plugin descriptors * @return the plugin info * @throws IOException if an I/O exception occurred reading the plugin descriptor */ - static PluginInfo readFromProperties(final Path path, final boolean enforceVersion) throws IOException { + public static PluginInfo readFromProperties(final Path path) throws IOException { final Path descriptor = path.resolve(ES_PLUGIN_PROPERTIES); final Map propsMap; @@ -227,22 +190,12 @@ static PluginInfo readFromProperties(final Path path, final boolean enforceVersi "property [elasticsearch.version] is missing for plugin [" + name + "]"); } final Version esVersion = Version.fromString(esVersionString); - if (enforceVersion && esVersion.equals(Version.CURRENT) == false) { - final String message = String.format( - Locale.ROOT, - "plugin [%s] is incompatible with version [%s]; was designed for version [%s]", - name, - Version.CURRENT.toString(), - esVersionString); - throw new IllegalArgumentException(message); - } final String javaVersionString = propsMap.remove("java.version"); if (javaVersionString == null) { throw new IllegalArgumentException( "property [java.version] is missing for plugin [" + name + "]"); } JarHell.checkVersionFormat(javaVersionString); - JarHell.checkJavaVersion(name, javaVersionString); final String classname = propsMap.remove("classname"); if (classname == null) { throw new IllegalArgumentException( @@ -297,7 +250,8 @@ static PluginInfo readFromProperties(final Path path, final boolean enforceVersi throw new IllegalArgumentException("Unknown properties in plugin descriptor: " + propsMap.keySet()); } - return new PluginInfo(name, description, version, classname, extendedPlugins, hasNativeController, requiresKeystore); + return new PluginInfo(name, description, version, esVersion, javaVersionString, + classname, extendedPlugins, hasNativeController, requiresKeystore); } /** @@ -337,7 +291,7 @@ public List getExtendedPlugins() { } /** - * The version of Elasticsearch the plugin was built for. + * The version of the plugin * * @return the version */ @@ -345,6 +299,24 @@ public String getVersion() { return version; } + /** + * The version of Elasticsearch the plugin was built for. + * + * @return an Elasticsearch version + */ + public Version getElasticsearchVersion() { + return elasticsearchVersion; + } + + /** + * The version of Java the plugin was built with. + * + * @return a java version string + */ + public String getJavaVersion() { + return javaVersion; + } + /** * Whether or not the plugin has a native controller. * @@ -369,6 +341,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws { builder.field("name", name); builder.field("version", version); + builder.field("elasticsearch_version", elasticsearchVersion); + builder.field("java_version", javaVersion); builder.field("description", description); builder.field("classname", classname); builder.field("extended_plugins", extendedPlugins); @@ -388,6 +362,7 @@ public boolean equals(Object o) { PluginInfo that = (PluginInfo) o; if (!name.equals(that.name)) return false; + // TODO: since the plugins are unique by their directory name, this should only be a name check, version should not matter? if (version != null ? 
!version.equals(that.version) : that.version != null) return false; return true; @@ -409,6 +384,8 @@ public String toString(String prefix) { .append(prefix).append("Name: ").append(name).append("\n") .append(prefix).append("Description: ").append(description).append("\n") .append(prefix).append("Version: ").append(version).append("\n") + .append(prefix).append("Elasticsearch Version: ").append(elasticsearchVersion).append("\n") + .append(prefix).append("Java Version: ").append(javaVersion).append("\n") .append(prefix).append("Native Controller: ").append(hasNativeController).append("\n") .append(prefix).append("Requires Keystore: ").append(requiresKeystore).append("\n") .append(prefix).append("Extended Plugins: ").append(extendedPlugins).append("\n") diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java index d70b40d701d3f..3ed4374ca2ac3 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -27,6 +27,7 @@ import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PostingsFormat; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; import org.elasticsearch.bootstrap.JarHell; import org.elasticsearch.common.Strings; @@ -34,6 +35,8 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -54,6 +57,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; @@ -62,6 +66,7 @@ import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory; @@ -99,16 +104,19 @@ public PluginsService(Settings settings, Path configPath, Path modulesDirectory, List> pluginsLoaded = new ArrayList<>(); List pluginsList = new ArrayList<>(); + // we need to build a List of plugins for checking mandatory plugins + final List pluginsNames = new ArrayList<>(); // first we load plugins that are on the classpath. 
this is for tests and transport clients for (Class pluginClass : classpathPlugins) { Plugin plugin = loadPlugin(pluginClass, settings, configPath); - PluginInfo pluginInfo = new PluginInfo(pluginClass.getName(), "classpath plugin", "NA", + PluginInfo pluginInfo = new PluginInfo(pluginClass.getName(), "classpath plugin", "NA", Version.CURRENT, "1.8", pluginClass.getName(), Collections.emptyList(), false, false); if (logger.isTraceEnabled()) { logger.trace("plugin loaded from classpath [{}]", pluginInfo); } pluginsLoaded.add(new Tuple<>(pluginInfo, plugin)); pluginsList.add(pluginInfo); + pluginsNames.add(pluginInfo.getName()); } Set seenBundles = new LinkedHashSet<>(); @@ -132,11 +140,15 @@ public PluginsService(Settings settings, Path configPath, Path modulesDirectory, // TODO: remove this leniency, but tests bogusly rely on it if (isAccessibleDirectory(pluginsDirectory, logger)) { checkForFailedPluginRemovals(pluginsDirectory); - Set plugins = getPluginBundles(pluginsDirectory); - for (Bundle bundle : plugins) { - pluginsList.add(bundle.plugin); + List plugins = getPluginBundleCollections(pluginsDirectory); + for (final BundleCollection plugin : plugins) { + final Collection bundles = plugin.bundles(); + for (final Bundle bundle : bundles) { + pluginsList.add(bundle.plugin); + } + seenBundles.addAll(bundles); + pluginsNames.add(plugin.name()); } - seenBundles.addAll(plugins); } } catch (IOException ex) { throw new IllegalStateException("Unable to initialize plugins", ex); @@ -149,12 +161,6 @@ public PluginsService(Settings settings, Path configPath, Path modulesDirectory, this.info = new PluginsAndModules(pluginsList, modulesList); this.plugins = Collections.unmodifiableList(pluginsLoaded); - // We need to build a List of plugins for checking mandatory plugins - Set pluginsNames = new HashSet<>(); - for (Tuple tuple : this.plugins) { - pluginsNames.add(tuple.v1().getName()); - } - // Checking expected plugins List mandatoryPlugins = MANDATORY_SETTING.get(settings); if (mandatoryPlugins.isEmpty() == false) { @@ -165,7 +171,11 @@ public PluginsService(Settings settings, Path configPath, Path modulesDirectory, } } if (!missingPlugins.isEmpty()) { - throw new ElasticsearchException("Missing mandatory plugins [" + Strings.collectionToDelimitedString(missingPlugins, ", ") + "]"); + final String message = String.format( + Locale.ROOT, + "missing mandatory plugins [%s]", + Strings.collectionToDelimitedString(missingPlugins, ", ")); + throw new IllegalStateException(message); } } @@ -241,9 +251,17 @@ public PluginsAndModules info() { return info; } + /** + * An abstraction over a single plugin and meta-plugins. + */ + interface BundleCollection { + String name(); + Collection bundles(); + } + // a "bundle" is a group of plugins in a single classloader // really should be 1-1, but we are not so fortunate - static class Bundle { + static class Bundle implements BundleCollection { final PluginInfo plugin; final Set urls; @@ -263,6 +281,16 @@ static class Bundle { this.urls = Objects.requireNonNull(urls); } + @Override + public String name() { + return plugin.getName(); + } + + @Override + public Collection bundles() { + return Collections.singletonList(this); + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -277,6 +305,101 @@ public int hashCode() { } } + /** + * Represents a meta-plugin and the {@link Bundle}s corresponding to its constituents. 
+ */ + static class MetaBundle implements BundleCollection { + private final String name; + private final List bundles; + + MetaBundle(final String name, final List bundles) { + this.name = name; + this.bundles = bundles; + } + + @Override + public String name() { + return name; + } + + @Override + public Collection bundles() { + return bundles; + } + + } + + /** + * Extracts all installed plugin directories from the provided {@code rootPath} expanding meta-plugins if needed. + * + * @param rootPath the path where the plugins are installed + * @return a list of all plugin paths installed in the {@code rootPath} + * @throws IOException if an I/O exception occurred reading the directories + */ + public static List findPluginDirs(final Path rootPath) throws IOException { + final Tuple, Map>> groupedPluginDirs = findGroupedPluginDirs(rootPath); + return Stream.concat( + groupedPluginDirs.v1().stream(), + groupedPluginDirs.v2().values().stream().flatMap(Collection::stream)) + .collect(Collectors.toList()); + } + + /** + * Extracts all installed plugin directories from the provided {@code rootPath} expanding meta-plugins if needed. The plugins are + * grouped into plugins and meta-plugins. The meta-plugins are keyed by the meta-plugin name. + * + * @param rootPath the path where the plugins are installed + * @return a tuple of plugins as the first component and meta-plugins keyed by meta-plugin name as the second component + * @throws IOException if an I/O exception occurred reading the directories + */ + private static Tuple, Map>> findGroupedPluginDirs(final Path rootPath) throws IOException { + final List plugins = new ArrayList<>(); + final Map> metaPlugins = new LinkedHashMap<>(); + final Set seen = new HashSet<>(); + if (Files.exists(rootPath)) { + try (DirectoryStream stream = Files.newDirectoryStream(rootPath)) { + for (Path plugin : stream) { + if (FileSystemUtils.isDesktopServicesStore(plugin) || + plugin.getFileName().toString().startsWith(".removing-")) { + continue; + } + if (seen.add(plugin.getFileName().toString()) == false) { + throw new IllegalStateException("duplicate plugin: " + plugin); + } + if (MetaPluginInfo.isMetaPlugin(plugin)) { + final String name = plugin.getFileName().toString(); + try (DirectoryStream subStream = Files.newDirectoryStream(plugin)) { + for (Path subPlugin : subStream) { + if (MetaPluginInfo.isPropertiesFile(subPlugin) || + FileSystemUtils.isDesktopServicesStore(subPlugin)) { + continue; + } + if (seen.add(subPlugin.getFileName().toString()) == false) { + throw new IllegalStateException("duplicate plugin: " + subPlugin); + } + metaPlugins.computeIfAbsent(name, n -> new ArrayList<>()).add(subPlugin); + } + } + } else { + plugins.add(plugin); + } + } + } + } + return Tuple.tuple(plugins, metaPlugins); + } + + /** + * Verify the given plugin is compatible with the current Elasticsearch installation. + */ + static void verifyCompatibility(PluginInfo info) { + if (info.getElasticsearchVersion().equals(Version.CURRENT) == false) { + throw new IllegalArgumentException("Plugin [" + info.getName() + "] was built for Elasticsearch version " + + info.getElasticsearchVersion() + " but version " + Version.CURRENT + " is running"); + } + JarHell.checkJavaVersion(info.getName(), info.getJavaVersion()); + } + // similar in impl to getPluginBundles, but DO NOT try to make them share code. // we don't need to inherit all the leniency, and things are different enough. 
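// (modules are bundled with the Elasticsearch distribution itself, so they are always built for the running version and need no compatibility leniency)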
static Set getModuleBundles(Path modulesDirectory) throws IOException { @@ -325,39 +448,46 @@ static void checkForFailedPluginRemovals(final Path pluginsDirectory) throws IOE * @throws IOException if an I/O exception occurs reading the plugin bundles */ static Set getPluginBundles(final Path pluginsDirectory) throws IOException { - return getPluginBundles(pluginsDirectory, true); + return getPluginBundleCollections(pluginsDirectory).stream().flatMap(b -> b.bundles().stream()).collect(Collectors.toSet()); } - /** - * Get the plugin bundles from the specified directory. If {@code enforceVersion} is true, then the version in each plugin descriptor - * must match the current version. - * - * @param pluginsDirectory the directory - * @param enforceVersion whether or not to enforce the version when reading plugin descriptors - * @return the set of plugin bundles in the specified directory - * @throws IOException if an I/O exception occurs reading the plugin bundles - */ - static Set getPluginBundles(final Path pluginsDirectory, final boolean enforceVersion) throws IOException { - Logger logger = Loggers.getLogger(PluginsService.class); - Set bundles = new LinkedHashSet<>(); - - List infos = PluginInfo.extractAllPlugins(pluginsDirectory); - for (Path plugin : infos) { - logger.trace("--- adding plugin [{}]", plugin.toAbsolutePath()); - final PluginInfo info; - try { - info = PluginInfo.readFromProperties(plugin, enforceVersion); - } catch (IOException e) { - throw new IllegalStateException("Could not load plugin descriptor for existing plugin [" - + plugin.getFileName() + "]. Was the plugin built before 2.0?", e); - } - if (bundles.add(new Bundle(info, plugin)) == false) { - throw new IllegalStateException("duplicate plugin: " + info); + private static List getPluginBundleCollections(final Path pluginsDirectory) throws IOException { + final List bundles = new ArrayList<>(); + final Set seenBundles = new HashSet<>(); + final Tuple, Map>> groupedPluginDirs = findGroupedPluginDirs(pluginsDirectory); + for (final Path plugin : groupedPluginDirs.v1()) { + final Bundle bundle = bundle(seenBundles, plugin); + bundles.add(bundle); + } + for (final Map.Entry> metaPlugin : groupedPluginDirs.v2().entrySet()) { + final List metaPluginBundles = new ArrayList<>(); + for (final Path metaPluginPlugin : metaPlugin.getValue()) { + final Bundle bundle = bundle(seenBundles, metaPluginPlugin); + metaPluginBundles.add(bundle); } + final MetaBundle metaBundle = new MetaBundle(metaPlugin.getKey(), metaPluginBundles); + bundles.add(metaBundle); } + return bundles; } + private static Bundle bundle(final Set bundles, final Path plugin) throws IOException { + Loggers.getLogger(PluginsService.class).trace("--- adding plugin [{}]", plugin.toAbsolutePath()); + final PluginInfo info; + try { + info = PluginInfo.readFromProperties(plugin); + } catch (final IOException e) { + throw new IllegalStateException("Could not load plugin descriptor for existing plugin [" + + plugin.getFileName() + "]. Was the plugin built before 2.0?", e); + } + final Bundle bundle = new Bundle(info, plugin); + if (bundles.add(bundle) == false) { + throw new IllegalStateException("duplicate plugin: " + info); + } + return bundle; + } + /** * Return the given bundles, sorted in dependency loading order. 
     /**
      * Return the given bundles, sorted in dependency loading order.
      *
@@ -433,6 +563,7 @@ static void checkBundleJarHell(Bundle bundle, Map<String, Set<URL>> transitiveUr
         List<String> exts = bundle.plugin.getExtendedPlugins();
 
         try {
+            final Logger logger = ESLoggerFactory.getLogger(JarHell.class);
             Set<URL> urls = new HashSet<>();
             for (String extendedPlugin : exts) {
                 Set<URL> pluginUrls = transitiveUrls.get(extendedPlugin);
@@ -453,11 +584,11 @@ static void checkBundleJarHell(Bundle bundle, Map<String, Set<URL>> transitiveUr
                 }
 
                 urls.addAll(pluginUrls);
-                JarHell.checkJarHell(urls); // check jarhell as we add each extended plugin's urls
+                JarHell.checkJarHell(urls, logger::debug); // check jarhell as we add each extended plugin's urls
             }
 
             urls.addAll(bundle.urls);
-            JarHell.checkJarHell(urls); // check jarhell of each extended plugin against this plugin
+            JarHell.checkJarHell(urls, logger::debug); // check jarhell of each extended plugin against this plugin
             transitiveUrls.put(bundle.plugin.getName(), urls);
 
             Set<URL> classpath = JarHell.parseClassPath();
@@ -470,7 +601,7 @@ static void checkBundleJarHell(Bundle bundle, Map<String, Set<URL>> transitiveUr
             // check we don't have conflicting classes
             Set<URL> union = new HashSet<>(classpath);
             union.addAll(bundle.urls);
-            JarHell.checkJarHell(union);
+            JarHell.checkJarHell(union, logger::debug);
         } catch (Exception e) {
             throw new IllegalStateException("failed to load plugin " + bundle.plugin.getName() + " due to jar hell", e);
         }
@@ -479,6 +610,8 @@ static void checkBundleJarHell(Bundle bundle, Map<String, Set<URL>> transitiveUr
     private Plugin loadBundle(Bundle bundle, Map<String, Plugin> loaded) {
         String name = bundle.plugin.getName();
 
+        verifyCompatibility(bundle.plugin);
+
         // collect loaders of extended plugins
         List<ClassLoader> extendedLoaders = new ArrayList<>();
         for (String extendedPluginName : bundle.plugin.getExtendedPlugins()) {
diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java
index 4c3d58e67ff72..c8f830c461129 100644
--- a/server/src/main/java/org/elasticsearch/repositories/Repository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java
@@ -176,7 +176,7 @@ SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List<IndexId> indices, long
     /**
      * Creates a snapshot of the shard based on the index commit point.
      * <p>
-     * The index commit point can be obtained by using {@link org.elasticsearch.index.engine.Engine#acquireIndexCommit} method.
+     * The index commit point can be obtained by using the {@link org.elasticsearch.index.engine.Engine#acquireLastIndexCommit} method.
      * Repository implementations shouldn't release the snapshot index commit point. It is done by the method caller.
      * <p>
      * As the snapshot process progresses, the implementation of this method should update the {@link IndexShardSnapshotStatus} object and check
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java
index 2f25ec3b508cc..22de2b8af7b9e 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.common.CheckedFunction;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentHelper;
@@ -109,7 +110,8 @@ protected String blobName(String name) {
     }
 
     protected T read(BytesReference bytes) throws IOException {
-        try (XContentParser parser = XContentHelper.createParser(namedXContentRegistry, bytes)) {
+        try (XContentParser parser = XContentHelper
+                .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytes)) {
             T obj = reader.apply(parser);
             return obj;
         }
diff --git a/server/src/main/java/org/elasticsearch/rest/RestRequest.java b/server/src/main/java/org/elasticsearch/rest/RestRequest.java
index 1a74a6c68111b..bd3226d7a01c0 100644
--- a/server/src/main/java/org/elasticsearch/rest/RestRequest.java
+++ b/server/src/main/java/org/elasticsearch/rest/RestRequest.java
@@ -30,6 +30,7 @@
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -342,7 +343,7 @@ public NamedXContentRegistry getXContentRegistry() {
      */
     public final XContentParser contentParser() throws IOException {
         BytesReference content = requiredContent(); // will throw exception if body or content type missing
-        return xContentType.get().xContent().createParser(xContentRegistry, content);
+        return xContentType.get().xContent().createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, content.streamInput());
     }
 
     /**
@@ -371,7 +372,7 @@ public final boolean hasContentOrSourceParam() {
      */
     public final XContentParser contentOrSourceParamParser() throws IOException {
         Tuple<XContentType, BytesReference> tuple = contentOrSourceParam();
-        return tuple.v1().xContent().createParser(xContentRegistry, tuple.v2());
+        return tuple.v1().xContent().createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, tuple.v2().streamInput());
     }
 
     /**
@@ -384,7 +385,8 @@ public final void withContentOrSourceParamParserOrNull(CheckedConsumer<XContentP
         Tuple<XContentType, BytesReference> tuple = contentOrSourceParam();
         BytesReference content = tuple.v2();
         XContentType xContentType = tuple.v1();
-        try (XContentParser parser = xContentType.xContent().createParser(xContentRegistry, content)) {
+        try (XContentParser parser = xContentType.xContent()
+            .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, content.streamInput())) {
             withParser.accept(parser);
         }
     } else {
diff --git a/server/src/main/java/org/elasticsearch/rest/action/AcknowledgedRestListener.java
b/server/src/main/java/org/elasticsearch/rest/action/AcknowledgedRestListener.java deleted file mode 100644 index 9f08c43fa0f3f..0000000000000 --- a/server/src/main/java/org/elasticsearch/rest/action/AcknowledgedRestListener.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.rest.action; - -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; -import org.elasticsearch.rest.RestResponse; - -import java.io.IOException; - -import static org.elasticsearch.rest.RestStatus.OK; - -public class AcknowledgedRestListener extends RestBuilderListener { - - public AcknowledgedRestListener(RestChannel channel) { - super(channel); - } - - @Override - public RestResponse buildResponse(T response, XContentBuilder builder) throws Exception { - // TODO - Once AcknowledgedResponse implements ToXContent, this method should be updated to call response.toXContent. 
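The deletion of AcknowledgedRestListener resolves the TODO above: acknowledged-style responses now render themselves via ToXContent, so the generic RestToXContentListener can serialize them and no per-handler addCustomFields override is needed. A self-contained sketch of that idea, using hypothetical stand-ins rather than the real Elasticsearch types:

```
import java.io.IOException;

// Hypothetical stand-in for ToXContent: a response that can serialize itself.
interface SelfRenderingResponse {
    void toXContent(StringBuilder builder) throws IOException; // StringBuilder stands in for XContentBuilder
}

class SimpleAcknowledgedResponse implements SelfRenderingResponse {
    private final boolean acknowledged;

    SimpleAcknowledgedResponse(boolean acknowledged) {
        this.acknowledged = acknowledged;
    }

    @Override
    public void toXContent(StringBuilder builder) {
        // the response owns its own fields, so a generic listener needs no customization
        builder.append("{\"acknowledged\":").append(acknowledged).append('}');
    }
}

class GenericListenerSketch {
    public static void main(String[] args) throws IOException {
        StringBuilder builder = new StringBuilder();
        new SimpleAcknowledgedResponse(true).toXContent(builder);
        System.out.println(builder); // {"acknowledged":true}
    }
}
```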
- builder.startObject() - .field(Fields.ACKNOWLEDGED, response.isAcknowledged()); - addCustomFields(builder, response); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - - /** - * Adds api specific fields to the rest response - * Does nothing by default but can be overridden by subclasses - */ - protected void addCustomFields(XContentBuilder builder, T response) throws IOException { - - } - - static final class Fields { - static final String ACKNOWLEDGED = "acknowledged"; - } -} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java index 1eafe29cfe7fb..4b019e98e6231 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; -import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.client.Requests; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterState; @@ -31,12 +30,10 @@ import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.Collections; @@ -71,27 +68,16 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { ClusterRerouteRequest clusterRerouteRequest = createRequest(request); - + settingsFilter.addFilterSettingParams(request); + if (clusterRerouteRequest.explain()) { + request.params().put("explain", Boolean.TRUE.toString()); + } // by default, return everything but metadata final String metric = request.param("metric"); if (metric == null) { request.params().put("metric", DEFAULT_METRICS); } - - return channel -> - client.admin().cluster().reroute(clusterRerouteRequest, new AcknowledgedRestListener(channel) { - @Override - protected void addCustomFields(XContentBuilder builder, ClusterRerouteResponse response) throws IOException { - builder.startObject("state"); - settingsFilter.addFilterSettingParams(request); - response.getState().toXContent(builder, request); - builder.endObject(); - if (clusterRerouteRequest.explain()) { - assert response.getExplanations() != null; - response.getExplanations().toXContent(builder, ToXContent.EMPTY_PARAMS); - } - } - }); + return channel -> client.admin().cluster().reroute(clusterRerouteRequest, new RestToXContentListener<>(channel)); } private static final Set RESPONSE_PARAMS; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java index 88e8ada55976d..2901cdd2d9ba8 100644 --- 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java @@ -20,22 +20,25 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.client.Requests; import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.Map; import java.util.Set; public class RestClusterUpdateSettingsAction extends BaseRestHandler { + + private static final String PERSISTENT = "persistent"; + private static final String TRANSIENT = "transient"; + public RestClusterUpdateSettingsAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(RestRequest.Method.PUT, "/_cluster/settings", this); @@ -56,26 +59,14 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC try (XContentParser parser = request.contentParser()) { source = parser.map(); } - if (source.containsKey("transient")) { - clusterUpdateSettingsRequest.transientSettings((Map) source.get("transient")); + if (source.containsKey(TRANSIENT)) { + clusterUpdateSettingsRequest.transientSettings((Map) source.get(TRANSIENT)); } - if (source.containsKey("persistent")) { - clusterUpdateSettingsRequest.persistentSettings((Map) source.get("persistent")); + if (source.containsKey(PERSISTENT)) { + clusterUpdateSettingsRequest.persistentSettings((Map) source.get(PERSISTENT)); } - return channel -> client.admin().cluster().updateSettings(clusterUpdateSettingsRequest, - new AcknowledgedRestListener(channel) { - @Override - protected void addCustomFields(XContentBuilder builder, ClusterUpdateSettingsResponse response) throws IOException { - builder.startObject("persistent"); - response.getPersistentSettings().toXContent(builder, request); - builder.endObject(); - - builder.startObject("transient"); - response.getTransientSettings().toXContent(builder, request); - builder.endObject(); - } - }); + return channel -> client.admin().cluster().updateSettings(clusterUpdateSettingsRequest, new RestToXContentListener<>(channel)); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java index 33455365dcccc..d2740827d1ebf 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java @@ -25,7 +25,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import 
java.io.IOException; @@ -52,6 +52,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout())); deleteRepositoryRequest.timeout(request.paramAsTime("timeout", deleteRepositoryRequest.timeout())); deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout())); - return channel -> client.admin().cluster().deleteRepository(deleteRepositoryRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().cluster().deleteRepository(deleteRepositoryRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java index fbd8822a84ea4..6b18e9a3d5043 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java @@ -25,7 +25,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -50,6 +50,6 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteSnapshotRequest deleteSnapshotRequest = deleteSnapshotRequest(request.param("repository"), request.param("snapshot")); deleteSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteSnapshotRequest.masterNodeTimeout())); - return channel -> client.admin().cluster().deleteSnapshot(deleteSnapshotRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().cluster().deleteSnapshot(deleteSnapshotRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java index 0085de36f1925..2da902df9dab6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java @@ -24,7 +24,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -50,6 +50,6 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client deleteStoredScriptRequest.timeout(request.paramAsTime("timeout", deleteStoredScriptRequest.timeout())); deleteStoredScriptRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteStoredScriptRequest.masterNodeTimeout())); - return channel -> client.admin().cluster().deleteStoredScript(deleteStoredScriptRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().cluster().deleteStoredScript(deleteStoredScriptRequest, new RestToXContentListener<>(channel)); } } diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java index 4e1b158a18cf0..14c8655e48c18 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java @@ -71,6 +71,7 @@ public RestNodesStatsAction(Settings settings, RestController controller) { metrics.put("script", r -> r.script(true)); metrics.put("discovery", r -> r.discovery(true)); metrics.put("ingest", r -> r.ingest(true)); + metrics.put("adaptive_selection", r -> r.adaptiveSelection(true)); METRICS = Collections.unmodifiableMap(metrics); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java index b87871e064e3a..62cc06cc404ce 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java @@ -26,7 +26,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -58,6 +58,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC putRepositoryRequest.verify(request.paramAsBoolean("verify", true)); putRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRepositoryRequest.masterNodeTimeout())); putRepositoryRequest.timeout(request.paramAsTime("timeout", putRepositoryRequest.timeout())); - return channel -> client.admin().cluster().putRepository(putRepositoryRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().cluster().putRepository(putRepositoryRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java index 27083503195e0..4a4530b6fa3ee 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java @@ -26,7 +26,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.script.StoredScriptSource; import java.io.IOException; @@ -61,6 +61,6 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client PutStoredScriptRequest putRequest = new PutStoredScriptRequest(id, context, content, request.getXContentType(), source); putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); putRequest.timeout(request.paramAsTime("timeout", putRequest.timeout())); - return channel -> client.admin().cluster().putStoredScript(putRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().cluster().putStoredScript(putRequest, 
new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java index 62c0e97c03506..d9d6bbcfee98d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java @@ -86,9 +86,11 @@ static void buildFromContent(XContentParser parser, AnalyzeRequest analyzeReques while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); - } else if (Fields.TEXT.match(currentFieldName) && token == XContentParser.Token.VALUE_STRING) { + } else if (Fields.TEXT.match(currentFieldName, parser.getDeprecationHandler()) && + token == XContentParser.Token.VALUE_STRING) { analyzeRequest.text(parser.text()); - } else if (Fields.TEXT.match(currentFieldName) && token == XContentParser.Token.START_ARRAY) { + } else if (Fields.TEXT.match(currentFieldName, parser.getDeprecationHandler()) && + token == XContentParser.Token.START_ARRAY) { List texts = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token.isValue() == false) { @@ -97,11 +99,13 @@ static void buildFromContent(XContentParser parser, AnalyzeRequest analyzeReques texts.add(parser.text()); } analyzeRequest.text(texts.toArray(new String[texts.size()])); - } else if (Fields.ANALYZER.match(currentFieldName) && token == XContentParser.Token.VALUE_STRING) { + } else if (Fields.ANALYZER.match(currentFieldName, parser.getDeprecationHandler()) + && token == XContentParser.Token.VALUE_STRING) { analyzeRequest.analyzer(parser.text()); - } else if (Fields.FIELD.match(currentFieldName) && token == XContentParser.Token.VALUE_STRING) { + } else if (Fields.FIELD.match(currentFieldName, parser.getDeprecationHandler()) && + token == XContentParser.Token.VALUE_STRING) { analyzeRequest.field(parser.text()); - } else if (Fields.TOKENIZER.match(currentFieldName)) { + } else if (Fields.TOKENIZER.match(currentFieldName, parser.getDeprecationHandler())) { if (token == XContentParser.Token.VALUE_STRING) { analyzeRequest.tokenizer(parser.text()); } else if (token == XContentParser.Token.START_OBJECT) { @@ -109,7 +113,7 @@ static void buildFromContent(XContentParser parser, AnalyzeRequest analyzeReques } else { throw new IllegalArgumentException(currentFieldName + " should be tokenizer's name or setting"); } - } else if (Fields.TOKEN_FILTERS.match(currentFieldName) + } else if (Fields.TOKEN_FILTERS.match(currentFieldName, parser.getDeprecationHandler()) && token == XContentParser.Token.START_ARRAY) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { @@ -121,7 +125,7 @@ static void buildFromContent(XContentParser parser, AnalyzeRequest analyzeReques + " array element should contain filter's name or setting"); } } - } else if (Fields.CHAR_FILTERS.match(currentFieldName) + } else if (Fields.CHAR_FILTERS.match(currentFieldName, parser.getDeprecationHandler()) && token == XContentParser.Token.START_ARRAY) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { @@ -133,13 +137,14 @@ static void buildFromContent(XContentParser parser, AnalyzeRequest analyzeReques + " array element should contain char filter's 
name or setting"); } } - } else if (Fields.EXPLAIN.match(currentFieldName)) { + } else if (Fields.EXPLAIN.match(currentFieldName, parser.getDeprecationHandler())) { if (parser.isBooleanValue()) { analyzeRequest.explain(parser.booleanValue()); } else { throw new IllegalArgumentException(currentFieldName + " must be either 'true' or 'false'"); } - } else if (Fields.ATTRIBUTES.match(currentFieldName) && token == XContentParser.Token.START_ARRAY) { + } else if (Fields.ATTRIBUTES.match(currentFieldName, parser.getDeprecationHandler()) && + token == XContentParser.Token.START_ARRAY) { List attributes = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token.isValue() == false) { @@ -148,7 +153,7 @@ static void buildFromContent(XContentParser parser, AnalyzeRequest analyzeReques attributes.add(parser.text()); } analyzeRequest.attributes(attributes.toArray(new String[attributes.size()])); - } else if (Fields.NORMALIZER.match(currentFieldName)) { + } else if (Fields.NORMALIZER.match(currentFieldName, parser.getDeprecationHandler())) { if (token == XContentParser.Token.VALUE_STRING) { analyzeRequest.normalizer(parser.text()); } else { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java index f5fac9ced283b..b96ada4cdd974 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; @@ -83,13 +84,13 @@ public boolean canTripCircuitBreaker() { public static ClearIndicesCacheRequest fromRequest(final RestRequest request, ClearIndicesCacheRequest clearIndicesCacheRequest) { for (Map.Entry entry : request.params().entrySet()) { - if (Fields.QUERY.match(entry.getKey())) { + if (Fields.QUERY.match(entry.getKey(), LoggingDeprecationHandler.INSTANCE)) { clearIndicesCacheRequest.queryCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.queryCache())); - } else if (Fields.REQUEST.match(entry.getKey())) { + } else if (Fields.REQUEST.match(entry.getKey(), LoggingDeprecationHandler.INSTANCE)) { clearIndicesCacheRequest.requestCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.requestCache())); - } else if (Fields.FIELD_DATA.match(entry.getKey())) { + } else if (Fields.FIELD_DATA.match(entry.getKey(), LoggingDeprecationHandler.INSTANCE)) { clearIndicesCacheRequest.fieldDataCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.fieldDataCache())); - } else if (Fields.FIELDS.match(entry.getKey())) { + } else if (Fields.FIELDS.match(entry.getKey(), LoggingDeprecationHandler.INSTANCE)) { clearIndicesCacheRequest.fields(request.paramAsStringArray(entry.getKey(), clearIndicesCacheRequest.fields())); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java index d750dedd4b8d0..b2475cafcbeb6 100644 --- 
a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -49,7 +49,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC closeIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", closeIndexRequest.masterNodeTimeout())); closeIndexRequest.timeout(request.paramAsTime("timeout", closeIndexRequest.timeout())); closeIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, closeIndexRequest.indicesOptions())); - return channel -> client.admin().indices().close(closeIndexRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().indices().close(closeIndexRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java index 0934d8557158b..201a3b66b086d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java @@ -20,15 +20,13 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -52,11 +50,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC createIndexRequest.timeout(request.paramAsTime("timeout", createIndexRequest.timeout())); createIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createIndexRequest.masterNodeTimeout())); createIndexRequest.waitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); - return channel -> client.admin().indices().create(createIndexRequest, new AcknowledgedRestListener(channel) { - @Override - public void addCustomFields(XContentBuilder builder, CreateIndexResponse response) throws IOException { - response.addCustomFields(builder); - } - }); + return channel -> client.admin().indices().create(createIndexRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java index d3f2566026151..f6c4c17857287 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java @@ -27,7 
+27,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -49,6 +49,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC deleteIndexRequest.timeout(request.paramAsTime("timeout", deleteIndexRequest.timeout())); deleteIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexRequest.masterNodeTimeout())); deleteIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteIndexRequest.indicesOptions())); - return channel -> client.admin().indices().delete(deleteIndexRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().indices().delete(deleteIndexRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java index c3fad98087755..114e043e2e17a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java @@ -24,7 +24,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -43,6 +43,6 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteIndexTemplateRequest deleteIndexTemplateRequest = new DeleteIndexTemplateRequest(request.param("name")); deleteIndexTemplateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexTemplateRequest.masterNodeTimeout())); - return channel -> client.admin().indices().deleteTemplate(deleteIndexTemplateRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().indices().deleteTemplate(deleteIndexTemplateRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java index 696b7768404b4..270a9c8fdc4f8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java @@ -26,7 +26,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -53,6 +53,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC indicesAliasesRequest.addAliasAction(AliasActions.remove().indices(indices).aliases(aliases)); indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); - return channel -> client.admin().indices().aliases(indicesAliasesRequest, 
new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().indices().aliases(indicesAliasesRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java index dc95e15802e88..73e9bad45a57b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.Map; @@ -118,6 +118,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC aliasAction.filter(filter); } indicesAliasesRequest.addAliasAction(aliasAction); - return channel -> client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().indices().aliases(indicesAliasesRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java index faae93803c84b..fe4155f8cb292 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java @@ -20,16 +20,13 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -58,6 +55,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (indicesAliasesRequest.getAliasActions().isEmpty()) { throw new IllegalArgumentException("No action specified"); } - return channel -> client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().indices().aliases(indicesAliasesRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java index 73817c1565d4a..fcf2422be7c2b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java @@ -20,17 +20,15 @@ package org.elasticsearch.rest.action.admin.indices; import 
org.elasticsearch.action.admin.indices.open.OpenIndexRequest; -import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -56,11 +54,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (waitForActiveShards != null) { openIndexRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards)); } - return channel -> client.admin().indices().open(openIndexRequest, new AcknowledgedRestListener(channel) { - @Override - protected void addCustomFields(XContentBuilder builder, OpenIndexResponse response) throws IOException { - builder.field("shards_acknowledged", response.isShardsAcknowledged()); - } - }); + return channel -> client.admin().indices().open(openIndexRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java index c96f127ee7a94..58f5dcb3e9c4e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java @@ -28,7 +28,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.Arrays; @@ -63,7 +63,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC putRequest.create(request.paramAsBoolean("create", false)); putRequest.cause(request.param("cause", "")); putRequest.source(request.requiredContent(), request.getXContentType()); - return channel -> client.admin().indices().putTemplate(putRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().indices().putTemplate(putRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java index cdac83037db30..6d3804eddc90e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -73,6 +73,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC 
putMappingRequest.timeout(request.paramAsTime("timeout", putMappingRequest.timeout())); putMappingRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putMappingRequest.masterNodeTimeout())); putMappingRequest.indicesOptions(IndicesOptions.fromRequest(request, putMappingRequest.indicesOptions())); - return channel -> client.admin().indices().putMapping(putMappingRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().indices().putMapping(putMappingRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java index bd31ec3a70cc9..489001bf2a14f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java @@ -45,11 +45,12 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { RolloverRequest rolloverIndexRequest = new RolloverRequest(request.param("index"), request.param("new_index")); - request.applyContentParser(parser -> RolloverRequest.PARSER.parse(parser, rolloverIndexRequest, null)); + request.applyContentParser(rolloverIndexRequest::fromXContent); rolloverIndexRequest.dryRun(request.paramAsBoolean("dry_run", false)); rolloverIndexRequest.timeout(request.paramAsTime("timeout", rolloverIndexRequest.timeout())); rolloverIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", rolloverIndexRequest.masterNodeTimeout())); - rolloverIndexRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); + rolloverIndexRequest.getCreateIndexRequest().waitForActiveShards( + ActiveShardCount.parseString(request.param("wait_for_active_shards"))); return channel -> client.admin().indices().rolloverIndex(rolloverIndexRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java index c72dad2654e61..be875dd0a55aa 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java @@ -20,16 +20,14 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; -import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -47,23 +45,12 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - if (request.param("target") == null) { - throw 
new IllegalArgumentException("no target index"); - } - if (request.param("index") == null) { - throw new IllegalArgumentException("no source index"); - } ResizeRequest shrinkIndexRequest = new ResizeRequest(request.param("target"), request.param("index")); shrinkIndexRequest.setResizeType(ResizeType.SHRINK); request.applyContentParser(shrinkIndexRequest::fromXContent); shrinkIndexRequest.timeout(request.paramAsTime("timeout", shrinkIndexRequest.timeout())); shrinkIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", shrinkIndexRequest.masterNodeTimeout())); shrinkIndexRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); - return channel -> client.admin().indices().resizeIndex(shrinkIndexRequest, new AcknowledgedRestListener(channel) { - @Override - public void addCustomFields(XContentBuilder builder, ResizeResponse response) throws IOException { - response.addCustomFields(builder); - } - }); + return channel -> client.admin().indices().resizeIndex(shrinkIndexRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java index 9bd1711d15a6e..d465c4ebb749b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java @@ -20,16 +20,14 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; -import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -47,23 +45,12 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - if (request.param("target") == null) { - throw new IllegalArgumentException("no target index"); - } - if (request.param("index") == null) { - throw new IllegalArgumentException("no source index"); - } ResizeRequest shrinkIndexRequest = new ResizeRequest(request.param("target"), request.param("index")); shrinkIndexRequest.setResizeType(ResizeType.SPLIT); request.applyContentParser(shrinkIndexRequest::fromXContent); shrinkIndexRequest.timeout(request.paramAsTime("timeout", shrinkIndexRequest.timeout())); shrinkIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", shrinkIndexRequest.masterNodeTimeout())); shrinkIndexRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); - return channel -> client.admin().indices().resizeIndex(shrinkIndexRequest, new AcknowledgedRestListener(channel) { - @Override - public void addCustomFields(XContentBuilder builder, ResizeResponse response) throws IOException { - response.addCustomFields(builder); - } - }); + return channel -> 
client.admin().indices().resizeIndex(shrinkIndexRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java index c2b6b09fa3ae3..93090ba25eee6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java @@ -28,7 +28,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.HashMap; @@ -73,7 +73,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC } updateSettingsRequest.settings(settings); - return channel -> client.admin().indices().updateSettings(updateSettingsRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().indices().updateSettings(updateSettingsRequest, new RestToXContentListener<>(channel)); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java index 5ad478deea628..8fca614f63c1c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java @@ -25,7 +25,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -45,6 +45,6 @@ public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient cl DeletePipelineRequest request = new DeletePipelineRequest(restRequest.param("id")); request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); request.timeout(restRequest.paramAsTime("timeout", request.timeout())); - return channel -> client.admin().cluster().deletePipeline(request, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().cluster().deletePipeline(request, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java index aa3149fc13d19..9cd66c8c9e456 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java @@ -28,7 +28,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.AcknowledgedRestListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -50,7 +50,7 @@ public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient cl PutPipelineRequest request = new PutPipelineRequest(restRequest.param("id"), sourceTuple.v2(), sourceTuple.v1()); 
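Another pattern repeated throughout this diff: XContent parsers are now created with an explicit deprecation handler (typically LoggingDeprecationHandler.INSTANCE) and an InputStream instead of a BytesReference. The sketch below is a hypothetical stand-in showing only that calling convention; the real factory is XContent#createParser:

```
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.function.Consumer;

// Stand-in for createParser(registry, deprecationHandler, stream): the
// deprecation sink is an explicit argument rather than a global logger.
class ParserFactorySketch {

    static String parse(InputStream content, Consumer<String> deprecationHandler) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] buffer = new byte[1024];
        for (int n = content.read(buffer); n != -1; n = content.read(buffer)) {
            out.write(buffer, 0, n);
        }
        String json = new String(out.toByteArray(), StandardCharsets.UTF_8);
        if (json.contains("\"_type\"")) { // illustrative deprecated field, not the real rule set
            deprecationHandler.accept("Deprecated field [_type] used");
        }
        return json;
    }

    public static void main(String[] args) throws IOException {
        InputStream stream = new ByteArrayInputStream("{\"_type\":\"doc\"}".getBytes(StandardCharsets.UTF_8));
        // LoggingDeprecationHandler.INSTANCE plays the handler role in the real code
        System.out.println(parse(stream, message -> System.err.println("WARN: " + message)));
    }
}
```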
request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); request.timeout(restRequest.paramAsTime("timeout", request.timeout())); - return channel -> client.admin().cluster().putPipeline(request, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().cluster().putPipeline(request, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/script/Script.java b/server/src/main/java/org/elasticsearch/script/Script.java index 6078c55f76640..becc58d0da7cd 100644 --- a/server/src/main/java/org/elasticsearch/script/Script.java +++ b/server/src/main/java/org/elasticsearch/script/Script.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; @@ -281,7 +282,8 @@ public static Script parse(Settings settings) { builder.startObject(); settings.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - return parse(JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, builder.bytes())); + return parse(JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, builder.bytes().streamInput())); } catch (IOException e) { // it should not happen since we are not actually reading from a stream but an in-memory byte[] throw new IllegalStateException(e); diff --git a/server/src/main/java/org/elasticsearch/script/ScriptService.java b/server/src/main/java/org/elasticsearch/script/ScriptService.java index 652ec3dda3d29..a68166779ef6c 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptService.java @@ -30,7 +30,7 @@ import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; @@ -57,7 +57,7 @@ import java.util.Set; import java.util.function.Function; -public class ScriptService extends AbstractComponent implements Closeable, ClusterStateListener { +public class ScriptService extends AbstractComponent implements Closeable, ClusterStateApplier { static final String DISABLE_DYNAMIC_SCRIPTING_SETTING = "script.disable_dynamic"; @@ -508,7 +508,7 @@ public ScriptStats stats() { } @Override - public void clusterChanged(ClusterChangedEvent event) { + public void applyClusterState(ClusterChangedEvent event) { clusterState = event.state(); } diff --git a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java index 52ab99cc5cbb5..a74108c55defa 100644 --- a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java +++ b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; @@ -242,7 +243,8 @@ private StoredScriptSource build() { * @return The parsed {@link StoredScriptSource}. */ public static StoredScriptSource parse(BytesReference content, XContentType xContentType) { - try (XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, content)) { + try (XContentParser parser = xContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, content.streamInput())) { Token token = parser.nextToken(); if (token != Token.START_OBJECT) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java index d067f84e3f700..68822f3f10c63 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java @@ -25,6 +25,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; import org.elasticsearch.search.internal.SearchContext; @@ -128,10 +130,10 @@ public ParseField parseField() { return parseField; } - public static SubAggCollectionMode parse(String value) { + public static SubAggCollectionMode parse(String value, DeprecationHandler deprecationHandler) { SubAggCollectionMode[] modes = SubAggCollectionMode.values(); for (SubAggCollectionMode mode : modes) { - if (mode.parseField.match(value)) { + if (mode.parseField.match(value, deprecationHandler)) { return mode; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java index 63490ab317f1a..262164fb4004e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java @@ -259,21 +259,21 @@ public static FiltersAggregationBuilder parse(String aggregationName, XContentPa if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_BOOLEAN) { - if (OTHER_BUCKET_FIELD.match(currentFieldName)) { + if (OTHER_BUCKET_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { otherBucket = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.VALUE_STRING) { - if (OTHER_BUCKET_KEY_FIELD.match(currentFieldName)) { + if (OTHER_BUCKET_KEY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { otherBucketKey = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + 
aggregationName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_OBJECT) { - if (FILTERS_FIELD.match(currentFieldName)) { + if (FILTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { keyedFilters = new ArrayList<>(); String key = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -289,7 +289,7 @@ public static FiltersAggregationBuilder parse(String aggregationName, XContentPa "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if (FILTERS_FIELD.match(currentFieldName)) { + if (FILTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { nonKeyedFilters = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { QueryBuilder filter = parseInnerQueryBuilder(parser); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java index f73bea0638b63..20b7eb0aed3dd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java @@ -125,7 +125,7 @@ public static NestedAggregationBuilder parse(String aggregationName, XContentPar if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (NestedAggregator.PATH_FIELD.match(currentFieldName)) { + if (NestedAggregator.PATH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { path = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java index 459950725077d..a85225e846372 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -47,7 +47,7 @@ import java.util.List; import java.util.Map; -class NestedAggregator extends BucketsAggregator implements SingleBucketAggregator { +public class NestedAggregator extends BucketsAggregator implements SingleBucketAggregator { static final ParseField PATH_FIELD = new ParseField("path"); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java index d555a59071148..ee943793e349f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java @@ -181,26 +181,27 @@ private static Range parseRange(XContentParser parser) throws IOException { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NUMBER) { - if (FROM_FIELD.match(currentFieldName)) { + if (FROM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { from = parser.doubleValue(); - } else if 
(TO_FIELD.match(currentFieldName)) { + } else if (TO_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { to = parser.doubleValue(); } else { XContentParserUtils.throwUnknownField(currentFieldName, parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_STRING) { - if (KEY_FIELD.match(currentFieldName)) { + if (KEY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { key = parser.text(); - } else if (FROM_FIELD.match(currentFieldName)) { + } else if (FROM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fromAsStr = parser.text(); - } else if (TO_FIELD.match(currentFieldName)) { + } else if (TO_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { toAsStr = parser.text(); } else { XContentParserUtils.throwUnknownField(currentFieldName, parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_NULL) { - if (FROM_FIELD.match(currentFieldName) || TO_FIELD.match(currentFieldName) - || KEY_FIELD.match(currentFieldName)) { + if (FROM_FIELD.match(currentFieldName, parser.getDeprecationHandler()) + || TO_FIELD.match(currentFieldName, parser.getDeprecationHandler()) + || KEY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { // ignore null value } else { XContentParserUtils.throwUnknownField(currentFieldName, parser.getTokenLocation()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java index 5fc0cadcf3157..69582d49060a6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java @@ -89,13 +89,13 @@ private static Range parseRange(XContentParser parser) throws IOException { if (parser.currentToken() == Token.FIELD_NAME) { continue; } - if (RangeAggregator.Range.KEY_FIELD.match(parser.currentName())) { + if (RangeAggregator.Range.KEY_FIELD.match(parser.currentName(), parser.getDeprecationHandler())) { key = parser.text(); - } else if (RangeAggregator.Range.FROM_FIELD.match(parser.currentName())) { + } else if (RangeAggregator.Range.FROM_FIELD.match(parser.currentName(), parser.getDeprecationHandler())) { from = parser.textOrNull(); - } else if (RangeAggregator.Range.TO_FIELD.match(parser.currentName())) { + } else if (RangeAggregator.Range.TO_FIELD.match(parser.currentName(), parser.getDeprecationHandler())) { to = parser.textOrNull(); - } else if (MASK_FIELD.match(parser.currentName())) { + } else if (MASK_FIELD.match(parser.currentName(), parser.getDeprecationHandler())) { mask = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "Unexpected ip range parameter: [" + parser.currentName() + "]"); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index 2416bf99a1194..d998beedf142d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -141,26 +141,27 @@ public static Range fromXContent(XContentParser parser) throws IOException { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == 
XContentParser.Token.VALUE_NUMBER) { - if (FROM_FIELD.match(currentFieldName)) { + if (FROM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { from = parser.doubleValue(); - } else if (TO_FIELD.match(currentFieldName)) { + } else if (TO_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { to = parser.doubleValue(); } else { XContentParserUtils.throwUnknownField(currentFieldName, parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_STRING) { - if (FROM_FIELD.match(currentFieldName)) { + if (FROM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fromAsStr = parser.text(); - } else if (TO_FIELD.match(currentFieldName)) { + } else if (TO_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { toAsStr = parser.text(); - } else if (KEY_FIELD.match(currentFieldName)) { + } else if (KEY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { key = parser.text(); } else { XContentParserUtils.throwUnknownField(currentFieldName, parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_NULL) { - if (FROM_FIELD.match(currentFieldName) || TO_FIELD.match(currentFieldName) - || KEY_FIELD.match(currentFieldName)) { + if (FROM_FIELD.match(currentFieldName, parser.getDeprecationHandler()) + || TO_FIELD.match(currentFieldName, parser.getDeprecationHandler()) + || KEY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { // ignore null value } else { XContentParserUtils.throwUnknownField(currentFieldName, parser.getTokenLocation()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java index f161fd2e06f8d..ea987dcc64c93 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java @@ -107,7 +107,7 @@ public static SamplerAggregationBuilder parse(String aggregationName, XContentPa if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NUMBER) { - if (SamplerAggregator.SHARD_SIZE_FIELD.match(currentFieldName)) { + if (SamplerAggregator.SHARD_SIZE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { shardSize = parser.intValue(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java index ba6cece729504..59e491705c69e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java @@ -21,6 +21,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -112,7 +113,7 @@ boolean needsGlobalOrdinals() { public static ExecutionMode fromString(String value) { for 
(ExecutionMode mode : values()) { - if (mode.parseField.match(value)) { + if (mode.parseField.match(value, LoggingDeprecationHandler.INSTANCE)) { return mode; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java index 5ba302d5642fc..b5c5a17d11d7b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ParseFieldRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -96,7 +97,7 @@ public static Aggregator.Parser getParser(ParseFieldRegistry { SignificanceHeuristicParser significanceHeuristicParser = significanceHeuristicParserRegistry - .lookupReturningNullIfNotFound(name); + .lookupReturningNullIfNotFound(name, p.getDeprecationHandler()); return significanceHeuristicParser.parse(p); }, new ParseField(name)); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregationBuilder.java index b91017466962e..5a2c65f7a4725 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregationBuilder.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ParseFieldRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -111,7 +112,7 @@ public static Aggregator.Parser getParser( PARSER.declareObject(SignificantTextAggregationBuilder::significanceHeuristic, (p, context) -> { SignificanceHeuristicParser significanceHeuristicParser = significanceHeuristicParserRegistry - .lookupReturningNullIfNotFound(name); + .lookupReturningNullIfNotFound(name, p.getDeprecationHandler()); return significanceHeuristicParser.parse(p); }, new ParseField(name)); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java index 2c74fe1ba0124..f0347f9248b84 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java @@ -117,7 +117,7 @@ public SignificanceHeuristic parse(XContentParser parser) throws IOException, Qu boolean backgroundIsSuperset = true; XContentParser.Token token = parser.nextToken(); while 
(!token.equals(XContentParser.Token.END_OBJECT)) { - if (BACKGROUND_IS_SUPERSET.match(parser.currentName())) { + if (BACKGROUND_IS_SUPERSET.match(parser.currentName(), parser.getDeprecationHandler())) { parser.nextToken(); backgroundIsSuperset = parser.booleanValue(); } else { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java index f34a77d32bf83..5da082537bc37 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java @@ -68,8 +68,19 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public boolean equals(Object other) { - return ((NXYSignificanceHeuristic) other).includeNegatives == includeNegatives && ((NXYSignificanceHeuristic) other).backgroundIsSuperset == backgroundIsSuperset; + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + NXYSignificanceHeuristic other = (NXYSignificanceHeuristic) obj; + if (backgroundIsSuperset != other.backgroundIsSuperset) + return false; + if (includeNegatives != other.includeNegatives) + return false; + return true; } @Override @@ -158,10 +169,10 @@ public SignificanceHeuristic parse(XContentParser parser) boolean backgroundIsSuperset = true; XContentParser.Token token = parser.nextToken(); while (!token.equals(XContentParser.Token.END_OBJECT)) { - if (INCLUDE_NEGATIVES_FIELD.match(parser.currentName())) { + if (INCLUDE_NEGATIVES_FIELD.match(parser.currentName(), parser.getDeprecationHandler())) { parser.nextToken(); includeNegatives = parser.booleanValue(); - } else if (BACKGROUND_IS_SUPERSET.match(parser.currentName())) { + } else if (BACKGROUND_IS_SUPERSET.match(parser.currentName(), parser.getDeprecationHandler())) { parser.nextToken(); backgroundIsSuperset = parser.booleanValue(); } else { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java index 5f812ae4cc389..a2cbd49693a27 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java @@ -158,7 +158,7 @@ public static SignificanceHeuristic parse(XContentParser parser) if (token.equals(XContentParser.Token.FIELD_NAME)) { currentFieldName = parser.currentName(); } else { - if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { + if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { script = Script.parse(parser); } else { throw new ElasticsearchParseException("failed to parse [{}] significance heuristic. 
unknown object [{}]", heuristicName, currentFieldName); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java index d111c7ee80198..9e3012c5eb9d6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java @@ -104,9 +104,9 @@ public static IncludeExclude parseInclude(XContentParser parser) throws IOExcept while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); - } else if (NUM_PARTITIONS_FIELD.match(currentFieldName)) { + } else if (NUM_PARTITIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { numPartitions = parser.intValue(); - } else if (PARTITION_FIELD.match(currentFieldName)) { + } else if (PARTITION_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { partition = parser.intValue(); } else { throw new ElasticsearchParseException( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java index 4a536fb681d47..3def87d7dfec0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -82,7 +83,7 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder SubAggCollectionMode.parse(p.text()), + (p, c) -> SubAggCollectionMode.parse(p.text(), LoggingDeprecationHandler.INSTANCE), SubAggCollectionMode.KEY, ObjectParser.ValueType.STRING); PARSER.declareObjectArray(TermsAggregationBuilder::order, (p, c) -> InternalOrder.Parser.parseOrderParam(p), diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java index 8294986bcce8c..f3b867307d172 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java @@ -38,6 +38,7 @@ import org.elasticsearch.search.aggregations.bucket.DeferableBucketAggregator; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregator; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.AggregationPath; @@ -187,7 +188,16 @@ public TermsAggregator(String name, AggregatorFactories factories, SearchContext 
this.bucketCountThresholds = bucketCountThresholds; this.order = InternalOrder.validate(order, this); this.format = format; - this.collectMode = collectMode; + if (subAggsNeedScore() && descendsFromNestedAggregator(parent)) { + /** + * Force the execution to depth_first because we need to access the score of + * nested documents in a sub-aggregation and we are not able to generate this score + * while replaying deferred documents. + */ + this.collectMode = SubAggCollectionMode.DEPTH_FIRST; + } else { + this.collectMode = collectMode; + } // Don't defer any child agg if we are dependent on it for pruning results if (order instanceof Aggregation){ AggregationPath path = ((Aggregation) order).path(); @@ -203,6 +213,25 @@ public TermsAggregator(String name, AggregatorFactories factories, SearchContext } } + static boolean descendsFromNestedAggregator(Aggregator parent) { + while (parent != null) { + if (parent.getClass() == NestedAggregator.class) { + return true; + } + parent = parent.parent(); + } + return false; + } + + private boolean subAggsNeedScore() { + for (Aggregator subAgg : subAggregators) { + if (subAgg.needsScores()) { + return true; + } + } + return false; + } + /** * Internal Optimization for ordering {@link InternalTerms.Bucket}s by a sub aggregation. *

diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java index 34b10c9d77465..69ac175c419c8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java @@ -255,16 +255,16 @@ public static ScriptedMetricAggregationBuilder parse(String aggregationName, XCo if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT || token == XContentParser.Token.VALUE_STRING) { - if (INIT_SCRIPT_FIELD.match(currentFieldName)) { + if (INIT_SCRIPT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { initScript = Script.parse(parser); - } else if (MAP_SCRIPT_FIELD.match(currentFieldName)) { + } else if (MAP_SCRIPT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { mapScript = Script.parse(parser); - } else if (COMBINE_SCRIPT_FIELD.match(currentFieldName)) { + } else if (COMBINE_SCRIPT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { combineScript = Script.parse(parser); - } else if (REDUCE_SCRIPT_FIELD.match(currentFieldName)) { + } else if (REDUCE_SCRIPT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { reduceScript = Script.parse(parser); } else if (token == XContentParser.Token.START_OBJECT && - PARAMS_FIELD.match(currentFieldName)) { + PARAMS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { params = parser.map(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java index 996148959db13..39e184f557dc3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java @@ -642,31 +642,31 @@ public static TopHitsAggregationBuilder parse(String aggregationName, XContentPa if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (SearchSourceBuilder.FROM_FIELD.match(currentFieldName)) { + if (SearchSourceBuilder.FROM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.from(parser.intValue()); - } else if (SearchSourceBuilder.SIZE_FIELD.match(currentFieldName)) { + } else if (SearchSourceBuilder.SIZE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.size(parser.intValue()); - } else if (SearchSourceBuilder.VERSION_FIELD.match(currentFieldName)) { + } else if (SearchSourceBuilder.VERSION_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.version(parser.booleanValue()); - } else if (SearchSourceBuilder.EXPLAIN_FIELD.match(currentFieldName)) { + } else if (SearchSourceBuilder.EXPLAIN_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.explain(parser.booleanValue()); - } else if (SearchSourceBuilder.TRACK_SCORES_FIELD.match(currentFieldName)) { + } else if (SearchSourceBuilder.TRACK_SCORES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { 
factory.trackScores(parser.booleanValue()); - } else if (SearchSourceBuilder._SOURCE_FIELD.match(currentFieldName)) { + } else if (SearchSourceBuilder._SOURCE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.fetchSource(FetchSourceContext.fromXContent(parser)); - } else if (SearchSourceBuilder.STORED_FIELDS_FIELD.match(currentFieldName)) { + } else if (SearchSourceBuilder.STORED_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.storedFieldsContext = StoredFieldsContext.fromXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), parser); - } else if (SearchSourceBuilder.SORT_FIELD.match(currentFieldName)) { + } else if (SearchSourceBuilder.SORT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.sort(parser.text()); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_OBJECT) { - if (SearchSourceBuilder._SOURCE_FIELD.match(currentFieldName)) { + if (SearchSourceBuilder._SOURCE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.fetchSource(FetchSourceContext.fromXContent(parser)); - } else if (SearchSourceBuilder.SCRIPT_FIELDS_FIELD.match(currentFieldName)) { + } else if (SearchSourceBuilder.SCRIPT_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { List scriptFields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { String scriptFieldName = parser.currentName(); @@ -678,9 +678,10 @@ public static TopHitsAggregationBuilder parse(String aggregationName, XContentPa if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (SearchSourceBuilder.SCRIPT_FIELD.match(currentFieldName)) { + if (SearchSourceBuilder.SCRIPT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { script = Script.parse(parser); - } else if (SearchSourceBuilder.IGNORE_FAILURE_FIELD.match(currentFieldName)) { + } else if (SearchSourceBuilder.IGNORE_FAILURE_FIELD.match(currentFieldName, + parser.getDeprecationHandler())) { ignoreFailure = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), @@ -688,7 +689,7 @@ public static TopHitsAggregationBuilder parse(String aggregationName, XContentPa parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_OBJECT) { - if (SearchSourceBuilder.SCRIPT_FIELD.match(currentFieldName)) { + if (SearchSourceBuilder.SCRIPT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { script = Script.parse(parser); } else { throw new ParsingException(parser.getTokenLocation(), @@ -707,9 +708,9 @@ public static TopHitsAggregationBuilder parse(String aggregationName, XContentPa } } factory.scriptFields(scriptFields); - } else if (SearchSourceBuilder.HIGHLIGHT_FIELD.match(currentFieldName)) { + } else if (SearchSourceBuilder.HIGHLIGHT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.highlighter(HighlightBuilder.fromXContent(parser)); - } else if (SearchSourceBuilder.SORT_FIELD.match(currentFieldName)) { + } else if (SearchSourceBuilder.SORT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { List> sorts = SortBuilder.fromXContent(parser); factory.sorts(sorts); } else { @@ -718,10 +719,10 @@ public static TopHitsAggregationBuilder parse(String aggregationName, XContentPa } } else if (token == 
XContentParser.Token.START_ARRAY) { - if (SearchSourceBuilder.STORED_FIELDS_FIELD.match(currentFieldName)) { + if (SearchSourceBuilder.STORED_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.storedFieldsContext = StoredFieldsContext.fromXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), parser); - } else if (SearchSourceBuilder.DOCVALUE_FIELDS_FIELD.match(currentFieldName)) { + } else if (SearchSourceBuilder.DOCVALUE_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { List fieldDataFields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { @@ -732,10 +733,10 @@ public static TopHitsAggregationBuilder parse(String aggregationName, XContentPa } } factory.fieldDataFields(fieldDataFields); - } else if (SearchSourceBuilder.SORT_FIELD.match(currentFieldName)) { + } else if (SearchSourceBuilder.SORT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { List> sorts = SortBuilder.fromXContent(parser); factory.sorts(sorts); - } else if (SearchSourceBuilder._SOURCE_FIELD.match(currentFieldName)) { + } else if (SearchSourceBuilder._SOURCE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.fetchSource(FetchSourceContext.fromXContent(parser)); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java index 2e3756684446a..d2c973ebec26c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; @@ -64,7 +65,7 @@ public enum GapPolicy { public static GapPolicy parse(String text, XContentLocation tokenLocation) { GapPolicy result = null; for (GapPolicy policy : values()) { - if (policy.parseField.match(text)) { + if (policy.parseField.match(text, LoggingDeprecationHandler.INSTANCE)) { if (result == null) { result = policy; } else { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsParser.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsParser.java index d5a28100a5784..3f29a3bfdc034 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsParser.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsParser.java @@ -56,17 +56,17 @@ public final BucketMetricsPipelineAggregationBuilder parse(String pipelineAgg if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (FORMAT.match(currentFieldName)) { + if (FORMAT.match(currentFieldName, parser.getDeprecationHandler())) { format = 
parser.text(); - } else if (BUCKETS_PATH.match(currentFieldName)) { + } else if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { bucketsPaths = new String[] { parser.text() }; - } else if (GAP_POLICY.match(currentFieldName)) { + } else if (GAP_POLICY.match(currentFieldName, parser.getDeprecationHandler())) { gapPolicy = GapPolicy.parse(parser.text(), parser.getTokenLocation()); } else { parseToken(pipelineAggregatorName, parser, currentFieldName, token, params); } } else if (token == XContentParser.Token.START_ARRAY) { - if (BUCKETS_PATH.match(currentFieldName)) { + if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { List paths = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String path = parser.text(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java index a6673d3a9dacd..0870ef0e18725 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java @@ -135,7 +135,7 @@ protected PercentilesBucketPipelineAggregationBuilder buildFactory(String pipeli @Override protected boolean token(XContentParser parser, String field, XContentParser.Token token, Map params) throws IOException { - if (PERCENTS_FIELD.match(field) && token == XContentParser.Token.START_ARRAY) { + if (PERCENTS_FIELD.match(field, parser.getDeprecationHandler()) && token == XContentParser.Token.START_ARRAY) { DoubleArrayList percents = new DoubleArrayList(10); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { percents.add(parser.doubleValue()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketParser.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketParser.java index 83ad5a06fa043..d5e9e4f11e5ab 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketParser.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketParser.java @@ -45,7 +45,7 @@ protected ExtendedStatsBucketPipelineAggregationBuilder buildFactory(String pipe @Override protected boolean token(XContentParser parser, String field, XContentParser.Token token, Map params) throws IOException { - if (SIGMA.match(field) && token == XContentParser.Token.VALUE_NUMBER) { + if (SIGMA.match(field, parser.getDeprecationHandler()) && token == XContentParser.Token.VALUE_NUMBER) { params.put(SIGMA.getPreferredName(), parser.doubleValue()); return true; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java index 7c7f1383915bb..a63fd005f9cb2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java @@ -168,21 +168,21 @@ public static BucketScriptPipelineAggregationBuilder parse(String reducerName, X if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (FORMAT.match(currentFieldName)) { + if (FORMAT.match(currentFieldName, parser.getDeprecationHandler())) { format = parser.text(); - } else if (BUCKETS_PATH.match(currentFieldName)) { + } else if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { bucketsPathsMap = new HashMap<>(); bucketsPathsMap.put("_value", parser.text()); - } else if (GAP_POLICY.match(currentFieldName)) { + } else if (GAP_POLICY.match(currentFieldName, parser.getDeprecationHandler())) { gapPolicy = GapPolicy.parse(parser.text(), parser.getTokenLocation()); - } else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { + } else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { script = Script.parse(parser); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if (BUCKETS_PATH.match(currentFieldName)) { + if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { List paths = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String path = parser.text(); @@ -197,9 +197,9 @@ public static BucketScriptPipelineAggregationBuilder parse(String reducerName, X "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_OBJECT) { - if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { + if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { script = Script.parse(parser); - } else if (BUCKETS_PATH.match(currentFieldName)) { + } else if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { Map map = parser.map(); bucketsPathsMap = new HashMap<>(); for (Map.Entry entry : map.entrySet()) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java index 219150667f63a..cb8ba81cee6ed 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java @@ -133,19 +133,19 @@ public static BucketSelectorPipelineAggregationBuilder parse(String reducerName, if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (BUCKETS_PATH.match(currentFieldName)) { + if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { bucketsPathsMap = new HashMap<>(); bucketsPathsMap.put("_value", parser.text()); - } else if (GAP_POLICY.match(currentFieldName)) { + } else if (GAP_POLICY.match(currentFieldName, parser.getDeprecationHandler())) { gapPolicy = GapPolicy.parse(parser.text(), parser.getTokenLocation()); - } else if 
(Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { + } else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { script = Script.parse(parser); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if (BUCKETS_PATH.match(currentFieldName)) { + if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { List paths = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String path = parser.text(); @@ -160,9 +160,9 @@ public static BucketSelectorPipelineAggregationBuilder parse(String reducerName, "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_OBJECT) { - if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { + if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { script = Script.parse(parser); - } else if (BUCKETS_PATH.match(currentFieldName)) { + } else if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { Map map = parser.map(); bucketsPathsMap = new HashMap<>(); for (Map.Entry entry : map.entrySet()) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregationBuilder.java index eaf34f7e70946..dbbb7fa534a57 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregationBuilder.java @@ -140,16 +140,16 @@ public static CumulativeSumPipelineAggregationBuilder parse(String pipelineAggre if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (FORMAT.match(currentFieldName)) { + if (FORMAT.match(currentFieldName, parser.getDeprecationHandler())) { format = parser.text(); - } else if (BUCKETS_PATH.match(currentFieldName)) { + } else if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { bucketsPaths = new String[] { parser.text() }; } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + pipelineAggregatorName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if (BUCKETS_PATH.match(currentFieldName)) { + if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { List paths = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String path = parser.text(); @@ -194,4 +194,4 @@ protected boolean doEquals(Object obj) { public String getWriteableName() { return NAME; } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java index 3d2494459f6bf..ba7a2a2c03f7f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java @@ -206,20 +206,20 @@ public static DerivativePipelineAggregationBuilder parse(String pipelineAggregat if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (FORMAT_FIELD.match(currentFieldName)) { + if (FORMAT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { format = parser.text(); - } else if (BUCKETS_PATH_FIELD.match(currentFieldName)) { + } else if (BUCKETS_PATH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { bucketsPaths = new String[] { parser.text() }; - } else if (GAP_POLICY_FIELD.match(currentFieldName)) { + } else if (GAP_POLICY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { gapPolicy = GapPolicy.parse(parser.text(), parser.getTokenLocation()); - } else if (UNIT_FIELD.match(currentFieldName)) { + } else if (UNIT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { units = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + pipelineAggregatorName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if (BUCKETS_PATH_FIELD.match(currentFieldName)) { + if (BUCKETS_PATH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { List paths = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String path = parser.text(); @@ -279,4 +279,4 @@ protected int doHashCode() { public String getWriteableName() { return NAME; } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java index 4fca370ab3ffd..d2210e1da322c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.ParseFieldRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -321,13 +322,13 @@ public static MovAvgPipelineAggregationBuilder parse( if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NUMBER) { - if (WINDOW.match(currentFieldName)) { + if (WINDOW.match(currentFieldName, parser.getDeprecationHandler())) { window = parser.intValue(); if (window <= 0) { throw new ParsingException(parser.getTokenLocation(), "[" + currentFieldName + "] value must be a positive, " + "non-zero integer. Value supplied was [" + predict + "] in [" + pipelineAggregatorName + "]."); } - } else if (PREDICT.match(currentFieldName)) { + } else if (PREDICT.match(currentFieldName, parser.getDeprecationHandler())) { predict = parser.intValue(); if (predict <= 0) { throw new ParsingException(parser.getTokenLocation(), "[" + currentFieldName + "] value must be a positive integer." 
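The mechanical change repeated through all of these hunks is the new explicit DeprecationHandler argument on ParseField.match. The PR uses two call shapes: inside a parse loop the handler carried by the parser is threaded through, while standalone lookups (enum parsing such as GapPolicy.parse or SubAggCollectionMode.parse) fall back to the logging singleton, preserving the old warn-on-deprecated-name behaviour. A short sketch of both, assuming the elasticsearch server classes on the classpath; the helper names matchesIn/matchesStandalone are illustrative, not from the PR.

import java.io.IOException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.XContentParser;

final class ParseFieldMatching {
    // Inside a parse loop: use the handler the parser already carries,
    // as in FiltersAggregationBuilder, SearchSourceBuilder, etc. above.
    static boolean matchesIn(ParseField field, XContentParser parser) throws IOException {
        return field.match(parser.currentName(), parser.getDeprecationHandler());
    }

    // Outside any parser (enum lookups such as GapPolicy.parse or
    // SeasonalityType.parse): the logging singleton keeps the old behaviour
    // of emitting a deprecation warning when a deprecated name matches.
    static boolean matchesStandalone(ParseField field, String name) {
        return field.match(name, LoggingDeprecationHandler.INSTANCE);
    }
}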
@@ -338,20 +339,20 @@ public static MovAvgPipelineAggregationBuilder parse( "Unknown key for a " + token + " in [" + pipelineAggregatorName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.VALUE_STRING) { - if (FORMAT.match(currentFieldName)) { + if (FORMAT.match(currentFieldName, parser.getDeprecationHandler())) { format = parser.text(); - } else if (BUCKETS_PATH.match(currentFieldName)) { + } else if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { bucketsPaths = new String[] { parser.text() }; - } else if (GAP_POLICY.match(currentFieldName)) { + } else if (GAP_POLICY.match(currentFieldName, parser.getDeprecationHandler())) { gapPolicy = GapPolicy.parse(parser.text(), parser.getTokenLocation()); - } else if (MODEL.match(currentFieldName)) { + } else if (MODEL.match(currentFieldName, parser.getDeprecationHandler())) { model = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + pipelineAggregatorName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if (BUCKETS_PATH.match(currentFieldName)) { + if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { List paths = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String path = parser.text(); @@ -363,14 +364,14 @@ public static MovAvgPipelineAggregationBuilder parse( "Unknown key for a " + token + " in [" + pipelineAggregatorName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_OBJECT) { - if (SETTINGS.match(currentFieldName)) { + if (SETTINGS.match(currentFieldName, parser.getDeprecationHandler())) { settings = parser.map(); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + pipelineAggregatorName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.VALUE_BOOLEAN) { - if (MINIMIZE.match(currentFieldName)) { + if (MINIMIZE.match(currentFieldName, parser.getDeprecationHandler())) { minimize = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), @@ -402,7 +403,8 @@ public static MovAvgPipelineAggregationBuilder parse( factory.predict(predict); } if (model != null) { - MovAvgModel.AbstractModelParser modelParser = movingAverageMdelParserRegistry.lookup(model, parser.getTokenLocation()); + MovAvgModel.AbstractModelParser modelParser = movingAverageMdelParserRegistry.lookup(model, + parser.getTokenLocation(), parser.getDeprecationHandler()); MovAvgModel movAvgModel; try { movAvgModel = modelParser.parse(settings, pipelineAggregatorName, factory.window()); @@ -437,4 +439,4 @@ protected boolean doEquals(Object obj) { public String getWriteableName() { return NAME; } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java index 92b2e4d3ea26e..a750145e5f9ab 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import 
org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; @@ -67,7 +68,7 @@ public static SeasonalityType parse(String text) { } SeasonalityType result = null; for (SeasonalityType policy : values()) { - if (policy.parseField.match(text)) { + if (policy.parseField.match(text, LoggingDeprecationHandler.INSTANCE)) { result = policy; break; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregationBuilder.java index b09c26cce7c5e..a7e43c401e8a6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregationBuilder.java @@ -160,18 +160,18 @@ public static SerialDiffPipelineAggregationBuilder parse(String reducerName, XCo if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (FORMAT.match(currentFieldName)) { + if (FORMAT.match(currentFieldName, parser.getDeprecationHandler())) { format = parser.text(); - } else if (BUCKETS_PATH.match(currentFieldName)) { + } else if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { bucketsPaths = new String[] { parser.text() }; - } else if (GAP_POLICY.match(currentFieldName)) { + } else if (GAP_POLICY.match(currentFieldName, parser.getDeprecationHandler())) { gapPolicy = GapPolicy.parse(parser.text(), parser.getTokenLocation()); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.VALUE_NUMBER) { - if (LAG.match(currentFieldName)) { + if (LAG.match(currentFieldName, parser.getDeprecationHandler())) { lag = parser.intValue(true); if (lag <= 0) { throw new ParsingException(parser.getTokenLocation(), @@ -184,7 +184,7 @@ public static SerialDiffPipelineAggregationBuilder parse(String reducerName, XCo "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if (BUCKETS_PATH.match(currentFieldName)) { + if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { List paths = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String path = parser.text(); @@ -236,4 +236,4 @@ protected boolean doEquals(Object obj) { public String getWriteableName() { return NAME; } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 4654e2bb57fde..815abf1b7a7c4 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -967,50 +967,50 @@ public void parseXContent(XContentParser parser) throws IOException { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if 
(token.isValue()) { - if (FROM_FIELD.match(currentFieldName)) { + if (FROM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { from = parser.intValue(); - } else if (SIZE_FIELD.match(currentFieldName)) { + } else if (SIZE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { size = parser.intValue(); - } else if (TIMEOUT_FIELD.match(currentFieldName)) { + } else if (TIMEOUT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { timeout = TimeValue.parseTimeValue(parser.text(), null, TIMEOUT_FIELD.getPreferredName()); - } else if (TERMINATE_AFTER_FIELD.match(currentFieldName)) { + } else if (TERMINATE_AFTER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { terminateAfter = parser.intValue(); - } else if (MIN_SCORE_FIELD.match(currentFieldName)) { + } else if (MIN_SCORE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { minScore = parser.floatValue(); - } else if (VERSION_FIELD.match(currentFieldName)) { + } else if (VERSION_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { version = parser.booleanValue(); - } else if (EXPLAIN_FIELD.match(currentFieldName)) { + } else if (EXPLAIN_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { explain = parser.booleanValue(); - } else if (TRACK_SCORES_FIELD.match(currentFieldName)) { + } else if (TRACK_SCORES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { trackScores = parser.booleanValue(); - } else if (TRACK_TOTAL_HITS_FIELD.match(currentFieldName)) { + } else if (TRACK_TOTAL_HITS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { trackTotalHits = parser.booleanValue(); - } else if (_SOURCE_FIELD.match(currentFieldName)) { + } else if (_SOURCE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fetchSourceContext = FetchSourceContext.fromXContent(parser); - } else if (STORED_FIELDS_FIELD.match(currentFieldName)) { + } else if (STORED_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { storedFieldsContext = StoredFieldsContext.fromXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), parser); - } else if (SORT_FIELD.match(currentFieldName)) { + } else if (SORT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { sort(parser.text()); - } else if (PROFILE_FIELD.match(currentFieldName)) { + } else if (PROFILE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { profile = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_OBJECT) { - if (QUERY_FIELD.match(currentFieldName)) { + if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryBuilder = parseInnerQueryBuilder(parser); - } else if (POST_FILTER_FIELD.match(currentFieldName)) { + } else if (POST_FILTER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { postQueryBuilder = parseInnerQueryBuilder(parser); - } else if (_SOURCE_FIELD.match(currentFieldName)) { + } else if (_SOURCE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fetchSourceContext = FetchSourceContext.fromXContent(parser); - } else if (SCRIPT_FIELDS_FIELD.match(currentFieldName)) { + } else if (SCRIPT_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { scriptFields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { scriptFields.add(new ScriptField(parser)); } - 
} else if (INDICES_BOOST_FIELD.match(currentFieldName)) { + } else if (INDICES_BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { DEPRECATION_LOGGER.deprecated( "Object format in indices_boost is deprecated, please use array format instead"); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -1023,19 +1023,19 @@ public void parseXContent(XContentParser parser) throws IOException { " in [" + currentFieldName + "].", parser.getTokenLocation()); } } - } else if (AGGREGATIONS_FIELD.match(currentFieldName) - || AGGS_FIELD.match(currentFieldName)) { + } else if (AGGREGATIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler()) + || AGGS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { aggregations = AggregatorFactories.parseAggregators(parser); - } else if (HIGHLIGHT_FIELD.match(currentFieldName)) { + } else if (HIGHLIGHT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { highlightBuilder = HighlightBuilder.fromXContent(parser); - } else if (SUGGEST_FIELD.match(currentFieldName)) { + } else if (SUGGEST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { suggestBuilder = SuggestBuilder.fromXContent(parser); - } else if (SORT_FIELD.match(currentFieldName)) { + } else if (SORT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { sorts = new ArrayList<>(SortBuilder.fromXContent(parser)); - } else if (RESCORE_FIELD.match(currentFieldName)) { + } else if (RESCORE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { rescoreBuilders = new ArrayList<>(); rescoreBuilders.add(RescorerBuilder.parseFromXContent(parser)); - } else if (EXT_FIELD.match(currentFieldName)) { + } else if (EXT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { extBuilders = new ArrayList<>(); String extSectionName = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -1051,18 +1051,18 @@ public void parseXContent(XContentParser parser) throws IOException { extBuilders.add(searchExtBuilder); } } - } else if (SLICE.match(currentFieldName)) { + } else if (SLICE.match(currentFieldName, parser.getDeprecationHandler())) { sliceBuilder = SliceBuilder.fromXContent(parser); - } else if (COLLAPSE.match(currentFieldName)) { + } else if (COLLAPSE.match(currentFieldName, parser.getDeprecationHandler())) { collapse = CollapseBuilder.fromXContent(parser); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_ARRAY) { - if (STORED_FIELDS_FIELD.match(currentFieldName)) { + if (STORED_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { storedFieldsContext = StoredFieldsContext.fromXContent(STORED_FIELDS_FIELD.getPreferredName(), parser); - } else if (DOCVALUE_FIELDS_FIELD.match(currentFieldName)) { + } else if (DOCVALUE_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { docValueFields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { @@ -1072,18 +1072,18 @@ public void parseXContent(XContentParser parser) throws IOException { "] in [" + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation()); } } - } else if (INDICES_BOOST_FIELD.match(currentFieldName)) { + } else if (INDICES_BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) 
!= XContentParser.Token.END_ARRAY) { indexBoosts.add(new IndexBoost(parser)); } - } else if (SORT_FIELD.match(currentFieldName)) { + } else if (SORT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { sorts = new ArrayList<>(SortBuilder.fromXContent(parser)); - } else if (RESCORE_FIELD.match(currentFieldName)) { + } else if (RESCORE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { rescoreBuilders = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { rescoreBuilders.add(RescorerBuilder.parseFromXContent(parser)); } - } else if (STATS_FIELD.match(currentFieldName)) { + } else if (STATS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { stats = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { @@ -1093,9 +1093,9 @@ public void parseXContent(XContentParser parser) throws IOException { "] in [" + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation()); } } - } else if (_SOURCE_FIELD.match(currentFieldName)) { + } else if (_SOURCE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fetchSourceContext = FetchSourceContext.fromXContent(parser); - } else if (SEARCH_AFTER.match(currentFieldName)) { + } else if (SEARCH_AFTER.match(currentFieldName, parser.getDeprecationHandler())) { searchAfterBuilder = SearchAfterBuilder.fromXContent(parser); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", @@ -1373,16 +1373,16 @@ public ScriptField(XContentParser parser) throws IOException { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (SCRIPT_FIELD.match(currentFieldName)) { + if (SCRIPT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { script = Script.parse(parser); - } else if (IGNORE_FAILURE_FIELD.match(currentFieldName)) { + } else if (IGNORE_FAILURE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { ignoreFailure = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_OBJECT) { - if (SCRIPT_FIELD.match(currentFieldName)) { + if (SCRIPT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { script = Script.parse(parser); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java index 4ffc290f1ee89..e3c0bec6900e8 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java @@ -143,7 +143,7 @@ public static FetchSourceContext fromXContent(XContentParser parser) throws IOEx if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_ARRAY) { - if (INCLUDES_FIELD.match(currentFieldName)) { + if (INCLUDES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { List includesList = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == 
XContentParser.Token.VALUE_STRING) { @@ -154,7 +154,7 @@ public static FetchSourceContext fromXContent(XContentParser parser) throws IOEx } } includes = includesList.toArray(new String[includesList.size()]); - } else if (EXCLUDES_FIELD.match(currentFieldName)) { + } else if (EXCLUDES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { List excludesList = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { @@ -170,9 +170,9 @@ public static FetchSourceContext fromXContent(XContentParser parser) throws IOEx + " in [" + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_STRING) { - if (INCLUDES_FIELD.match(currentFieldName)) { + if (INCLUDES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { includes = new String[] {parser.text()}; - } else if (EXCLUDES_FIELD.match(currentFieldName)) { + } else if (EXCLUDES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { excludes = new String[] {parser.text()}; } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token diff --git a/server/src/main/java/org/elasticsearch/search/profile/ProfileResult.java b/server/src/main/java/org/elasticsearch/search/profile/ProfileResult.java index 16a2f8c8ebf4c..13dfa712ab448 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/ProfileResult.java +++ b/server/src/main/java/org/elasticsearch/search/profile/ProfileResult.java @@ -176,21 +176,21 @@ public static ProfileResult fromXContent(XContentParser parser) throws IOExcepti if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (TYPE.match(currentFieldName)) { + if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { type = parser.text(); - } else if (DESCRIPTION.match(currentFieldName)) { + } else if (DESCRIPTION.match(currentFieldName, parser.getDeprecationHandler())) { description = parser.text(); - } else if (NODE_TIME.match(currentFieldName)) { + } else if (NODE_TIME.match(currentFieldName, parser.getDeprecationHandler())) { // skip, total time is calculated by adding up 'timings' values in ProfileResult ctor parser.text(); - } else if (NODE_TIME_RAW.match(currentFieldName)) { + } else if (NODE_TIME_RAW.match(currentFieldName, parser.getDeprecationHandler())) { // skip, total time is calculated by adding up 'timings' values in ProfileResult ctor parser.longValue(); } else { parser.skipChildren(); } } else if (token == XContentParser.Token.START_OBJECT) { - if (BREAKDOWN.match(currentFieldName)) { + if (BREAKDOWN.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { ensureExpectedToken(parser.currentToken(), XContentParser.Token.FIELD_NAME, parser::getTokenLocation); String name = parser.currentName(); @@ -202,7 +202,7 @@ public static ProfileResult fromXContent(XContentParser parser) throws IOExcepti parser.skipChildren(); } } else if (token == XContentParser.Token.START_ARRAY) { - if (CHILDREN.match(currentFieldName)) { + if (CHILDREN.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { children.add(ProfileResult.fromXContent(parser)); }
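
All of the ParseField.match hunks above and below make the same mechanical change: deprecation reporting is threaded through an explicit DeprecationHandler instead of being left to ParseField itself, using either the handler carried by the parser (parser.getDeprecationHandler()) or LoggingDeprecationHandler.INSTANCE where no parser is in scope. A minimal sketch of the new call shape; the field name "format" and its deprecated alias "fmt" are invented for illustration and not taken from this PR:

```java
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;

public class ParseFieldMatchSketch {
    public static void main(String[] args) {
        // Hypothetical field with one deprecated alias, for illustration only.
        ParseField format = new ParseField("format", "fmt");

        // The handler decides what happens when a deprecated alias matches.
        // LoggingDeprecationHandler.INSTANCE emits a deprecation warning; call
        // sites that have a parser use parser.getDeprecationHandler() instead,
        // so the behaviour follows whoever created the parser.
        boolean matched = format.match("fmt", LoggingDeprecationHandler.INSTANCE);
        System.out.println(matched); // true, after the warning is logged
    }
}
```
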
diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java b/server/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java index 0d4ae0384baae..d553e1a8a7359 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java +++ b/server/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java @@ -169,20 +169,20 @@ public static CollectorResult fromXContent(XContentParser parser) throws IOExcep if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (NAME.match(currentFieldName)) { + if (NAME.match(currentFieldName, parser.getDeprecationHandler())) { name = parser.text(); - } else if (REASON.match(currentFieldName)) { + } else if (REASON.match(currentFieldName, parser.getDeprecationHandler())) { reason = parser.text(); - } else if (TIME.match(currentFieldName)) { + } else if (TIME.match(currentFieldName, parser.getDeprecationHandler())) { // we need to consume this value, but we use the raw nanosecond value parser.text(); - } else if (TIME_NANOS.match(currentFieldName)) { + } else if (TIME_NANOS.match(currentFieldName, parser.getDeprecationHandler())) { time = parser.longValue(); } else { parser.skipChildren(); } } else if (token == XContentParser.Token.START_ARRAY) { - if (CHILDREN.match(currentFieldName)) { + if (CHILDREN.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { children.add(CollectorResult.fromXContent(parser)); } diff --git a/server/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java b/server/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java index 2429c1c68e6ec..8b17437740cdf 100644 --- a/server/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java +++ b/server/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java @@ -27,39 +27,55 @@ import org.apache.lucene.search.LeafCollector; import java.io.IOException; -import java.util.concurrent.atomic.AtomicBoolean; /** * A {@link Collector} that early terminates collection after maxCountHits docs have been collected. */ public class EarlyTerminatingCollector extends FilterCollector { + static final class EarlyTerminationException extends RuntimeException { + EarlyTerminationException(String msg) { + super(msg); + } + } + private final int maxCountHits; private int numCollected; - private boolean terminatedEarly = false; + private boolean forceTermination; - EarlyTerminatingCollector(final Collector delegate, int maxCountHits) { + /** + * Constructor. + * @param delegate The delegated collector. + * @param maxCountHits The number of documents to collect before termination. + * @param forceTermination Whether the collection should be terminated with an exception ({@link EarlyTerminationException}) + * that is not caught by other {@link Collector} or with a {@link CollectionTerminatedException} otherwise. 
+ */ + EarlyTerminatingCollector(final Collector delegate, int maxCountHits, boolean forceTermination) { super(delegate); this.maxCountHits = maxCountHits; + this.forceTermination = forceTermination; } @Override public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { if (numCollected >= maxCountHits) { - throw new CollectionTerminatedException(); + if (forceTermination) { + throw new EarlyTerminationException("early termination [CountBased]"); + } else { + throw new CollectionTerminatedException(); + } } return new FilterLeafCollector(super.getLeafCollector(context)) { @Override public void collect(int doc) throws IOException { - super.collect(doc); - if (++numCollected >= maxCountHits) { - terminatedEarly = true; - throw new CollectionTerminatedException(); + if (++numCollected > maxCountHits) { + if (forceTermination) { + throw new EarlyTerminationException("early termination [CountBased]"); + } else { + throw new CollectionTerminatedException(); + } } + super.collect(doc); }; }; } - - public boolean terminatedEarly() { - return terminatedEarly; - } } diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java b/server/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java index 2ed806a32ae14..ff80dda77fb6d 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java @@ -171,16 +171,9 @@ static QueryCollectorContext createEarlyTerminationCollectorContext(int numHits) @Override Collector create(Collector in) throws IOException { assert collector == null; - this.collector = new EarlyTerminatingCollector(in, numHits); + this.collector = new EarlyTerminatingCollector(in, numHits, true); return collector; } - - @Override - void postProcess(QuerySearchResult result) throws IOException { - if (collector.terminatedEarly()) { - result.terminatedEarly(true); - } - } }; } } diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 6d8e2d60687e1..ca06005448c0d 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -177,6 +177,13 @@ static boolean execute(SearchContext searchContext, final LinkedList collectors = new LinkedList<>(); // whether the chain contains a collector that filters documents boolean hasFilterCollector = false; + if (searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER) { + // add terminate_after before the filter collectors + // it will only be applied on documents accepted by these filter collectors + collectors.add(createEarlyTerminationCollectorContext(searchContext.terminateAfter())); + // this collector can filter documents during the collection + hasFilterCollector = true; + } if (searchContext.parsedPostFilter() != null) { // add post filters before aggregations // it will only be applied to top hits @@ -194,12 +201,6 @@ static boolean execute(SearchContext searchContext, // this collector can filter documents during the collection hasFilterCollector = true; } - if (searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER) { - // apply terminate after after all filters collectors - collectors.add(createEarlyTerminationCollectorContext(searchContext.terminateAfter())); - // this collector can filter documents during the collection - 
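
The EarlyTerminatingCollector rewrite above replaces the terminatedEarly flag with an exception-based protocol: with forceTermination set, overshooting maxCountHits raises EarlyTerminationException, which, unlike Lucene's CollectionTerminatedException, is not caught per-segment and so unwinds the entire search; QueryPhase catches it once (see the hunk just below) and records terminatedEarly on the query result. A schematic of that flow, under the assumption of package-private access (both the constructor and the nested exception are package-private, so this sketch would have to live in org.elasticsearch.search.query):

```java
package org.elasticsearch.search.query;

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TotalHitCountCollector;

import java.io.IOException;

final class EarlyTerminationFlowSketch {
    // Returns true when more than terminateAfter documents matched and
    // collection was cut short, mirroring what QueryPhase reports.
    static boolean countWithTerminateAfter(IndexSearcher searcher, int terminateAfter) throws IOException {
        TotalHitCountCollector counter = new TotalHitCountCollector();
        // forceTermination = true: overshooting raises EarlyTerminationException
        // instead of CollectionTerminatedException, so no wrapping collector can
        // accidentally swallow the signal.
        EarlyTerminatingCollector collector = new EarlyTerminatingCollector(counter, terminateAfter, true);
        try {
            searcher.search(new MatchAllDocsQuery(), collector);
            return false; // terminateAfter was never exceeded
        } catch (EarlyTerminatingCollector.EarlyTerminationException e) {
            return true; // QueryPhase does queryResult.terminatedEarly(true) here
        }
    }
}
```
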
hasFilterCollector = true; - } boolean timeoutSet = scrollContext == null && searchContext.timeout() != null && searchContext.timeout().equals(SearchService.NO_TIMEOUT) == false; @@ -263,6 +264,8 @@ static boolean execute(SearchContext searchContext, try { searcher.search(query, queryCollector); + } catch (EarlyTerminatingCollector.EarlyTerminationException e) { + queryResult.terminatedEarly(true); } catch (TimeExceededException e) { assert timeoutSet : "TimeExceededException thrown even though timeout wasn't set"; diff --git a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java index 18e351e34a79b..cf4ff6c77b823 100644 --- a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java +++ b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java @@ -103,11 +103,11 @@ private EmptyTopDocsCollectorContext(IndexReader reader, Query query, this.collector = hitCountCollector; this.hitCountSupplier = hitCountCollector::getTotalHits; } else { - this.collector = new EarlyTerminatingCollector(hitCountCollector, 0); + this.collector = new EarlyTerminatingCollector(hitCountCollector, 0, false); this.hitCountSupplier = () -> hitCount; } } else { - this.collector = new EarlyTerminatingCollector(new TotalHitCountCollector(), 0); + this.collector = new EarlyTerminatingCollector(new TotalHitCountCollector(), 0, false); // for bwc hit count is set to 0, it will be converted to -1 by the coordinating node this.hitCountSupplier = () -> 0; } diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java b/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java index 531961e8d8f89..f991e9f14f07d 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java @@ -85,7 +85,7 @@ public static RescorerBuilder parseFromXContent(XContentParser parser) throws if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } else if (token.isValue()) { - if (WINDOW_SIZE_FIELD.match(fieldName)) { + if (WINDOW_SIZE_FIELD.match(fieldName, parser.getDeprecationHandler())) { windowSize = parser.intValue(); } else { throw new ParsingException(parser.getTokenLocation(), "rescore doesn't support [" + fieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 8dcd2fc766f4e..1d488a58857df 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -475,10 +475,10 @@ public static GeoDistanceSortBuilder fromXContent(XContentParser parser, String fieldName = currentName; } else if (token == XContentParser.Token.START_OBJECT) { - if (NESTED_FILTER_FIELD.match(currentName)) { + if (NESTED_FILTER_FIELD.match(currentName, parser.getDeprecationHandler())) { DEPRECATION_LOGGER.deprecated("[nested_filter] has been deprecated in favour of the [nested] parameter"); nestedFilter = parseInnerQueryBuilder(parser); - } else if (NESTED_FIELD.match(currentName)) { + } else if (NESTED_FIELD.match(currentName, parser.getDeprecationHandler())) { nestedSort = NestedSortBuilder.fromXContent(parser); } else { // the json in the format of -> field : { lat : 30, lon : 12 } @@ 
-495,17 +495,17 @@ public static GeoDistanceSortBuilder fromXContent(XContentParser parser, String geoPoints.add(point); } } else if (token.isValue()) { - if (ORDER_FIELD.match(currentName)) { + if (ORDER_FIELD.match(currentName, parser.getDeprecationHandler())) { order = SortOrder.fromString(parser.text()); - } else if (UNIT_FIELD.match(currentName)) { + } else if (UNIT_FIELD.match(currentName, parser.getDeprecationHandler())) { unit = DistanceUnit.fromString(parser.text()); - } else if (DISTANCE_TYPE_FIELD.match(currentName)) { + } else if (DISTANCE_TYPE_FIELD.match(currentName, parser.getDeprecationHandler())) { geoDistance = GeoDistance.fromString(parser.text()); - } else if (VALIDATION_METHOD_FIELD.match(currentName)) { + } else if (VALIDATION_METHOD_FIELD.match(currentName, parser.getDeprecationHandler())) { validation = GeoValidationMethod.fromString(parser.text()); - } else if (SORTMODE_FIELD.match(currentName)) { + } else if (SORTMODE_FIELD.match(currentName, parser.getDeprecationHandler())) { sortMode = SortMode.fromString(parser.text()); - } else if (NESTED_PATH_FIELD.match(currentName)) { + } else if (NESTED_PATH_FIELD.match(currentName, parser.getDeprecationHandler())) { DEPRECATION_LOGGER.deprecated("[nested_path] has been deprecated in favour of the [nested] parameter"); nestedPath = parser.text(); } else if (token == Token.VALUE_STRING){ diff --git a/server/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java index 10d155eea02d1..95e6cc81b6ca1 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java @@ -151,7 +151,7 @@ public static SuggestBuilder fromXContent(XContentParser parser) throws IOExcept if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } else if (token.isValue()) { - if (GLOBAL_TEXT_FIELD.match(fieldName)) { + if (GLOBAL_TEXT_FIELD.match(fieldName, parser.getDeprecationHandler())) { suggestBuilder.setGlobalText(parser.text()); } else { throw new IllegalArgumentException("[suggest] does not support [" + fieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java index 62d93db405a2a..dcdc669539f53 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java @@ -265,11 +265,11 @@ static SuggestionBuilder fromXContent(XContentParser parser) throws IOExcepti if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (TEXT_FIELD.match(currentFieldName)) { + if (TEXT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { suggestText = parser.text(); - } else if (PREFIX_FIELD.match(currentFieldName)) { + } else if (PREFIX_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { prefix = parser.text(); - } else if (REGEX_FIELD.match(currentFieldName)) { + } else if (REGEX_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { regex = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "suggestion does not support [" + currentFieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java 
b/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java index 222f69019164a..c4a6f15e03ced 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java @@ -110,7 +110,7 @@ public static SmoothingModel fromXContent(XContentParser parser) throws IOExcept if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } - if (token.isValue() && ALPHA_FIELD.match(fieldName)) { + if (token.isValue() && ALPHA_FIELD.match(fieldName, parser.getDeprecationHandler())) { alpha = parser.doubleValue(); } } @@ -122,4 +122,4 @@ public WordScorerFactory buildWordScorerFactory() { return (IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) -> new LaplaceScorer(reader, terms, field, realWordLikelyhood, separator, alpha); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java index 10a170d766dea..e609be1d77c18 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java @@ -139,17 +139,17 @@ public static LinearInterpolation fromXContent(XContentParser parser) throws IOE if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } else if (token.isValue()) { - if (TRIGRAM_FIELD.match(fieldName)) { + if (TRIGRAM_FIELD.match(fieldName, parser.getDeprecationHandler())) { trigramLambda = parser.doubleValue(); if (trigramLambda < 0) { throw new IllegalArgumentException("trigram_lambda must be positive"); } - } else if (BIGRAM_FIELD.match(fieldName)) { + } else if (BIGRAM_FIELD.match(fieldName, parser.getDeprecationHandler())) { bigramLambda = parser.doubleValue(); if (bigramLambda < 0) { throw new IllegalArgumentException("bigram_lambda must be positive"); } - } else if (UNIGRAM_FIELD.match(fieldName)) { + } else if (UNIGRAM_FIELD.match(fieldName, parser.getDeprecationHandler())) { unigramLambda = parser.doubleValue(); if (unigramLambda < 0) { throw new IllegalArgumentException("unigram_lambda must be positive"); @@ -172,4 +172,4 @@ public WordScorerFactory buildWordScorerFactory() { new LinearInterpolatingScorer(reader, terms, field, realWordLikelyhood, separator, trigramLambda, bigramLambda, unigramLambda); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java index ff06dfe81a9aa..670dac75ab7a0 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java @@ -30,6 +30,7 @@ import org.apache.lucene.util.CharsRefBuilder; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -115,8 +116,8 @@ public Suggestion> innerExecute(String name, P vars.put(SUGGESTION_TEMPLATE_VAR_NAME, spare.toString()); QueryShardContext shardContext = suggestion.getShardContext(); final 
String querySource = scriptFactory.newInstance(vars).execute(); - try (XContentParser parser = XContentFactory.xContent(querySource).createParser(shardContext.getXContentRegistry(), - querySource)) { + try (XContentParser parser = XContentFactory.xContent(querySource) + .createParser(shardContext.getXContentRegistry(), LoggingDeprecationHandler.INSTANCE, querySource)) { QueryBuilder innerQueryBuilder = AbstractQueryBuilder.parseInnerQueryBuilder(parser); final ParsedQuery parsedQuery = shardContext.toQuery(innerQueryBuilder); collateMatch = Lucene.exists(searcher, parsedQuery.query()); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java index 77f8fb55aa74e..4f20fad38cf0e 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -500,34 +500,34 @@ public static PhraseSuggestionBuilder fromXContent(XContentParser parser) throws if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (SuggestionBuilder.ANALYZER_FIELD.match(currentFieldName)) { + if (SuggestionBuilder.ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.analyzer(parser.text()); - } else if (SuggestionBuilder.FIELDNAME_FIELD.match(currentFieldName)) { + } else if (SuggestionBuilder.FIELDNAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fieldname = parser.text(); - } else if (SuggestionBuilder.SIZE_FIELD.match(currentFieldName)) { + } else if (SuggestionBuilder.SIZE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.size(parser.intValue()); - } else if (SuggestionBuilder.SHARDSIZE_FIELD.match(currentFieldName)) { + } else if (SuggestionBuilder.SHARDSIZE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.shardSize(parser.intValue()); - } else if (PhraseSuggestionBuilder.RWE_LIKELIHOOD_FIELD.match(currentFieldName)) { + } else if (PhraseSuggestionBuilder.RWE_LIKELIHOOD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.realWordErrorLikelihood(parser.floatValue()); - } else if (PhraseSuggestionBuilder.CONFIDENCE_FIELD.match(currentFieldName)) { + } else if (PhraseSuggestionBuilder.CONFIDENCE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.confidence(parser.floatValue()); - } else if (PhraseSuggestionBuilder.SEPARATOR_FIELD.match(currentFieldName)) { + } else if (PhraseSuggestionBuilder.SEPARATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.separator(parser.text()); - } else if (PhraseSuggestionBuilder.MAXERRORS_FIELD.match(currentFieldName)) { + } else if (PhraseSuggestionBuilder.MAXERRORS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.maxErrors(parser.floatValue()); - } else if (PhraseSuggestionBuilder.GRAMSIZE_FIELD.match(currentFieldName)) { + } else if (PhraseSuggestionBuilder.GRAMSIZE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.gramSize(parser.intValue()); - } else if (PhraseSuggestionBuilder.FORCE_UNIGRAM_FIELD.match(currentFieldName)) { + } else if (PhraseSuggestionBuilder.FORCE_UNIGRAM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.forceUnigrams(parser.booleanValue()); - } else 
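
The PhraseSuggester hunk above shows where the handler enters the system: createParser now takes the DeprecationHandler alongside the registry and the source, and parser.getDeprecationHandler() later hands exactly that handler to the ParseField.match call sites. A minimal sketch; the JSON body is invented for illustration:

```java
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

public class CreateParserSketch {
    public static void main(String[] args) throws IOException {
        String source = "{\"field\":\"title\"}"; // invented example body
        try (XContentParser parser = XContentFactory.xContent(source)
                .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, source)) {
            // fromXContent-style code then retrieves the same handler via
            // parser.getDeprecationHandler(), so the deprecation policy is
            // chosen once, at the point where the parser is created.
            parser.nextToken();
        }
    }
}
```
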
if (PhraseSuggestionBuilder.TOKEN_LIMIT_FIELD.match(currentFieldName)) { + } else if (PhraseSuggestionBuilder.TOKEN_LIMIT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.tokenLimit(parser.intValue()); } else { throw new ParsingException(parser.getTokenLocation(), "suggester[phrase] doesn't support field [" + currentFieldName + "]"); } } else if (token == Token.START_ARRAY) { - if (DirectCandidateGeneratorBuilder.DIRECT_GENERATOR_FIELD.match(currentFieldName)) { + if (DirectCandidateGeneratorBuilder.DIRECT_GENERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { // for now we only have a single type of generators while ((token = parser.nextToken()) == Token.START_OBJECT) { tmpSuggestion.addCandidateGenerator(DirectCandidateGeneratorBuilder.PARSER.apply(parser, null)); @@ -537,19 +537,19 @@ public static PhraseSuggestionBuilder fromXContent(XContentParser parser) throws "suggester[phrase] doesn't support array field [" + currentFieldName + "]"); } } else if (token == Token.START_OBJECT) { - if (PhraseSuggestionBuilder.SMOOTHING_MODEL_FIELD.match(currentFieldName)) { + if (PhraseSuggestionBuilder.SMOOTHING_MODEL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { ensureNoSmoothing(tmpSuggestion); tmpSuggestion.smoothingModel(SmoothingModel.fromXContent(parser)); - } else if (PhraseSuggestionBuilder.HIGHLIGHT_FIELD.match(currentFieldName)) { + } else if (PhraseSuggestionBuilder.HIGHLIGHT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { String preTag = null; String postTag = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (PhraseSuggestionBuilder.PRE_TAG_FIELD.match(currentFieldName)) { + if (PhraseSuggestionBuilder.PRE_TAG_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { preTag = parser.text(); - } else if (PhraseSuggestionBuilder.POST_TAG_FIELD.match(currentFieldName)) { + } else if (PhraseSuggestionBuilder.POST_TAG_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { postTag = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), @@ -558,11 +558,11 @@ public static PhraseSuggestionBuilder fromXContent(XContentParser parser) throws } } tmpSuggestion.highlight(preTag, postTag); - } else if (PhraseSuggestionBuilder.COLLATE_FIELD.match(currentFieldName)) { + } else if (PhraseSuggestionBuilder.COLLATE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); - } else if (PhraseSuggestionBuilder.COLLATE_QUERY_FIELD.match(currentFieldName)) { + } else if (PhraseSuggestionBuilder.COLLATE_QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { if (tmpSuggestion.collateQuery() != null) { throw new ParsingException(parser.getTokenLocation(), "suggester[phrase][collate] query already set, doesn't support additional [" @@ -570,9 +570,9 @@ public static PhraseSuggestionBuilder fromXContent(XContentParser parser) throws } Script template = Script.parse(parser, Script.DEFAULT_TEMPLATE_LANG); tmpSuggestion.collateQuery(template); - } else if (PhraseSuggestionBuilder.COLLATE_QUERY_PARAMS.match(currentFieldName)) { + } else if (PhraseSuggestionBuilder.COLLATE_QUERY_PARAMS.match(currentFieldName, parser.getDeprecationHandler())) { 
tmpSuggestion.collateParams(parser.map()); - } else if (PhraseSuggestionBuilder.COLLATE_QUERY_PRUNE.match(currentFieldName)) { + } else if (PhraseSuggestionBuilder.COLLATE_QUERY_PRUNE.match(currentFieldName, parser.getDeprecationHandler())) { if (parser.isBooleanValue()) { tmpSuggestion.collatePrune(parser.booleanValue()); } else { diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/SmoothingModel.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/SmoothingModel.java index f0fbc81ecd7f0..9c81f86058024 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/SmoothingModel.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/SmoothingModel.java @@ -72,11 +72,11 @@ public static SmoothingModel fromXContent(XContentParser parser) throws IOExcept if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if (LinearInterpolation.PARSE_FIELD.match(fieldName)) { + if (LinearInterpolation.PARSE_FIELD.match(fieldName, parser.getDeprecationHandler())) { model = LinearInterpolation.fromXContent(parser); - } else if (Laplace.PARSE_FIELD.match(fieldName)) { + } else if (Laplace.PARSE_FIELD.match(fieldName, parser.getDeprecationHandler())) { model = Laplace.fromXContent(parser); - } else if (StupidBackoff.PARSE_FIELD.match(fieldName)) { + } else if (StupidBackoff.PARSE_FIELD.match(fieldName, parser.getDeprecationHandler())) { model = StupidBackoff.fromXContent(parser); } else { throw new IllegalArgumentException("suggester[phrase] doesn't support object field [" + fieldName + "]"); @@ -97,4 +97,4 @@ public static SmoothingModel fromXContent(XContentParser parser) throws IOExcept protected abstract boolean doEquals(SmoothingModel other); protected abstract XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException; -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java index ebdd1c9ec2892..c7edde8bbaf76 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java @@ -113,7 +113,7 @@ public static SmoothingModel fromXContent(XContentParser parser) throws IOExcept if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } - if (token.isValue() && DISCOUNT_FIELD.match(fieldName)) { + if (token.isValue() && DISCOUNT_FIELD.match(fieldName, parser.getDeprecationHandler())) { discount = parser.doubleValue(); } } @@ -125,4 +125,4 @@ public WordScorerFactory buildWordScorerFactory() { return (IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) -> new StupidBackoffScorer(reader, terms, field, realWordLikelyhood, separator, discount); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java index b0d9891087f08..5d004dc824777 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java @@ -396,33 +396,33 @@ public static TermSuggestionBuilder fromXContent(XContentParser parser) throws I if (token == 
XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (SuggestionBuilder.ANALYZER_FIELD.match(currentFieldName)) { + if (SuggestionBuilder.ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.analyzer(parser.text()); - } else if (SuggestionBuilder.FIELDNAME_FIELD.match(currentFieldName)) { + } else if (SuggestionBuilder.FIELDNAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fieldname = parser.text(); - } else if (SuggestionBuilder.SIZE_FIELD.match(currentFieldName)) { + } else if (SuggestionBuilder.SIZE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.size(parser.intValue()); - } else if (SuggestionBuilder.SHARDSIZE_FIELD.match(currentFieldName)) { + } else if (SuggestionBuilder.SHARDSIZE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.shardSize(parser.intValue()); - } else if (SUGGESTMODE_FIELD.match(currentFieldName)) { + } else if (SUGGESTMODE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.suggestMode(SuggestMode.resolve(parser.text())); - } else if (ACCURACY_FIELD.match(currentFieldName)) { + } else if (ACCURACY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.accuracy(parser.floatValue()); - } else if (SORT_FIELD.match(currentFieldName)) { + } else if (SORT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.sort(SortBy.resolve(parser.text())); - } else if (STRING_DISTANCE_FIELD.match(currentFieldName)) { + } else if (STRING_DISTANCE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.stringDistance(StringDistanceImpl.resolve(parser.text())); - } else if (MAX_EDITS_FIELD.match(currentFieldName)) { + } else if (MAX_EDITS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.maxEdits(parser.intValue()); - } else if (MAX_INSPECTIONS_FIELD.match(currentFieldName)) { + } else if (MAX_INSPECTIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.maxInspections(parser.intValue()); - } else if (MAX_TERM_FREQ_FIELD.match(currentFieldName)) { + } else if (MAX_TERM_FREQ_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.maxTermFreq(parser.floatValue()); - } else if (PREFIX_LENGTH_FIELD.match(currentFieldName)) { + } else if (PREFIX_LENGTH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.prefixLength(parser.intValue()); - } else if (MIN_WORD_LENGTH_FIELD.match(currentFieldName)) { + } else if (MIN_WORD_LENGTH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.minWordLength(parser.intValue()); - } else if (MIN_DOC_FREQ_FIELD.match(currentFieldName)) { + } else if (MIN_DOC_FREQ_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { tmpSuggestion.minDocFreq(parser.floatValue()); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 7e2a7aab27743..df955c2e3b63d 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -390,7 +390,7 @@ private void snapshot(final IndexShard indexShard, final Snapshot snapshot, fina final Repository repository = 
snapshotsService.getRepositoriesService().repository(snapshot.getRepository()); try { // we flush first to make sure we get the latest writes snapshotted - try (Engine.IndexCommitRef snapshotRef = indexShard.acquireIndexCommit(false, true)) { + try (Engine.IndexCommitRef snapshotRef = indexShard.acquireLastIndexCommit(true)) { repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), snapshotStatus); if (logger.isDebugEnabled()) { final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); diff --git a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java new file mode 100644 index 0000000000000..011f5b380ecbd --- /dev/null +++ b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch; + +import org.apache.commons.codec.DecoderException; +import org.elasticsearch.test.ESTestCase; + +import java.util.Optional; + +import static org.elasticsearch.ExceptionsHelper.MAX_ITERATIONS; +import static org.elasticsearch.ExceptionsHelper.maybeError; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ExceptionsHelperTests extends ESTestCase { + + public void testMaybeError() { + final Error outOfMemoryError = new OutOfMemoryError(); + assertError(outOfMemoryError, outOfMemoryError); + + final DecoderException decoderException = new DecoderException(outOfMemoryError); + assertError(decoderException, outOfMemoryError); + + final Exception e = new Exception(); + e.addSuppressed(decoderException); + assertError(e, outOfMemoryError); + + final int depth = randomIntBetween(1, 16); + Throwable cause = new Exception(); + boolean fatal = false; + Error error = null; + for (int i = 0; i < depth; i++) { + final int length = randomIntBetween(1, 4); + for (int j = 0; j < length; j++) { + if (!fatal && rarely()) { + error = new Error(); + cause.addSuppressed(error); + fatal = true; + } else { + cause.addSuppressed(new Exception()); + } + } + if (!fatal && rarely()) { + cause = error = new Error(cause); + fatal = true; + } else { + cause = new Exception(cause); + } + } + if (fatal) { + assertError(cause, error); + } else { + assertFalse(maybeError(cause, logger).isPresent()); + } + + assertFalse(maybeError(new Exception(new DecoderException()), logger).isPresent()); + + Throwable chain = outOfMemoryError; + for (int i = 0; i < MAX_ITERATIONS; i++) { + chain = new Exception(chain); + } + assertFalse(maybeError(chain, logger).isPresent()); + } + + private void assertError(final Throwable cause, final Error error) { + final Optional maybeError = maybeError(cause, logger); + assertTrue(maybeError.isPresent()); + 
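
For the new ExceptionsHelperTests above: maybeError walks a throwable's causes and suppressed exceptions, bounded by MAX_ITERATIONS, and returns the first fatal Error it finds, if any. A reduced sketch of the contract the test asserts:

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ExceptionsHelper;

import java.util.Optional;

public class MaybeErrorSketch {
    private static final Logger logger = LogManager.getLogger(MaybeErrorSketch.class);

    public static void main(String[] args) {
        // An Error buried under exception wrappers is still surfaced.
        Error oom = new OutOfMemoryError();
        Exception wrapped = new Exception(new RuntimeException(oom));
        Optional<Error> found = ExceptionsHelper.maybeError(wrapped, logger);
        System.out.println(found.isPresent() && found.get() == oom); // true
    }
}
```
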
assertThat(maybeError.get(), equalTo(error)); + } + +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java new file mode 100644 index 0000000000000..79b4fa6dedc1e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -0,0 +1,202 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.reroute; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.allocation.RerouteExplanation; +import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; +import org.elasticsearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class ClusterRerouteResponseTests extends ESTestCase { + + public void testToXContent() throws IOException { + DiscoveryNode node0 = new DiscoveryNode("node0", new TransportAddress(TransportAddress.META_ADDRESS, 9000), Version.CURRENT); + DiscoveryNodes nodes = new DiscoveryNodes.Builder().add(node0).masterNodeId(node0.getId()).build(); + IndexMetaData indexMetaData = IndexMetaData.builder("index").settings(Settings.builder() + .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), true) + .put(IndexSettings.MAX_SCRIPT_FIELDS_SETTING.getKey(), 10) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()).build(); + ImmutableOpenMap.Builder openMapBuilder = ImmutableOpenMap.builder(); + openMapBuilder.put("index", indexMetaData); + MetaData metaData = MetaData.builder().indices(openMapBuilder.build()).build(); + ClusterState clusterState = ClusterState.builder(new 
ClusterName("test")).nodes(nodes).metaData(metaData).build(); + + RoutingExplanations routingExplanations = new RoutingExplanations(); + routingExplanations.add(new RerouteExplanation(new AllocateReplicaAllocationCommand("index", 0, "node0"), Decision.YES)); + ClusterRerouteResponse clusterRerouteResponse = new ClusterRerouteResponse(true, clusterState, routingExplanations); + { + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + clusterRerouteResponse.toXContent(builder, ToXContent.EMPTY_PARAMS); + assertEquals("{\n" + + " \"acknowledged\" : true,\n" + + " \"state\" : {\n" + + " \"version\" : 0,\n" + + " \"state_uuid\" : \"" + clusterState.stateUUID() + "\",\n" + + " \"master_node\" : \"node0\",\n" + + " \"blocks\" : { },\n" + + " \"nodes\" : {\n" + + " \"node0\" : {\n" + + " \"name\" : \"\",\n" + + " \"ephemeral_id\" : \"" + node0.getEphemeralId() + "\",\n" + + " \"transport_address\" : \"0.0.0.0:9000\",\n" + + " \"attributes\" : { }\n" + + " }\n" + + " },\n" + + " \"metadata\" : {\n" + + " \"cluster_uuid\" : \"_na_\",\n" + + " \"templates\" : { },\n" + + " \"indices\" : {\n" + + " \"index\" : {\n" + + " \"state\" : \"open\",\n" + + " \"settings\" : {\n" + + " \"index\" : {\n" + + " \"shard\" : {\n" + + " \"check_on_startup\" : \"true\"\n" + + " },\n" + + " \"number_of_shards\" : \"1\",\n" + + " \"number_of_replicas\" : \"0\",\n" + + " \"version\" : {\n" + + " \"created\" : \"" + Version.CURRENT.id + "\"\n" + + " },\n" + + " \"max_script_fields\" : \"10\"\n" + + " }\n" + + " },\n" + + " \"mappings\" : { },\n" + + " \"aliases\" : [ ],\n" + + " \"primary_terms\" : {\n" + + " \"0\" : 0\n" + + " },\n" + + " \"in_sync_allocations\" : {\n" + + " \"0\" : [ ]\n" + + " }\n" + + " }\n" + + " },\n" + + " \"index-graveyard\" : {\n" + + " \"tombstones\" : [ ]\n" + + " }\n" + + " },\n" + + " \"routing_table\" : {\n" + + " \"indices\" : { }\n" + + " },\n" + + " \"routing_nodes\" : {\n" + + " \"unassigned\" : [ ],\n" + + " \"nodes\" : {\n" + + " \"node0\" : [ ]\n" + + " }\n" + + " }\n" + + " }\n" + + "}", builder.string()); + + } + { + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + Map params = new HashMap<>(); + params.put("explain", "true"); + params.put("metric", "version,master_node"); + clusterRerouteResponse.toXContent(builder, new ToXContent.MapParams(params)); + assertEquals("{\n" + + " \"acknowledged\" : true,\n" + + " \"state\" : {\n" + + " \"version\" : 0,\n" + + " \"state_uuid\" : \"" + clusterState.stateUUID() + "\",\n" + + " \"master_node\" : \"node0\"\n" + + " },\n" + + " \"explanations\" : [\n" + + " {\n" + + " \"command\" : \"allocate_replica\",\n" + + " \"parameters\" : {\n" + + " \"index\" : \"index\",\n" + + " \"shard\" : 0,\n" + + " \"node\" : \"node0\"\n" + + " },\n" + + " \"decisions\" : [\n" + + " {\n" + + " \"decider\" : null,\n" + + " \"decision\" : \"YES\",\n" + + " \"explanation\" : \"none\"\n" + + " }\n" + + " ]\n" + + " }\n" + + " ]\n" + + "}", builder.string()); + } + { + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + Map params = new HashMap<>(); + params.put("metric", "metadata"); + params.put("settings_filter", "index.number*,index.version.created"); + clusterRerouteResponse.toXContent(builder, new ToXContent.MapParams(params)); + assertEquals("{\n" + + " \"acknowledged\" : true,\n" + + " \"state\" : {\n" + + " \"metadata\" : {\n" + + " \"cluster_uuid\" : \"_na_\",\n" + + " \"templates\" : { },\n" + + " \"indices\" : {\n" + + " \"index\" : {\n" + + " \"state\" : \"open\",\n" + + " \"settings\" : 
{\n" + + " \"index\" : {\n" + + " \"max_script_fields\" : \"10\",\n" + + " \"shard\" : {\n" + + " \"check_on_startup\" : \"true\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"mappings\" : { },\n" + + " \"aliases\" : [ ],\n" + + " \"primary_terms\" : {\n" + + " \"0\" : 0\n" + + " },\n" + + " \"in_sync_allocations\" : {\n" + + " \"0\" : [ ]\n" + + " }\n" + + " }\n" + + " },\n" + + " \"index-graveyard\" : {\n" + + " \"tombstones\" : [ ]\n" + + " }\n" + + " }\n" + + " }\n" + + "}", builder.string()); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java new file mode 100644 index 0000000000000..fdca03ebcda4e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.settings; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.XContentTestUtils; + +import java.io.IOException; +import java.util.Collections; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class ClusterUpdateSettingsRequestTests extends ESTestCase { + + public void testFromXContent() throws IOException { + doFromXContentTestWithRandomFields(false); + } + + public void testFromXContentWithRandomFields() throws IOException { + doFromXContentTestWithRandomFields(true); + } + + private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { + final ClusterUpdateSettingsRequest request = createTestItem(); + boolean humanReadable = randomBoolean(); + final XContentType xContentType = XContentType.JSON; + BytesReference originalBytes = toShuffledXContent(request, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + + if (addRandomFields) { + String unsupportedField = "unsupported_field"; + BytesReference mutated = XContentTestUtils.insertIntoXContent(xContentType.xContent(), originalBytes, + Collections.singletonList(""), () -> unsupportedField, () -> randomAlphaOfLengthBetween(3, 10)).bytes(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, + () -> ClusterUpdateSettingsRequest.fromXContent(createParser(xContentType.xContent(), mutated))); + assertThat(iae.getMessage(), + equalTo("[cluster_update_settings_request] unknown field [" + unsupportedField + "], parser not found")); + } else { + XContentParser parser = 
createParser(xContentType.xContent(), originalBytes); + ClusterUpdateSettingsRequest parsedRequest = ClusterUpdateSettingsRequest.fromXContent(parser); + + assertNull(parser.nextToken()); + assertThat(parsedRequest.transientSettings(), equalTo(request.transientSettings())); + assertThat(parsedRequest.persistentSettings(), equalTo(request.persistentSettings())); + } + } + + private static ClusterUpdateSettingsRequest createTestItem() { + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + request.persistentSettings(ClusterUpdateSettingsResponseTests.randomClusterSettings(0, 2)); + request.transientSettings(ClusterUpdateSettingsResponseTests.randomClusterSettings(0, 2)); + return request; + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java new file mode 100644 index 0000000000000..efbf33c9683b0 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
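The request test above smuggles an undeclared key into an otherwise valid body and expects an IllegalArgumentException, which is the general contract of a strict parser: unknown fields fail fast instead of being skipped. A minimal JDK-only sketch of that behaviour (class and field names here are illustrative, not part of the patch):

```java
import java.util.Map;
import java.util.Set;

// Hypothetical stand-in for the strict parsing exercised above: any key
// outside the declared field set fails fast instead of being ignored.
final class StrictRequestParser {
    private static final Set<String> KNOWN_FIELDS = Set.of("persistent", "transient");

    static void parse(Map<String, Object> body) {
        for (String field : body.keySet()) {
            if (!KNOWN_FIELDS.contains(field)) {
                throw new IllegalArgumentException(
                    "[cluster_update_settings_request] unknown field [" + field + "], parser not found");
            }
            // known fields would be copied onto the request object here
        }
    }
}
```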
+ */
+
+package org.elasticsearch.action.admin.cluster.settings;
+
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.Settings.Builder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.EqualsHashCodeTestUtils;
+
+import java.util.List;
+import java.util.Set;
+import java.util.function.Predicate;
+
+public class ClusterUpdateSettingsResponseTests extends AbstractStreamableXContentTestCase<ClusterUpdateSettingsResponse> {
+
+    @Override
+    protected ClusterUpdateSettingsResponse doParseInstance(XContentParser parser) {
+        return ClusterUpdateSettingsResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected EqualsHashCodeTestUtils.MutateFunction<ClusterUpdateSettingsResponse> getMutateFunction() {
+        return response -> {
+            int i = randomIntBetween(0, 2);
+            switch (i) {
+            case 0:
+                return new ClusterUpdateSettingsResponse(response.isAcknowledged() == false,
+                        response.transientSettings, response.persistentSettings);
+            case 1:
+                return new ClusterUpdateSettingsResponse(response.isAcknowledged(), mutateSettings(response.transientSettings),
+                        response.persistentSettings);
+            case 2:
+                return new ClusterUpdateSettingsResponse(response.isAcknowledged(), response.transientSettings,
+                        mutateSettings(response.persistentSettings));
+            default:
+                throw new UnsupportedOperationException();
+            }
+        };
+    }
+
+    private static Settings mutateSettings(Settings settings) {
+        if (settings.isEmpty()) {
+            return randomClusterSettings(1, 3);
+        }
+        Set<String> allKeys = settings.keySet();
+        List<String> keysToBeModified = randomSubsetOf(randomIntBetween(1, allKeys.size()), allKeys);
+        Builder builder = Settings.builder();
+        for (String key : allKeys) {
+            String value = settings.get(key);
+            if (keysToBeModified.contains(key)) {
+                value += randomAlphaOfLengthBetween(2, 5);
+            }
+            builder.put(key, value);
+        }
+        return builder.build();
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return p -> p.startsWith("transient") || p.startsWith("persistent");
+    }
+
+    public static Settings randomClusterSettings(int min, int max) {
+        int num = randomIntBetween(min, max);
+        Builder builder = Settings.builder();
+        for (int i = 0; i < num; i++) {
+            Setting<?> setting = randomFrom(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+            builder.put(setting.getKey(), randomAlphaOfLengthBetween(2, 10));
+        }
+        return builder.build();
+    }
+
+    @Override
+    protected ClusterUpdateSettingsResponse createTestInstance() {
+        return new ClusterUpdateSettingsResponse(randomBoolean(), randomClusterSettings(0, 2), randomClusterSettings(0, 2));
+    }
+
+    @Override
+    protected ClusterUpdateSettingsResponse createBlankInstance() {
+        return new ClusterUpdateSettingsResponse();
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponseTests.java
new file mode 100644
index 0000000000000..b4f48805154c6
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponseTests.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.alias;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.EqualsHashCodeTestUtils;
+
+public class IndicesAliasesResponseTests extends AbstractStreamableXContentTestCase<IndicesAliasesResponse> {
+
+    @Override
+    protected IndicesAliasesResponse doParseInstance(XContentParser parser) {
+        return IndicesAliasesResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected IndicesAliasesResponse createTestInstance() {
+        return new IndicesAliasesResponse(randomBoolean());
+    }
+
+    @Override
+    protected IndicesAliasesResponse createBlankInstance() {
+        return new IndicesAliasesResponse();
+    }
+
+    @Override
+    protected EqualsHashCodeTestUtils.MutateFunction<IndicesAliasesResponse> getMutateFunction() {
+        return response -> new IndicesAliasesResponse(response.isAcknowledged() == false);
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java
index e616e0383118d..6701270d11a99 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java
@@ -19,42 +19,29 @@
 package org.elasticsearch.action.admin.indices.close;
 
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.EqualsHashCodeTestUtils;
 
-import java.io.IOException;
+public class CloseIndexResponseTests extends AbstractStreamableXContentTestCase<CloseIndexResponse> {
 
-import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
-import static org.hamcrest.CoreMatchers.equalTo;
-
-public class CloseIndexResponseTests extends ESTestCase {
+    @Override
+    protected CloseIndexResponse doParseInstance(XContentParser parser) {
+        return CloseIndexResponse.fromXContent(parser);
+    }
 
-    public void testFromToXContent() throws IOException {
-        final CloseIndexResponse closeIndexResponse = createTestItem();
+    @Override
+    protected CloseIndexResponse createTestInstance() {
+        return new CloseIndexResponse(randomBoolean());
+    }
 
-        boolean humanReadable = randomBoolean();
-        final XContentType xContentType = randomFrom(XContentType.values());
-        BytesReference originalBytes = toShuffledXContent(closeIndexResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable);
-        BytesReference mutated;
-        if (randomBoolean()) {
-            mutated = insertRandomFields(xContentType, originalBytes, null, random());
-        } else {
-            mutated = originalBytes;
-        }
-
-        CloseIndexResponse parsedCloseIndexResponse;
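Each of these conversions leaves only four responsibilities in the subclass: create a random instance, create a blank instance for wire deserialization, parse from xContent, and mutate for equals/hashCode checks; the shared base class drives the actual round trips. A schematic, JDK-only model of that contract (names are illustrative, not the real AbstractStreamableXContentTestCase API):

```java
// Schematic harness: the subclass supplies creation, parsing and mutation;
// the harness owns the round-trip and equality assertions.
abstract class MiniRoundTripTestCase<T> {
    protected abstract T createTestInstance();         // random, fully populated
    protected abstract String toContent(T instance);   // render to a string form
    protected abstract T doParseInstance(String data); // parse the rendered form
    protected abstract T mutateInstance(T instance);   // must break equality

    final void runRoundTrip() {
        T original = createTestInstance();
        T parsed = doParseInstance(toContent(original));
        if (!original.equals(parsed) || original.hashCode() != parsed.hashCode()) {
            throw new AssertionError("round trip lost information");
        }
        if (original.equals(mutateInstance(original))) {
            throw new AssertionError("mutation must produce a non-equal instance");
        }
    }
}
```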
- try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { - parsedCloseIndexResponse = CloseIndexResponse.fromXContent(parser); - assertNull(parser.nextToken()); - } - assertThat(parsedCloseIndexResponse.isAcknowledged(), equalTo(closeIndexResponse.isAcknowledged())); + @Override + protected CloseIndexResponse createBlankInstance() { + return new CloseIndexResponse(); } - - private static CloseIndexResponse createTestItem() { - boolean acknowledged = randomBoolean(); - return new CloseIndexResponse(acknowledged); + + @Override + protected EqualsHashCodeTestUtils.MutateFunction getMutateFunction() { + return response -> new CloseIndexResponse(response.isAcknowledged() == false); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java index acb7161fb5a1f..034570bbc5c11 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java @@ -26,11 +26,14 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.io.IOException; import java.util.Map; @@ -119,17 +122,22 @@ public void testToAndFromXContent() throws IOException { assertMappingsEqual(createIndexRequest.mappings(), parsedCreateIndexRequest.mappings()); assertAliasesEqual(createIndexRequest.aliases(), parsedCreateIndexRequest.aliases()); assertEquals(createIndexRequest.settings(), parsedCreateIndexRequest.settings()); + + BytesReference finalBytes = toShuffledXContent(parsedCreateIndexRequest, xContentType, EMPTY_PARAMS, humanReadable); + ElasticsearchAssertions.assertToXContentEquivalent(originalBytes, finalBytes, xContentType); } - private void assertMappingsEqual(Map expected, Map actual) throws IOException { + public static void assertMappingsEqual(Map expected, Map actual) throws IOException { assertEquals(expected.keySet(), actual.keySet()); for (Map.Entry expectedEntry : expected.entrySet()) { String expectedValue = expectedEntry.getValue(); String actualValue = actual.get(expectedEntry.getKey()); - XContentParser expectedJson = createParser(XContentType.JSON.xContent(), expectedValue); - XContentParser actualJson = createParser(XContentType.JSON.xContent(), actualValue); - assertEquals(expectedJson.mapOrdered(), actualJson.mapOrdered()); + XContentParser expectedJson = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, expectedValue); + XContentParser actualJson = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, actualValue); + assertEquals(expectedJson.map(), actualJson.map()); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java 
b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java index 69238599194d3..21bf3a13a9624 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java @@ -21,34 +21,53 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; import java.io.IOException; -import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +public class CreateIndexResponseTests extends AbstractStreamableXContentTestCase { -public class CreateIndexResponseTests extends ESTestCase { - - public void testSerialization() throws IOException { - CreateIndexResponse response = new CreateIndexResponse(true, true, "foo"); + @Override + protected CreateIndexResponse createTestInstance() { + boolean acknowledged = randomBoolean(); + boolean shardsAcknowledged = acknowledged && randomBoolean(); + String index = randomAlphaOfLength(5); + return new CreateIndexResponse(acknowledged, shardsAcknowledged, index); + } - try (BytesStreamOutput output = new BytesStreamOutput()) { - response.writeTo(output); + @Override + protected CreateIndexResponse createBlankInstance() { + return new CreateIndexResponse(); + } - try (StreamInput in = output.bytes().streamInput()) { - CreateIndexResponse serialized = new CreateIndexResponse(); - serialized.readFrom(in); - assertEquals(response.isShardsAcknowledged(), serialized.isShardsAcknowledged()); - assertEquals(response.isAcknowledged(), serialized.isAcknowledged()); - assertEquals(response.index(), serialized.index()); + @Override + protected EqualsHashCodeTestUtils.MutateFunction getMutateFunction() { + return response -> { + if (randomBoolean()) { + if (randomBoolean()) { + boolean acknowledged = response.isAcknowledged() == false; + boolean shardsAcknowledged = acknowledged && response.isShardsAcknowledged(); + return new CreateIndexResponse(acknowledged, shardsAcknowledged, response.index()); + } else { + boolean shardsAcknowledged = response.isShardsAcknowledged() == false; + boolean acknowledged = shardsAcknowledged || response.isAcknowledged(); + return new CreateIndexResponse(acknowledged, shardsAcknowledged, response.index()); + } + } else { + return new CreateIndexResponse(response.isAcknowledged(), response.isShardsAcknowledged(), + response.index() + randomAlphaOfLengthBetween(2, 5)); } - } + }; + } + + @Override + protected CreateIndexResponse doParseInstance(XContentParser parser) { + return CreateIndexResponse.fromXContent(parser); } public void testSerializationWithOldVersion() throws IOException { @@ -76,52 +95,15 @@ public void testToXContent() { assertEquals("{\"acknowledged\":true,\"shards_acknowledged\":false,\"index\":\"index_name\"}", output); } - public void testToAndFromXContent() throws IOException { - doFromXContentTestWithRandomFields(false); - } - - /** - * This test adds random fields 
and objects to the xContent rendered out to - * ensure we can parse it back to be forward compatible with additions to - * the xContent - */ - public void testFromXContentWithRandomFields() throws IOException { - doFromXContentTestWithRandomFields(true); - } - - private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { - - final CreateIndexResponse createIndexResponse = createTestItem(); - - boolean humanReadable = randomBoolean(); - final XContentType xContentType = randomFrom(XContentType.values()); - BytesReference originalBytes = toShuffledXContent(createIndexResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); - - BytesReference mutated; - if (addRandomFields) { - mutated = insertRandomFields(xContentType, originalBytes, null, random()); - } else { - mutated = originalBytes; - } - CreateIndexResponse parsedCreateIndexResponse; - try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { - parsedCreateIndexResponse = CreateIndexResponse.fromXContent(parser); - assertNull(parser.nextToken()); + public void testToAndFromXContentIndexNull() throws IOException { + CreateIndexResponse response = new CreateIndexResponse(true, false, null); + String output = Strings.toString(response); + assertEquals("{\"acknowledged\":true,\"shards_acknowledged\":false,\"index\":null}", output); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, output)) { + CreateIndexResponse parsedResponse = CreateIndexResponse.fromXContent(parser); + assertNull(parsedResponse.index()); + assertTrue(parsedResponse.isAcknowledged()); + assertFalse(parsedResponse.isShardsAcknowledged()); } - - assertEquals(createIndexResponse.index(), parsedCreateIndexResponse.index()); - assertEquals(createIndexResponse.isShardsAcknowledged(), parsedCreateIndexResponse.isShardsAcknowledged()); - assertEquals(createIndexResponse.isAcknowledged(), parsedCreateIndexResponse.isAcknowledged()); - } - - /** - * Returns a random {@link CreateIndexResponse}. 
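The mutate function above is careful to preserve the invariant that shards_acknowledged can only be true when acknowledged is also true, mirroring createTestInstance, which derives shardsAcknowledged from acknowledged. The two legal flips, extracted as a sketch:

```java
// Flipping "acknowledged" may force "shardsAcknowledged" off; turning
// "shardsAcknowledged" on forces "acknowledged" on. Either way the pair
// never reaches the illegal state (acknowledged=false, shardsAcknowledged=true).
static boolean[] flipAcknowledged(boolean acknowledged, boolean shardsAcknowledged) {
    boolean newAcknowledged = acknowledged == false;
    return new boolean[] { newAcknowledged, newAcknowledged && shardsAcknowledged };
}

static boolean[] flipShardsAcknowledged(boolean acknowledged, boolean shardsAcknowledged) {
    boolean newShardsAcknowledged = shardsAcknowledged == false;
    return new boolean[] { newShardsAcknowledged || acknowledged, newShardsAcknowledged };
}
```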
- */ - private static CreateIndexResponse createTestItem() { - boolean acknowledged = randomBoolean(); - boolean shardsAcknowledged = acknowledged && randomBoolean(); - String index = randomAlphaOfLength(5); - - return new CreateIndexResponse(acknowledged, shardsAcknowledged, index); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponseTests.java index 4e036319ad95e..9325dbdfa2bd8 100755 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponseTests.java @@ -20,17 +20,11 @@ package org.elasticsearch.action.admin.indices.delete; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; -import java.io.IOException; - -import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; - -public class DeleteIndexResponseTests extends ESTestCase { +public class DeleteIndexResponseTests extends AbstractStreamableXContentTestCase { public void testToXContent() { DeleteIndexResponse response = new DeleteIndexResponse(true); @@ -38,48 +32,23 @@ public void testToXContent() { assertEquals("{\"acknowledged\":true}", output); } - public void testToAndFromXContent() throws IOException { - doFromXContentTestWithRandomFields(false); + @Override + protected DeleteIndexResponse doParseInstance(XContentParser parser) { + return DeleteIndexResponse.fromXContent(parser); } - /** - * This test adds random fields and objects to the xContent rendered out to - * ensure we can parse it back to be forward compatible with additions to - * the xContent - */ - public void testFromXContentWithRandomFields() throws IOException { - doFromXContentTestWithRandomFields(true); + @Override + protected DeleteIndexResponse createTestInstance() { + return new DeleteIndexResponse(randomBoolean()); } - private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { - - final DeleteIndexResponse deleteIndexResponse = createTestItem(); - - boolean humanReadable = randomBoolean(); - final XContentType xContentType = randomFrom(XContentType.values()); - BytesReference originalBytes = toShuffledXContent(deleteIndexResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); - - BytesReference mutated; - if (addRandomFields) { - mutated = insertRandomFields(xContentType, originalBytes, null, random()); - } else { - mutated = originalBytes; - } - DeleteIndexResponse parsedDeleteIndexResponse; - try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { - parsedDeleteIndexResponse = DeleteIndexResponse.fromXContent(parser); - assertNull(parser.nextToken()); - } - - assertEquals(deleteIndexResponse.isAcknowledged(), parsedDeleteIndexResponse.isAcknowledged()); + @Override + protected DeleteIndexResponse createBlankInstance() { + return new DeleteIndexResponse(); } - /** - * Returns a random {@link DeleteIndexResponse}. 
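The getMutateFunction hook plugs into EqualsHashCodeTestUtils, whose contract is: a copy must remain equal with an identical hash code, while a mutation must break equality. A compact JDK-only sketch of that check (illustrative, not the utility's real signature):

```java
import java.util.function.UnaryOperator;

final class MiniEqualsHashCodeCheck {
    // copy must stay equal and hash identically; mutate must break equality
    static <T> void check(T original, UnaryOperator<T> copy, UnaryOperator<T> mutate) {
        T duplicate = copy.apply(original);
        if (!original.equals(duplicate) || original.hashCode() != duplicate.hashCode()) {
            throw new AssertionError("copy broke equals/hashCode");
        }
        if (original.equals(mutate.apply(original))) {
            throw new AssertionError("mutation left the instance equal");
        }
    }
}
```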
- */ - private static DeleteIndexResponse createTestItem() throws IOException { - boolean acknowledged = randomBoolean(); - - return new DeleteIndexResponse(acknowledged); + @Override + protected EqualsHashCodeTestUtils.MutateFunction getMutateFunction() { + return response -> new DeleteIndexResponse(response.isAcknowledged() == false); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponseTests.java index a52969c628106..7d42d707605ac 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponseTests.java @@ -20,17 +20,11 @@ package org.elasticsearch.action.admin.indices.mapping.put; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; -import java.io.IOException; - -import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; - -public class PutMappingResponseTests extends ESTestCase { +public class PutMappingResponseTests extends AbstractStreamableXContentTestCase { public void testToXContent() { PutMappingResponse response = new PutMappingResponse(true); @@ -38,48 +32,23 @@ public void testToXContent() { assertEquals("{\"acknowledged\":true}", output); } - public void testToAndFromXContent() throws IOException { - doFromXContentTestWithRandomFields(false); + @Override + protected PutMappingResponse doParseInstance(XContentParser parser) { + return PutMappingResponse.fromXContent(parser); } - /** - * This test adds random fields and objects to the xContent rendered out to - * ensure we can parse it back to be forward compatible with additions to - * the xContent - */ - public void testFromXContentWithRandomFields() throws IOException { - doFromXContentTestWithRandomFields(true); + @Override + protected PutMappingResponse createTestInstance() { + return new PutMappingResponse(randomBoolean()); } - private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { - - final PutMappingResponse putMappingResponse = createTestItem(); - - boolean humanReadable = randomBoolean(); - final XContentType xContentType = randomFrom(XContentType.values()); - BytesReference originalBytes = toShuffledXContent(putMappingResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); - - BytesReference mutated; - if (addRandomFields) { - mutated = insertRandomFields(xContentType, originalBytes, null, random()); - } else { - mutated = originalBytes; - } - PutMappingResponse parsedPutMappingResponse; - try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { - parsedPutMappingResponse = PutMappingResponse.fromXContent(parser); - assertNull(parser.nextToken()); - } - - assertEquals(putMappingResponse.isAcknowledged(), parsedPutMappingResponse.isAcknowledged()); + @Override + protected PutMappingResponse createBlankInstance() { + return new PutMappingResponse(); } - /** - * Returns a random {@link PutMappingResponse}. 
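Moving to AbstractStreamableXContentTestCase also buys back the wire-format check that the hand-rolled tests used to perform. A sketch of that round trip, roughly the same shape as the testSerialization method deleted from CreateIndexResponseTests earlier in this patch (assumes the usual test imports, including BytesStreamOutput and StreamInput):

```java
// Wire round trip: serialize a populated response, deserialize into a blank
// instance, and compare the observable state.
public void testWireRoundTrip() throws IOException {
    PutMappingResponse original = new PutMappingResponse(randomBoolean());
    try (BytesStreamOutput output = new BytesStreamOutput()) {
        original.writeTo(output);                        // serialize
        try (StreamInput in = output.bytes().streamInput()) {
            PutMappingResponse copy = new PutMappingResponse();
            copy.readFrom(in);                           // deserialize into a blank instance
            assertEquals(original.isAcknowledged(), copy.isAcknowledged());
        }
    }
}
```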
- */ - private static PutMappingResponse createTestItem() throws IOException { - boolean acknowledged = randomBoolean(); - - return new PutMappingResponse(acknowledged); + @Override + protected EqualsHashCodeTestUtils.MutateFunction getMutateFunction() { + return response -> new PutMappingResponse(response.isAcknowledged() == false); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java index df49de0c1eeb0..5eca6254edf4f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java @@ -19,45 +19,41 @@ package org.elasticsearch.action.admin.indices.open; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; -import java.io.IOException; +public class OpenIndexResponseTests extends AbstractStreamableXContentTestCase { -import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; -import static org.hamcrest.CoreMatchers.equalTo; - -public class OpenIndexResponseTests extends ESTestCase { - - public void testFromToXContent() throws IOException { - final OpenIndexResponse openIndexResponse = createTestItem(); - - boolean humanReadable = randomBoolean(); - final XContentType xContentType = randomFrom(XContentType.values()); - BytesReference originalBytes = toShuffledXContent(openIndexResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); - BytesReference mutated; - if (randomBoolean()) { - mutated = insertRandomFields(xContentType, originalBytes, null, random()); - } else { - mutated = originalBytes; - } - - OpenIndexResponse parsedOpenIndexResponse; - try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { - parsedOpenIndexResponse = OpenIndexResponse.fromXContent(parser); - assertNull(parser.nextToken()); - } - - assertThat(parsedOpenIndexResponse.isShardsAcknowledged(), equalTo(openIndexResponse.isShardsAcknowledged())); - assertThat(parsedOpenIndexResponse.isAcknowledged(), equalTo(openIndexResponse.isAcknowledged())); + @Override + protected OpenIndexResponse doParseInstance(XContentParser parser){ + return OpenIndexResponse.fromXContent(parser); } - - private static OpenIndexResponse createTestItem() { + + @Override + protected OpenIndexResponse createTestInstance() { boolean acknowledged = randomBoolean(); boolean shardsAcknowledged = acknowledged && randomBoolean(); return new OpenIndexResponse(acknowledged, shardsAcknowledged); } + + @Override + protected OpenIndexResponse createBlankInstance() { + return new OpenIndexResponse(); + } + + @Override + protected EqualsHashCodeTestUtils.MutateFunction getMutateFunction() { + return response -> { + if (randomBoolean()) { + boolean acknowledged = response.isAcknowledged() == false; + boolean shardsAcknowledged = acknowledged && response.isShardsAcknowledged(); + return new OpenIndexResponse(acknowledged, shardsAcknowledged); + } else { + boolean shardsAcknowledged = response.isShardsAcknowledged() == false; + boolean acknowledged = shardsAcknowledged || response.isAcknowledged(); + 
return new OpenIndexResponse(acknowledged, shardsAcknowledged); + } + }; + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java index a4e6cdfade7ef..bc6e2d1b610ad 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java @@ -23,12 +23,13 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; import static org.hamcrest.Matchers.equalTo; public class ConditionTests extends ESTestCase { - public void testMaxAge() throws Exception { + public void testMaxAge() { final MaxAgeCondition maxAgeCondition = new MaxAgeCondition(TimeValue.timeValueHours(1)); long indexCreatedMatch = System.currentTimeMillis() - TimeValue.timeValueMinutes(61).getMillis(); @@ -42,7 +43,7 @@ public void testMaxAge() throws Exception { assertThat(evaluate.matched, equalTo(false)); } - public void testMaxDocs() throws Exception { + public void testMaxDocs() { final MaxDocsCondition maxDocsCondition = new MaxDocsCondition(100L); long maxDocsMatch = randomIntBetween(100, 1000); @@ -56,7 +57,7 @@ public void testMaxDocs() throws Exception { assertThat(evaluate.matched, equalTo(false)); } - public void testMaxSize() throws Exception { + public void testMaxSize() { MaxSizeCondition maxSizeCondition = new MaxSizeCondition(new ByteSizeValue(randomIntBetween(10, 20), ByteSizeUnit.MB)); Condition.Result result = maxSizeCondition.evaluate(new Condition.Stats(randomNonNegativeLong(), randomNonNegativeLong(), @@ -72,7 +73,21 @@ public void testMaxSize() throws Exception { assertThat(result.matched, equalTo(true)); } - private ByteSizeValue randomByteSize() { + public void testEqualsAndHashCode() { + MaxDocsCondition maxDocsCondition = new MaxDocsCondition(randomLong()); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(maxDocsCondition, condition -> new MaxDocsCondition(condition.value), + condition -> new MaxDocsCondition(randomLong())); + + MaxSizeCondition maxSizeCondition = new MaxSizeCondition(randomByteSize()); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(maxSizeCondition, condition -> new MaxSizeCondition(condition.value), + condition -> new MaxSizeCondition(randomByteSize())); + + MaxAgeCondition maxAgeCondition = new MaxAgeCondition(new TimeValue(randomNonNegativeLong())); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(maxAgeCondition, condition -> new MaxAgeCondition(condition.value), + condition -> new MaxAgeCondition(new TimeValue(randomNonNegativeLong()))); + } + + private static ByteSizeValue randomByteSize() { return new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index e2c31db81ce60..869bba452fefe 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -37,15 +37,12 @@ import java.util.Collection; import java.util.Collections; -import java.util.Map; import java.util.Set; -import java.util.stream.Collectors; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; -import static org.hamcrest.Matchers.hasProperty; import static org.hamcrest.Matchers.is; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) @@ -143,12 +140,8 @@ public void testRolloverConditionsNotMet() throws Exception { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(false)); assertThat(response.getConditionStatus().size(), equalTo(2)); - - - assertThat(response.getConditionStatus(), everyItem(hasProperty("value", is(false)))); - Set conditions = response.getConditionStatus().stream() - .map(Map.Entry::getKey) - .collect(Collectors.toSet()); + assertThat(response.getConditionStatus().values(), everyItem(is(false))); + Set conditions = response.getConditionStatus().keySet(); assertThat(conditions, containsInAnyOrder( new MaxSizeCondition(new ByteSizeValue(10, ByteSizeUnit.MB)).toString(), new MaxAgeCondition(TimeValue.timeValueHours(4)).toString())); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java index 290ba79af0738..a2ef02af486a7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java @@ -19,6 +19,10 @@ package org.elasticsearch.action.admin.indices.rollover; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestTests; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; @@ -29,15 +33,22 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.XContentTestUtils; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.junit.Before; +import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; +import java.util.Map; +import java.util.function.Consumer; +import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; import static org.hamcrest.Matchers.equalTo; public class RolloverRequestTests extends ESTestCase { @@ -61,23 +72,15 @@ public void testConditionsParsing() throws Exception { .field("max_size", "45gb") .endObject() .endObject(); - RolloverRequest.PARSER.parse(createParser(builder), request, null); - Set conditions = request.getConditions(); + request.fromXContent(createParser(builder)); + Map conditions = request.getConditions(); assertThat(conditions.size(), equalTo(3)); - for (Condition condition : conditions) { - if (condition instanceof MaxAgeCondition) { - MaxAgeCondition 
maxAgeCondition = (MaxAgeCondition) condition; - assertThat(maxAgeCondition.value.getMillis(), equalTo(TimeValue.timeValueHours(24 * 10).getMillis())); - } else if (condition instanceof MaxDocsCondition) { - MaxDocsCondition maxDocsCondition = (MaxDocsCondition) condition; - assertThat(maxDocsCondition.value, equalTo(100L)); - } else if (condition instanceof MaxSizeCondition) { - MaxSizeCondition maxSizeCondition = (MaxSizeCondition) condition; - assertThat(maxSizeCondition.value.getBytes(), equalTo(ByteSizeUnit.GB.toBytes(45))); - } else { - fail("unexpected condition " + condition); - } - } + MaxAgeCondition maxAgeCondition = (MaxAgeCondition)conditions.get(MaxAgeCondition.NAME); + assertThat(maxAgeCondition.value.getMillis(), equalTo(TimeValue.timeValueHours(24 * 10).getMillis())); + MaxDocsCondition maxDocsCondition = (MaxDocsCondition)conditions.get(MaxDocsCondition.NAME); + assertThat(maxDocsCondition.value, equalTo(100L)); + MaxSizeCondition maxSizeCondition = (MaxSizeCondition)conditions.get(MaxSizeCondition.NAME); + assertThat(maxSizeCondition.value.getBytes(), equalTo(ByteSizeUnit.GB.toBytes(45))); } public void testParsingWithIndexSettings() throws Exception { @@ -105,8 +108,8 @@ public void testParsingWithIndexSettings() throws Exception { .startObject("alias1").endObject() .endObject() .endObject(); - RolloverRequest.PARSER.parse(createParser(builder), request, null); - Set conditions = request.getConditions(); + request.fromXContent(createParser(builder)); + Map conditions = request.getConditions(); assertThat(conditions.size(), equalTo(2)); assertThat(request.getCreateIndexRequest().mappings().size(), equalTo(1)); assertThat(request.getCreateIndexRequest().aliases().size(), equalTo(1)); @@ -126,19 +129,92 @@ public void testSerialize() throws Exception { cloneRequest.readFrom(in); assertThat(cloneRequest.getNewIndexName(), equalTo(originalRequest.getNewIndexName())); assertThat(cloneRequest.getAlias(), equalTo(originalRequest.getAlias())); + for (Map.Entry entry : cloneRequest.getConditions().entrySet()) { + Condition condition = originalRequest.getConditions().get(entry.getKey()); + //here we compare the string representation as there is some information loss when serializing + //and de-serializing MaxAgeCondition + assertEquals(condition.toString(), entry.getValue().toString()); + } + } + } + } - List originalConditions = originalRequest.getConditions().stream() - .map(Condition::toString) - .sorted() - .collect(Collectors.toList()); + public void testToAndFromXContent() throws IOException { + RolloverRequest rolloverRequest = createTestItem(); - List cloneConditions = cloneRequest.getConditions().stream() - .map(Condition::toString) - .sorted() - .collect(Collectors.toList()); + final XContentType xContentType = randomFrom(XContentType.values()); + boolean humanReadable = randomBoolean(); + BytesReference originalBytes = toShuffledXContent(rolloverRequest, xContentType, EMPTY_PARAMS, humanReadable); - assertThat(originalConditions, equalTo(cloneConditions)); - } + RolloverRequest parsedRolloverRequest = new RolloverRequest(); + parsedRolloverRequest.fromXContent(createParser(xContentType.xContent(), originalBytes)); + + CreateIndexRequest createIndexRequest = rolloverRequest.getCreateIndexRequest(); + CreateIndexRequest parsedCreateIndexRequest = parsedRolloverRequest.getCreateIndexRequest(); + CreateIndexRequestTests.assertMappingsEqual(createIndexRequest.mappings(), parsedCreateIndexRequest.mappings()); + 
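Rollover conditions now live in a name-keyed map rather than a set, which is what lets the assertions above fetch each condition directly through its NAME constant instead of walking an instanceof chain; the string comparison in the serialization test works around the MaxAgeCondition precision loss noted in the comment. A JDK-only illustration of the lookup change (keys taken from the request body above):

```java
import java.util.HashMap;
import java.util.Map;

final class ConditionLookupDemo {
    public static void main(String[] args) {
        // conditions keyed by a stable name, as in the parsed rollover request
        Map<String, Object> conditions = new HashMap<>();
        conditions.put("max_age", "10d");
        conditions.put("max_docs", 100L);
        conditions.put("max_size", "45gb");

        // direct lookup by key replaces iterate-and-instanceof
        System.out.println("max_age -> " + conditions.get("max_age"));
    }
}
```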
CreateIndexRequestTests.assertAliasesEqual(createIndexRequest.aliases(), parsedCreateIndexRequest.aliases()); + assertEquals(createIndexRequest.settings(), parsedCreateIndexRequest.settings()); + assertEquals(rolloverRequest.getConditions(), parsedRolloverRequest.getConditions()); + + BytesReference finalBytes = toShuffledXContent(parsedRolloverRequest, xContentType, EMPTY_PARAMS, humanReadable); + ElasticsearchAssertions.assertToXContentEquivalent(originalBytes, finalBytes, xContentType); + } + + public void testUnknownFields() throws IOException { + final RolloverRequest request = new RolloverRequest(); + XContentType xContentType = randomFrom(XContentType.values()); + final XContentBuilder builder = XContentFactory.contentBuilder(xContentType); + builder.startObject(); + { + builder.startObject("conditions"); + builder.field("max_age", "10d"); + builder.endObject(); + } + builder.endObject(); + BytesReference mutated = XContentTestUtils.insertRandomFields(xContentType, builder.bytes(), null, random()); + expectThrows(ParsingException.class, () -> request.fromXContent(createParser(xContentType.xContent(), mutated))); + } + + public void testSameConditionCanOnlyBeAddedOnce() { + RolloverRequest rolloverRequest = new RolloverRequest(); + Consumer rolloverRequestConsumer = randomFrom(conditionsGenerator); + rolloverRequestConsumer.accept(rolloverRequest); + expectThrows(IllegalArgumentException.class, () -> rolloverRequestConsumer.accept(rolloverRequest)); + } + + public void testValidation() { + RolloverRequest rolloverRequest = new RolloverRequest(); + assertNotNull(rolloverRequest.getCreateIndexRequest()); + ActionRequestValidationException validationException = rolloverRequest.validate(); + assertNotNull(validationException); + assertEquals(1, validationException.validationErrors().size()); + assertEquals("index alias is missing", validationException.validationErrors().get(0)); + } + + private static List> conditionsGenerator = new ArrayList<>(); + static { + conditionsGenerator.add((request) -> request.addMaxIndexDocsCondition(randomNonNegativeLong())); + conditionsGenerator.add((request) -> request.addMaxIndexSizeCondition(new ByteSizeValue(randomNonNegativeLong()))); + conditionsGenerator.add((request) -> request.addMaxIndexAgeCondition(new TimeValue(randomNonNegativeLong()))); + } + + private static RolloverRequest createTestItem() throws IOException { + RolloverRequest rolloverRequest = new RolloverRequest(); + if (randomBoolean()) { + String type = randomAlphaOfLengthBetween(3, 10); + rolloverRequest.getCreateIndexRequest().mapping(type, RandomCreateIndexGenerator.randomMapping(type)); + } + if (randomBoolean()) { + RandomCreateIndexGenerator.randomAliases(rolloverRequest.getCreateIndexRequest()); + } + if (randomBoolean()) { + rolloverRequest.getCreateIndexRequest().settings(RandomCreateIndexGenerator.randomIndexSettings()); + } + int numConditions = randomIntBetween(0, 3); + List> conditions = randomSubsetOf(numConditions, conditionsGenerator); + for (Consumer consumer : conditions) { + consumer.accept(rolloverRequest); } + return rolloverRequest; } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java new file mode 100644 index 0000000000000..2f76f0f7ea725 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elasticsearch 
under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.rollover; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Predicate; +import java.util.function.Supplier; + +public class RolloverResponseTests extends AbstractStreamableXContentTestCase { + + @Override + protected RolloverResponse createTestInstance() { + boolean acknowledged = randomBoolean(); + boolean shardsAcknowledged = acknowledged && randomBoolean(); + return new RolloverResponse(randomAlphaOfLengthBetween(3, 10), + randomAlphaOfLengthBetween(3, 10), randomResults(true), randomBoolean(), randomBoolean(), acknowledged, shardsAcknowledged); + } + + private static Map randomResults(boolean allowNoItems) { + Map results = new HashMap<>(); + int numResults = randomIntBetween(allowNoItems ? 
0 : 1, 3);
+        List<Supplier<Condition<?>>> conditions = randomSubsetOf(numResults, conditionSuppliers);
+        for (Supplier<Condition<?>> condition : conditions) {
+            Condition<?> cond = condition.get();
+            results.put(cond.name, randomBoolean());
+        }
+        return results;
+    }
+
+    private static final List<Supplier<Condition<?>>> conditionSuppliers = new ArrayList<>();
+    static {
+        conditionSuppliers.add(() -> new MaxAgeCondition(new TimeValue(randomNonNegativeLong())));
+        conditionSuppliers.add(() -> new MaxDocsCondition(randomNonNegativeLong()));
+        conditionSuppliers.add(() -> new MaxSizeCondition(new org.elasticsearch.common.unit.ByteSizeValue(randomNonNegativeLong())));
+    }
+
+    @Override
+    protected RolloverResponse createBlankInstance() {
+        return new RolloverResponse();
+    }
+
+    @Override
+    protected RolloverResponse doParseInstance(XContentParser parser) {
+        return RolloverResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return field -> field.startsWith("conditions");
+    }
+
+    @Override
+    protected EqualsHashCodeTestUtils.MutateFunction<RolloverResponse> getMutateFunction() {
+        return response -> {
+            int i = randomIntBetween(0, 6);
+            switch (i) {
+            case 0:
+                return new RolloverResponse(response.getOldIndex() + randomAlphaOfLengthBetween(2, 5),
+                        response.getNewIndex(), response.getConditionStatus(), response.isDryRun(), response.isRolledOver(),
+                        response.isAcknowledged(), response.isShardsAcknowledged());
+            case 1:
+                return new RolloverResponse(response.getOldIndex(), response.getNewIndex() + randomAlphaOfLengthBetween(2, 5),
+                        response.getConditionStatus(), response.isDryRun(), response.isRolledOver(),
+                        response.isAcknowledged(), response.isShardsAcknowledged());
+            case 2:
+                Map<String, Boolean> results;
+                if (response.getConditionStatus().isEmpty()) {
+                    results = randomResults(false);
+                } else {
+                    results = new HashMap<>(response.getConditionStatus().size());
+                    List<String> keys = randomSubsetOf(randomIntBetween(1, response.getConditionStatus().size()),
+                            response.getConditionStatus().keySet());
+                    for (Map.Entry<String, Boolean> entry : response.getConditionStatus().entrySet()) {
+                        boolean value = keys.contains(entry.getKey()) ?
entry.getValue() == false : entry.getValue(); + results.put(entry.getKey(), value); + } + } + return new RolloverResponse(response.getOldIndex(), response.getNewIndex(), results, response.isDryRun(), + response.isRolledOver(), response.isAcknowledged(), response.isShardsAcknowledged()); + case 3: + return new RolloverResponse(response.getOldIndex(), response.getNewIndex(), + response.getConditionStatus(), response.isDryRun() == false, response.isRolledOver(), + response.isAcknowledged(), response.isShardsAcknowledged()); + case 4: + return new RolloverResponse(response.getOldIndex(), response.getNewIndex(), + response.getConditionStatus(), response.isDryRun(), response.isRolledOver() == false, + response.isAcknowledged(), response.isShardsAcknowledged()); + case 5: { + boolean acknowledged = response.isAcknowledged() == false; + boolean shardsAcknowledged = acknowledged && response.isShardsAcknowledged(); + return new RolloverResponse(response.getOldIndex(), response.getNewIndex(), + response.getConditionStatus(), response.isDryRun(), response.isRolledOver(), + acknowledged, shardsAcknowledged); + } + case 6: { + boolean shardsAcknowledged = response.isShardsAcknowledged() == false; + boolean acknowledged = shardsAcknowledged || response.isAcknowledged(); + return new RolloverResponse(response.getOldIndex(), response.getNewIndex(), + response.getConditionStatus(), response.isDryRun(), response.isRolledOver(), + acknowledged, shardsAcknowledged); + } + default: + throw new UnsupportedOperationException(); + } + }; + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java index 3366646e24a79..be88a69a8f4a3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -44,6 +44,7 @@ import java.util.Arrays; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Set; import static org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction.evaluateConditions; @@ -58,7 +59,7 @@ public class TransportRolloverActionTests extends ESTestCase { - public void testDocStatsSelectionFromPrimariesOnly() throws Exception { + public void testDocStatsSelectionFromPrimariesOnly() { long docsInPrimaryShards = 100; long docsInShards = 200; @@ -70,7 +71,7 @@ public void testDocStatsSelectionFromPrimariesOnly() throws Exception { assertEquals(docsInPrimaryShards, argument.getValue().numDocs); } - public void testEvaluateConditions() throws Exception { + public void testEvaluateConditions() { MaxDocsCondition maxDocsCondition = new MaxDocsCondition(100L); MaxAgeCondition maxAgeCondition = new MaxAgeCondition(TimeValue.timeValueHours(2)); MaxSizeCondition maxSizeCondition = new MaxSizeCondition(new ByteSizeValue(randomIntBetween(10, 100), ByteSizeUnit.MB)); @@ -89,29 +90,29 @@ public void testEvaluateConditions() throws Exception { .settings(settings) .build(); final Set conditions = Sets.newHashSet(maxDocsCondition, maxAgeCondition, maxSizeCondition); - Set results = evaluateConditions(conditions, + Map results = evaluateConditions(conditions, new DocsStats(matchMaxDocs, 0L, ByteSizeUnit.MB.toBytes(120)), metaData); assertThat(results.size(), equalTo(3)); - for (Condition.Result result : results) { - assertThat(result.matched, 
equalTo(true)); + for (Boolean matched : results.values()) { + assertThat(matched, equalTo(true)); } results = evaluateConditions(conditions, new DocsStats(notMatchMaxDocs, 0, notMatchMaxSize.getBytes()), metaData); assertThat(results.size(), equalTo(3)); - for (Condition.Result result : results) { - if (result.condition instanceof MaxAgeCondition) { - assertThat(result.matched, equalTo(true)); - } else if (result.condition instanceof MaxDocsCondition) { - assertThat(result.matched, equalTo(false)); - } else if (result.condition instanceof MaxSizeCondition) { - assertThat(result.matched, equalTo(false)); + for (Map.Entry entry : results.entrySet()) { + if (entry.getKey().equals(maxAgeCondition.toString())) { + assertThat(entry.getValue(), equalTo(true)); + } else if (entry.getKey().equals(maxDocsCondition.toString())) { + assertThat(entry.getValue(), equalTo(false)); + } else if (entry.getKey().equals(maxSizeCondition.toString())) { + assertThat(entry.getValue(), equalTo(false)); } else { - fail("unknown condition result found " + result.condition); + fail("unknown condition result found " + entry.getKey()); } } } - public void testEvaluateWithoutDocStats() throws Exception { + public void testEvaluateWithoutDocStats() { MaxDocsCondition maxDocsCondition = new MaxDocsCondition(randomNonNegativeLong()); MaxAgeCondition maxAgeCondition = new MaxAgeCondition(TimeValue.timeValueHours(randomIntBetween(1, 3))); MaxSizeCondition maxSizeCondition = new MaxSizeCondition(new ByteSizeValue(randomNonNegativeLong())); @@ -128,23 +129,23 @@ public void testEvaluateWithoutDocStats() throws Exception { .creationDate(System.currentTimeMillis() - TimeValue.timeValueHours(randomIntBetween(5, 10)).getMillis()) .settings(settings) .build(); - Set results = evaluateConditions(conditions, null, metaData); + Map results = evaluateConditions(conditions, null, metaData); assertThat(results.size(), equalTo(3)); - for (Condition.Result result : results) { - if (result.condition instanceof MaxAgeCondition) { - assertThat(result.matched, equalTo(true)); - } else if (result.condition instanceof MaxDocsCondition) { - assertThat(result.matched, equalTo(false)); - } else if (result.condition instanceof MaxSizeCondition) { - assertThat(result.matched, equalTo(false)); + for (Map.Entry entry : results.entrySet()) { + if (entry.getKey().equals(maxAgeCondition.toString())) { + assertThat(entry.getValue(), equalTo(true)); + } else if (entry.getKey().equals(maxDocsCondition.toString())) { + assertThat(entry.getValue(), equalTo(false)); + } else if (entry.getKey().equals(maxSizeCondition.toString())) { + assertThat(entry.getValue(), equalTo(false)); } else { - fail("unknown condition result found " + result.condition); + fail("unknown condition result found " + entry.getKey()); } } } - public void testCreateUpdateAliasRequest() throws Exception { + public void testCreateUpdateAliasRequest() { String sourceAlias = randomAlphaOfLength(10); String sourceIndex = randomAlphaOfLength(10); String targetIndex = randomAlphaOfLength(10); @@ -171,7 +172,7 @@ public void testCreateUpdateAliasRequest() throws Exception { assertTrue(foundRemove); } - public void testValidation() throws Exception { + public void testValidation() { String index1 = randomAlphaOfLength(10); String alias = randomAlphaOfLength(10); String index2 = randomAlphaOfLength(10); @@ -206,7 +207,7 @@ public void testValidation() throws Exception { TransportRolloverAction.validate(metaData, new RolloverRequest(alias, randomAlphaOfLength(10))); } - public void 
testGenerateRolloverIndexName() throws Exception { + public void testGenerateRolloverIndexName() { String invalidIndexName = randomAlphaOfLength(10) + "A"; IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(Settings.EMPTY); expectThrows(IllegalArgumentException.class, () -> @@ -224,12 +225,12 @@ public void testGenerateRolloverIndexName() throws Exception { indexNameExpressionResolver)); } - public void testCreateIndexRequest() throws Exception { + public void testCreateIndexRequest() { String alias = randomAlphaOfLength(10); String rolloverIndex = randomAlphaOfLength(10); final RolloverRequest rolloverRequest = new RolloverRequest(alias, randomAlphaOfLength(10)); final ActiveShardCount activeShardCount = randomBoolean() ? ActiveShardCount.ALL : ActiveShardCount.ONE; - rolloverRequest.setWaitForActiveShards(activeShardCount); + rolloverRequest.getCreateIndexRequest().waitForActiveShards(activeShardCount); final Settings settings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) @@ -244,7 +245,7 @@ public void testCreateIndexRequest() throws Exception { assertThat(createIndexRequest.cause(), equalTo("rollover_index")); } - public void testRejectDuplicateAlias() throws Exception { + public void testRejectDuplicateAlias() { final IndexTemplateMetaData template = IndexTemplateMetaData.builder("test-template") .patterns(Arrays.asList("foo-*", "bar-*")) .putAlias(AliasMetaData.builder("foo-write")).putAlias(AliasMetaData.builder("bar-write")) @@ -271,7 +272,7 @@ private IndicesStatsResponse createIndicesStatResponse(long totalDocs, long prim return response; } - private IndexMetaData createMetaData() { + private static IndexMetaData createMetaData() { final Settings settings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) @@ -284,7 +285,7 @@ private IndexMetaData createMetaData() { .build(); } - private Condition createTestCondition() { + private static Condition createTestCondition() { final Condition condition = mock(Condition.class); when(condition.evaluate(any())).thenReturn(new Condition.Result(condition, true)); return condition; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java index 6061be3353929..77ead591a01f2 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.io.IOException; @@ -76,6 +77,9 @@ public void testToAndFromXContent() throws IOException { CreateIndexRequestTests.assertAliasesEqual(resizeRequest.getTargetIndexRequest().aliases(), parsedResizeRequest.getTargetIndexRequest().aliases()); assertEquals(resizeRequest.getTargetIndexRequest().settings(), parsedResizeRequest.getTargetIndexRequest().settings()); + + BytesReference finalBytes = toShuffledXContent(parsedResizeRequest, xContentType, EMPTY_PARAMS, humanReadable); + ElasticsearchAssertions.assertToXContentEquivalent(originalBytes, finalBytes, xContentType); 
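assertToXContentEquivalent closes the loop on these round-trip tests: because toShuffledXContent randomizes field order, the final comparison has to be structural rather than byte-for-byte. Reduced to its core idea, the check parses both payloads into maps and compares those (a sketch of the idea, not the assertion's actual implementation):

```java
import java.util.Map;

final class StructuralEquivalence {
    // Map.equals ignores insertion order, so a shuffled-but-lossless round
    // trip compares equal even when the raw bytes differ.
    static boolean sameStructure(Map<String, Object> original, Map<String, Object> roundTripped) {
        return original.equals(roundTripped);
    }
}
```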
} private static ResizeRequest createTestItem() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponseTests.java index 6ed3d01e1b7dd..7e03e980304fd 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponseTests.java @@ -20,17 +20,11 @@ package org.elasticsearch.action.admin.indices.shrink; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; -import java.io.IOException; - -import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; - -public class ResizeResponseTests extends ESTestCase { +public class ResizeResponseTests extends AbstractStreamableXContentTestCase { public void testToXContent() { ResizeResponse response = new ResizeResponse(true, false, "index_name"); @@ -38,49 +32,41 @@ public void testToXContent() { assertEquals("{\"acknowledged\":true,\"shards_acknowledged\":false,\"index\":\"index_name\"}", output); } - public void testToAndFromXContent() throws IOException { - doFromXContentTestWithRandomFields(false); - } - - /** - * This test adds random fields and objects to the xContent rendered out to - * ensure we can parse it back to be forward compatible with additions to - * the xContent - */ - public void testFromXContentWithRandomFields() throws IOException { - doFromXContentTestWithRandomFields(true); + @Override + protected ResizeResponse doParseInstance(XContentParser parser) { + return ResizeResponse.fromXContent(parser); } - private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { - - final ResizeResponse resizeResponse = createTestItem(); - - boolean humanReadable = randomBoolean(); - final XContentType xContentType = randomFrom(XContentType.values()); - BytesReference originalBytes = toShuffledXContent(resizeResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); - - BytesReference mutated; - if (addRandomFields) { - mutated = insertRandomFields(xContentType, originalBytes, null, random()); - } else { - mutated = originalBytes; - } - ResizeResponse parsedResizeResponse; - try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { - parsedResizeResponse = ResizeResponse.fromXContent(parser); - assertNull(parser.nextToken()); - } - - assertEquals(resizeResponse.index(), parsedResizeResponse.index()); - assertEquals(resizeResponse.isShardsAcknowledged(), parsedResizeResponse.isShardsAcknowledged()); - assertEquals(resizeResponse.isAcknowledged(), parsedResizeResponse.isAcknowledged()); - } - - private static ResizeResponse createTestItem() { + @Override + protected ResizeResponse createTestInstance() { boolean acknowledged = randomBoolean(); boolean shardsAcknowledged = acknowledged && randomBoolean(); String index = randomAlphaOfLength(5); - return new ResizeResponse(acknowledged, shardsAcknowledged, index); } + + @Override + protected ResizeResponse createBlankInstance() { + return new ResizeResponse(); + } + + @Override + protected 
EqualsHashCodeTestUtils.MutateFunction getMutateFunction() { + return response -> { + if (randomBoolean()) { + if (randomBoolean()) { + boolean acknowledged = response.isAcknowledged() == false; + boolean shardsAcknowledged = acknowledged && response.isShardsAcknowledged(); + return new ResizeResponse(acknowledged, shardsAcknowledged, response.index()); + } else { + boolean shardsAcknowledged = response.isShardsAcknowledged() == false; + boolean acknowledged = shardsAcknowledged || response.isAcknowledged(); + return new ResizeResponse(acknowledged, shardsAcknowledged, response.index()); + } + } else { + return new ResizeResponse(response.isAcknowledged(), response.isShardsAcknowledged(), + response.index() + randomAlphaOfLengthBetween(2, 5)); + } + }; + } } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java index 8b4ce79f3ecc4..00815807eee8a 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java @@ -28,6 +28,7 @@ import java.util.List; import java.util.Map; +import org.elasticsearch.index.VersionType; import org.elasticsearch.ingest.CompoundProcessor; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Pipeline; @@ -44,6 +45,8 @@ import static org.elasticsearch.ingest.IngestDocument.MetaData.PARENT; import static org.elasticsearch.ingest.IngestDocument.MetaData.ROUTING; import static org.elasticsearch.ingest.IngestDocument.MetaData.TYPE; +import static org.elasticsearch.ingest.IngestDocument.MetaData.VERSION; +import static org.elasticsearch.ingest.IngestDocument.MetaData.VERSION_TYPE; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; @@ -98,7 +101,7 @@ public void testParseUsingPipelineStore() throws Exception { Iterator> expectedDocsIterator = expectedDocs.iterator(); for (IngestDocument ingestDocument : actualRequest.getDocuments()) { Map expectedDocument = expectedDocsIterator.next(); - Map metadataMap = ingestDocument.extractMetadata(); + Map metadataMap = ingestDocument.extractMetadata(); assertThat(metadataMap.get(INDEX), equalTo(expectedDocument.get(INDEX.getFieldName()))); assertThat(metadataMap.get(TYPE), equalTo(expectedDocument.get(TYPE.getFieldName()))); assertThat(metadataMap.get(ID), equalTo(expectedDocument.get(ID.getFieldName()))); @@ -120,17 +123,28 @@ public void testParseWithProvidedPipeline() throws Exception { for (int i = 0; i < numDocs; i++) { Map doc = new HashMap<>(); Map expectedDoc = new HashMap<>(); - List fields = Arrays.asList(INDEX, TYPE, ID, ROUTING, PARENT); + List fields = Arrays.asList(INDEX, TYPE, ID, ROUTING, PARENT, VERSION, VERSION_TYPE); for(IngestDocument.MetaData field : fields) { - if(randomBoolean()) { - String value = randomAlphaOfLengthBetween(1, 10); + if (field == VERSION) { + Long value = randomLong(); doc.put(field.getFieldName(), value); expectedDoc.put(field.getFieldName(), value); - } - else { - Integer value = randomIntBetween(1, 1000000); + } else if (field == VERSION_TYPE) { + String value = VersionType.toString( + randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE) + ); doc.put(field.getFieldName(), value); - expectedDoc.put(field.getFieldName(), String.valueOf(value)); + 
expectedDoc.put(field.getFieldName(), value); + } else { + if (randomBoolean()) { + String value = randomAlphaOfLengthBetween(1, 10); + doc.put(field.getFieldName(), value); + expectedDoc.put(field.getFieldName(), value); + } else { + Integer value = randomIntBetween(1, 1000000); + doc.put(field.getFieldName(), value); + expectedDoc.put(field.getFieldName(), String.valueOf(value)); + } } } String fieldName = randomAlphaOfLengthBetween(1, 10); @@ -175,12 +189,14 @@ public void testParseWithProvidedPipeline() throws Exception { Iterator> expectedDocsIterator = expectedDocs.iterator(); for (IngestDocument ingestDocument : actualRequest.getDocuments()) { Map expectedDocument = expectedDocsIterator.next(); - Map metadataMap = ingestDocument.extractMetadata(); + Map metadataMap = ingestDocument.extractMetadata(); assertThat(metadataMap.get(INDEX), equalTo(expectedDocument.get(INDEX.getFieldName()))); assertThat(metadataMap.get(TYPE), equalTo(expectedDocument.get(TYPE.getFieldName()))); assertThat(metadataMap.get(ID), equalTo(expectedDocument.get(ID.getFieldName()))); assertThat(metadataMap.get(ROUTING), equalTo(expectedDocument.get(ROUTING.getFieldName()))); assertThat(metadataMap.get(PARENT), equalTo(expectedDocument.get(PARENT.getFieldName()))); + assertThat(metadataMap.get(VERSION), equalTo(expectedDocument.get(VERSION.getFieldName()))); + assertThat(metadataMap.get(VERSION_TYPE), equalTo(expectedDocument.get(VERSION_TYPE.getFieldName()))); assertThat(ingestDocument.getSourceAndMetadata(), equalTo(expectedDocument.get(Fields.SOURCE))); } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/WriteableIngestDocumentTests.java b/server/src/test/java/org/elasticsearch/action/ingest/WriteableIngestDocumentTests.java index c5f67e235061e..b04c7dfcd84f8 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/WriteableIngestDocumentTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/WriteableIngestDocumentTests.java @@ -133,13 +133,13 @@ public void testToXContent() throws IOException { Map toXContentSource = (Map) toXContentDoc.get("_source"); Map toXContentIngestMetadata = (Map) toXContentDoc.get("_ingest"); - Map metadataMap = ingestDocument.extractMetadata(); - for (Map.Entry metadata : metadataMap.entrySet()) { + Map metadataMap = ingestDocument.extractMetadata(); + for (Map.Entry metadata : metadataMap.entrySet()) { String fieldName = metadata.getKey().getFieldName(); if (metadata.getValue() == null) { assertThat(toXContentDoc.containsKey(fieldName), is(false)); } else { - assertThat(toXContentDoc.get(fieldName), equalTo(metadata.getValue())); + assertThat(toXContentDoc.get(fieldName), equalTo(metadata.getValue().toString())); } } diff --git a/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java b/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java index 2eac6d9369a21..a7e18c29ce743 100644 --- a/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java @@ -27,13 +27,12 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.Date; -import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; - public class MainResponseTests 
extends AbstractStreamableXContentTestCase { @Override @@ -87,45 +86,37 @@ public void testToXContent() throws IOException { + "}", builder.string()); } - //TODO this should be removed and the metehod from AbstractStreamableTestCase should be - //used instead once https://github.com/elastic/elasticsearch/pull/25910 goes in - public void testEqualsAndHashcode() { - MainResponse original = createTestInstance(); - checkEqualsAndHashCode(original, MainResponseTests::copy, MainResponseTests::mutate); - } - - private static MainResponse copy(MainResponse o) { - return new MainResponse(o.getNodeName(), o.getVersion(), o.getClusterName(), o.getClusterUuid(), o.getBuild(), o.isAvailable()); - } - - private static MainResponse mutate(MainResponse o) { - String clusterUuid = o.getClusterUuid(); - boolean available = o.isAvailable(); - Build build = o.getBuild(); - Version version = o.getVersion(); - String nodeName = o.getNodeName(); - ClusterName clusterName = o.getClusterName(); - switch (randomIntBetween(0, 5)) { - case 0: - clusterUuid = clusterUuid + randomAlphaOfLength(5); - break; - case 1: - nodeName = nodeName + randomAlphaOfLength(5); - break; - case 2: - available = !available; - break; - case 3: - // toggle the snapshot flag of the original Build parameter - build = new Build(build.shortHash(), build.date(), !build.isSnapshot()); - break; - case 4: - version = randomValueOtherThan(version, () -> VersionUtils.randomVersion(random())); - break; - case 5: - clusterName = new ClusterName(clusterName + randomAlphaOfLength(5)); - break; - } - return new MainResponse(nodeName, version, clusterName, clusterUuid, build, available); + @Override + protected EqualsHashCodeTestUtils.MutateFunction getMutateFunction() { + return o -> { + String clusterUuid = o.getClusterUuid(); + boolean available = o.isAvailable(); + Build build = o.getBuild(); + Version version = o.getVersion(); + String nodeName = o.getNodeName(); + ClusterName clusterName = o.getClusterName(); + switch (randomIntBetween(0, 5)) { + case 0: + clusterUuid = clusterUuid + randomAlphaOfLength(5); + break; + case 1: + nodeName = nodeName + randomAlphaOfLength(5); + break; + case 2: + available = !available; + break; + case 3: + // toggle the snapshot flag of the original Build parameter + build = new Build(build.shortHash(), build.date(), !build.isSnapshot()); + break; + case 4: + version = randomValueOtherThan(version, () -> VersionUtils.randomVersion(random())); + break; + case 5: + clusterName = new ClusterName(clusterName + randomAlphaOfLength(5)); + break; + } + return new MainResponse(nodeName, version, clusterName, clusterUuid, build, available); + }; } } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index 858cbcce19989..e85c03411f7e2 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; @@ 
-54,7 +53,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.function.Supplier; -import java.util.stream.Collectors; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; @@ -95,28 +93,26 @@ public void testReplication() throws Exception { final Set expectedReplicas = getExpectedReplicas(shardId, initialState, trackedShards); - final Map expectedFailures = new HashMap<>(); - final Set expectedFailedShards = new HashSet<>(); + final Map simulatedFailures = new HashMap<>(); + final Map reportedFailures = new HashMap<>(); for (ShardRouting replica : expectedReplicas) { if (randomBoolean()) { Exception t; boolean criticalFailure = randomBoolean(); if (criticalFailure) { t = new CorruptIndexException("simulated", (String) null); + reportedFailures.put(replica, t); } else { t = new IndexShardNotStartedException(shardId, IndexShardState.RECOVERING); } logger.debug("--> simulating failure on {} with [{}]", replica, t.getClass().getSimpleName()); - expectedFailures.put(replica, t); - if (criticalFailure) { - expectedFailedShards.add(replica); - } + simulatedFailures.put(replica, t); } } Request request = new Request(shardId); PlainActionFuture listener = new PlainActionFuture<>(); - final TestReplicaProxy replicasProxy = new TestReplicaProxy(primaryTerm, expectedFailures); + final TestReplicaProxy replicasProxy = new TestReplicaProxy(primaryTerm, simulatedFailures); final TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup); final TestReplicationOperation op = new TestReplicationOperation(request, @@ -125,13 +121,13 @@ public void testReplication() throws Exception { assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); assertThat(request.processedOnReplicas, equalTo(expectedReplicas)); - assertThat(replicasProxy.failedReplicas, equalTo(expectedFailedShards)); + assertThat(replicasProxy.failedReplicas, equalTo(simulatedFailures.keySet())); assertThat(replicasProxy.markedAsStaleCopies, equalTo(staleAllocationIds)); assertTrue("listener is not marked as done", listener.isDone()); ShardInfo shardInfo = listener.actionGet().getShardInfo(); - assertThat(shardInfo.getFailed(), equalTo(expectedFailedShards.size())); - assertThat(shardInfo.getFailures(), arrayWithSize(expectedFailedShards.size())); - assertThat(shardInfo.getSuccessful(), equalTo(1 + expectedReplicas.size() - expectedFailures.size())); + assertThat(shardInfo.getFailed(), equalTo(reportedFailures.size())); + assertThat(shardInfo.getFailures(), arrayWithSize(reportedFailures.size())); + assertThat(shardInfo.getSuccessful(), equalTo(1 + expectedReplicas.size() - simulatedFailures.size())); final List unassignedShards = indexShardRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED); final int totalShards = 1 + expectedReplicas.size() + unassignedShards.size() + untrackedShards.size(); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 2112a231d37a7..b9688053fba2d 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -117,6 
+117,7 @@ import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyObject; import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; @@ -694,7 +695,7 @@ public void testSeqNoIsSetOnPrimary() throws Exception { doAnswer(invocation -> { ((ActionListener)invocation.getArguments()[0]).onResponse(() -> {}); return null; - }).when(shard).acquirePrimaryOperationPermit(any(), anyString()); + }).when(shard).acquirePrimaryOperationPermit(any(), anyString(), anyObject()); AtomicBoolean closed = new AtomicBoolean(); Releasable releasable = () -> { @@ -1194,7 +1195,7 @@ private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService count.incrementAndGet(); callback.onResponse(count::decrementAndGet); return null; - }).when(indexShard).acquirePrimaryOperationPermit(any(ActionListener.class), anyString()); + }).when(indexShard).acquirePrimaryOperationPermit(any(ActionListener.class), anyString(), anyObject()); doAnswer(invocation -> { long term = (Long)invocation.getArguments()[0]; ActionListener callback = (ActionListener) invocation.getArguments()[2]; @@ -1206,7 +1207,7 @@ private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService count.incrementAndGet(); callback.onResponse(count::decrementAndGet); return null; - }).when(indexShard).acquireReplicaOperationPermit(anyLong(), anyLong(), any(ActionListener.class), anyString()); + }).when(indexShard).acquireReplicaOperationPermit(anyLong(), anyLong(), any(ActionListener.class), anyString(), anyObject()); when(indexShard.routingEntry()).thenAnswer(invocationOnMock -> { final ClusterState state = clusterService.state(); final RoutingNode node = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 805553b4a6103..bed1b5de03750 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -80,6 +80,7 @@ import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyObject; import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; @@ -449,7 +450,7 @@ private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService count.incrementAndGet(); callback.onResponse(count::decrementAndGet); return null; - }).when(indexShard).acquirePrimaryOperationPermit(any(ActionListener.class), anyString()); + }).when(indexShard).acquirePrimaryOperationPermit(any(ActionListener.class), anyString(), anyObject()); doAnswer(invocation -> { long term = (Long)invocation.getArguments()[0]; ActionListener callback = (ActionListener) invocation.getArguments()[1]; @@ -461,7 +462,7 @@ private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService count.incrementAndGet(); callback.onResponse(count::decrementAndGet); return null; - }).when(indexShard).acquireReplicaOperationPermit(anyLong(), anyLong(), any(ActionListener.class), anyString()); + 
}).when(indexShard).acquireReplicaOperationPermit(anyLong(), anyLong(), any(ActionListener.class), anyString(), anyObject()); when(indexShard.routingEntry()).thenAnswer(invocationOnMock -> { final ClusterState state = clusterService.state(); final RoutingNode node = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); diff --git a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java index 2c51c210b1edc..c5b99a91ffa3b 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java @@ -26,7 +26,7 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; @@ -83,11 +83,11 @@ BufferedReader getBufferedReader(Path path) throws IOException { "I/O exception while trying to read [{}]", new Object[] { procSysVmMaxMapCountPath }, e -> ioException == e)); - ServerLoggers.addAppender(logger, appender); + Loggers.addAppender(logger, appender); assertThat(check.getMaxMapCount(logger), equalTo(-1L)); appender.assertAllExpectationsMatched(); verify(reader).close(); - ServerLoggers.removeAppender(logger, appender); + Loggers.removeAppender(logger, appender); appender.stop(); } @@ -105,11 +105,11 @@ BufferedReader getBufferedReader(Path path) throws IOException { "unable to parse vm.max_map_count [{}]", new Object[] { "eof" }, e -> e instanceof NumberFormatException && e.getMessage().equals("For input string: \"eof\""))); - ServerLoggers.addAppender(logger, appender); + Loggers.addAppender(logger, appender); assertThat(check.getMaxMapCount(logger), equalTo(-1L)); appender.assertAllExpectationsMatched(); verify(reader).close(); - ServerLoggers.removeAppender(logger, appender); + Loggers.removeAppender(logger, appender); appender.stop(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index b8050d728a6b3..0522f3f15f817 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -44,7 +44,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -343,7 +342,7 @@ public void testMessageLogging() throws Exception{ new MockLogAppender.UnseenEventExpectation("no completed message logged on dry run", TransportClusterRerouteAction.class.getName(), Level.INFO, "allocated an empty primary*") ); - ServerLoggers.addAppender(actionLogger, dryRunMockLog); + Loggers.addAppender(actionLogger, dryRunMockLog); AllocationCommand dryRunAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true); ClusterRerouteResponse dryRunResponse = client().admin().cluster().prepareReroute() @@ -358,7 +357,7 @@ public void testMessageLogging() 
throws Exception{ dryRunMockLog.assertAllExpectationsMatched(); dryRunMockLog.stop(); - ServerLoggers.removeAppender(actionLogger, dryRunMockLog); + Loggers.removeAppender(actionLogger, dryRunMockLog); MockLogAppender allocateMockLog = new MockLogAppender(); allocateMockLog.start(); @@ -370,7 +369,7 @@ public void testMessageLogging() throws Exception{ new MockLogAppender.UnseenEventExpectation("no message for second allocate empty primary", TransportClusterRerouteAction.class.getName(), Level.INFO, "allocated an empty primary*" + nodeName2 + "*") ); - ServerLoggers.addAppender(actionLogger, allocateMockLog); + Loggers.addAppender(actionLogger, allocateMockLog); AllocationCommand yesDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true); AllocationCommand noDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand("noexist", 1, nodeName2, true); @@ -386,7 +385,7 @@ public void testMessageLogging() throws Exception{ allocateMockLog.assertAllExpectationsMatched(); allocateMockLog.stop(); - ServerLoggers.removeAppender(actionLogger, allocateMockLog); + Loggers.removeAppender(actionLogger, allocateMockLog); } public void testClusterRerouteWithBlocks() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java index be03fbe1cd640..c8d5cdc6c86db 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java @@ -23,7 +23,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -63,7 +63,7 @@ public static class TestPlugin extends Plugin { protected final Settings settings; public TestPlugin(Settings settings) { - this.logger = ServerLoggers.getLogger(getClass(), settings); + this.logger = Loggers.getLogger(getClass(), settings); this.settings = settings; } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java index c104df913b205..7a8261776bd41 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -130,7 +129,7 @@ public void testClusterStateUpdateLogging() throws Exception { "*failed to execute cluster state applier in [2s]*")); Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service"); - ServerLoggers.addAppender(clusterLogger, mockAppender); + Loggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = 
new CountDownLatch(3); clusterApplierService.currentTimeOverride = System.nanoTime(); @@ -180,7 +179,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - ServerLoggers.removeAppender(clusterLogger, mockAppender); + Loggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); @@ -210,7 +209,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { "*cluster state applier task [test3] took [34s] above the warn threshold of *")); Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service"); - ServerLoggers.addAppender(clusterLogger, mockAppender); + Loggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(4); final CountDownLatch processedFirstTask = new CountDownLatch(1); @@ -276,7 +275,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - ServerLoggers.removeAppender(clusterLogger, mockAppender); + Loggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index 3b999b5f7733a..1b747f2268747 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -232,7 +231,7 @@ public void testClusterStateUpdateLogging() throws Exception { "*processing [test3]: took [3s] done publishing updated cluster state (version: *, uuid: *)")); Logger clusterLogger = Loggers.getLogger(masterService.getClass().getPackage().getName()); - ServerLoggers.addAppender(clusterLogger, mockAppender); + Loggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(4); masterService.currentTimeOverride = System.nanoTime(); @@ -307,7 +306,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - ServerLoggers.removeAppender(clusterLogger, mockAppender); + Loggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); @@ -579,7 +578,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { "*cluster state update task [test4] took [34s] above the warn threshold of *")); Logger clusterLogger = Loggers.getLogger(masterService.getClass().getPackage().getName()); - ServerLoggers.addAppender(clusterLogger, mockAppender); + Loggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(5); final CountDownLatch processedFirstTask = new CountDownLatch(1); @@ -675,7 +674,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - ServerLoggers.removeAppender(clusterLogger, mockAppender); + Loggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java 
b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 0f4d0cf66346a..f00768651f917 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.index.IndexModule; import org.elasticsearch.test.ESTestCase; @@ -795,8 +795,8 @@ public void testLoggingUpdates() { settings.applySettings(Settings.builder().build()); assertEquals(property, ESLoggerFactory.getLogger("test").getLevel()); } finally { - ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level); - ServerLoggers.setLevel(ESLoggerFactory.getLogger("test"), testLevel); + Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); + Loggers.setLevel(ESLoggerFactory.getLogger("test"), testLevel); } } @@ -811,7 +811,7 @@ public void testFallbackToLoggerLevel() { settings.applySettings(Settings.builder().build()); // here we fall back to 'logger.level' which is our default. assertEquals(Level.ERROR, ESLoggerFactory.getRootLogger().getLevel()); } finally { - ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level); + Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); } } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java index dfece2d9d459c..69b69a2fcf61d 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -134,14 +133,14 @@ private void assertExpectedLogMessages(Consumer consumer, MockLogAppender.LoggingExpectation ... 
expectations) throws IllegalAccessException { Logger testLogger = Loggers.getLogger("org.elasticsearch.test"); MockLogAppender appender = new MockLogAppender(); - ServerLoggers.addAppender(testLogger, appender); + Loggers.addAppender(testLogger, appender); try { appender.start(); Arrays.stream(expectations).forEach(appender::addExpectation); consumer.accept(testLogger); appender.assertAllExpectationsMatched(); } finally { - ServerLoggers.removeAppender(testLogger, appender); + Loggers.removeAppender(testLogger, appender); } } diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java index fdce390fa05e8..e50e205ff1386 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.util.concurrent; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -31,6 +30,7 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import static org.hamcrest.Matchers.equalTo; @@ -79,6 +79,34 @@ public void testHasLockedKeys() { assertFalse(lock.hasLockedKeys()); } + public void testTryAcquire() throws InterruptedException { + KeyedLock lock = new KeyedLock<>(); + Releasable foo = lock.tryAcquire("foo"); + Releasable second = lock.tryAcquire("foo"); + assertTrue(lock.hasLockedKeys()); + foo.close(); + assertTrue(lock.hasLockedKeys()); + second.close(); + assertFalse(lock.hasLockedKeys()); + // lock again + Releasable acquire = lock.tryAcquire("foo"); + assertNotNull(acquire); + final AtomicBoolean check = new AtomicBoolean(false); + CountDownLatch latch = new CountDownLatch(1); + Thread thread = new Thread(() -> { + latch.countDown(); + try (Releasable ignore = lock.acquire("foo")) { + assertTrue(check.get()); + } + }); + thread.start(); + latch.await(); + check.set(true); + acquire.close(); + foo.close(); + thread.join(); + } + public void testLockIsReentrant() throws InterruptedException { KeyedLock lock = new KeyedLock<>(); Releasable foo = lock.acquire("foo"); @@ -137,7 +165,24 @@ public void run() { for (int i = 0; i < numRuns; i++) { String curName = names[randomInt(names.length - 1)]; assert connectionLock.isHeldByCurrentThread(curName) == false; - try (Releasable ignored = connectionLock.acquire(curName)) { + Releasable lock; + if (randomIntBetween(0, 10) < 4) { + int tries = 0; + boolean stepOut = false; + while ((lock = connectionLock.tryAcquire(curName)) == null) { + assertFalse(connectionLock.isHeldByCurrentThread(curName)); + if (tries++ == 10) { + stepOut = true; + break; + } + } + if (stepOut) { + break; + } + } else { + lock = connectionLock.acquire(curName); + } + try (Releasable ignore = lock) { assert connectionLock.isHeldByCurrentThread(curName); assert connectionLock.isHeldByCurrentThread(curName + "bla") == false; if (randomBoolean()) { diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueueTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueueTests.java new file mode 100644 index 0000000000000..cc8d7b5d2b392 --- /dev/null +++ 
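The testTryAcquire added above pins down KeyedLock's non-blocking contract: tryAcquire returns a Releasable when the key is free (or already held by the calling thread, since the lock is reentrant) and null while another thread holds it. A minimal sketch of that contract, assuming only KeyedLock, Releasable, and the ESTestCase assertions used throughout this patch:

```java
public void testTryAcquireContract() {
    KeyedLock<String> lock = new KeyedLock<>();
    try (Releasable held = lock.acquire("key")) {
        // reentrant: the owning thread may tryAcquire the same key again
        Releasable again = lock.tryAcquire("key");
        assertNotNull(again);
        again.close();
    }
    // the key is free again, so a non-blocking attempt succeeds immediately
    Releasable fresh = lock.tryAcquire("key");
    assertNotNull(fresh);
    fresh.close();
}
```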
b/server/src/test/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueueTests.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.util.concurrent; + +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.equalTo; + +public class SizeBlockingQueueTests extends ESTestCase { + + /* + * Tests that the size of a queue remains at most the capacity while offers are made to a queue when at capacity. This test would have + * previously failed when the size of the queue was incremented and exposed externally even though the item offered to the queue was + * never actually added to the queue. + */ + public void testQueueSize() throws InterruptedException { + final int capacity = randomIntBetween(1, 32); + final BlockingQueue blockingQueue = new ArrayBlockingQueue<>(capacity); + final SizeBlockingQueue sizeBlockingQueue = new SizeBlockingQueue<>(blockingQueue, capacity); + + // fill the queue to capacity + for (int i = 0; i < capacity; i++) { + sizeBlockingQueue.offer(i); + } + + + final int iterations = 1 << 16; + final CyclicBarrier barrier = new CyclicBarrier(2); + + // this thread will try to offer items to the queue while the queue size thread is polling the size + final Thread queueOfferThread = new Thread(() -> { + for (int i = 0; i < iterations; i++) { + try { + // synchronize each iteration of checking the size with each iteration of offering, each iteration is a race + barrier.await(); + } catch (final BrokenBarrierException | InterruptedException e) { + throw new RuntimeException(e); + } + sizeBlockingQueue.offer(capacity + i); + } + }); + queueOfferThread.start(); + + // this thread will repeatedly poll the size of the queue keeping track of the maximum size that it sees + final AtomicInteger maxSize = new AtomicInteger(); + final Thread queueSizeThread = new Thread(() -> { + for (int i = 0; i < iterations; i++) { + try { + // synchronize each iteration of checking the size with each iteration of offering, each iteration is a race + barrier.await(); + } catch (final BrokenBarrierException | InterruptedException e) { + throw new RuntimeException(e); + } + maxSize.set(Math.max(maxSize.get(), sizeBlockingQueue.size())); + } + }); + queueSizeThread.start(); + + // wait for the threads to finish + queueOfferThread.join(); + queueSizeThread.join(); + + // the maximum size of the queue should be equal to the capacity + assertThat(maxSize.get(), equalTo(capacity)); + } + +} diff --git 
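The new SizeBlockingQueueTests uses a CyclicBarrier so that the offering thread and the size-polling thread start every iteration together, which turns each iteration into an independent race. The pattern stands on its own; a self-contained sketch, with class and thread names hypothetical:

```java
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;

public class BarrierRaceSketch {
    public static void main(String[] args) throws InterruptedException {
        final int iterations = 1 << 10;
        final CyclicBarrier barrier = new CyclicBarrier(2); // one party per racing thread
        final Runnable racer = () -> {
            for (int i = 0; i < iterations; i++) {
                try {
                    barrier.await(); // both threads resume together, so each iteration is a fresh race
                } catch (BrokenBarrierException | InterruptedException e) {
                    throw new RuntimeException(e);
                }
                // the racy operation under test goes here, one per thread
            }
        };
        Thread offerThread = new Thread(racer);
        Thread pollThread = new Thread(racer);
        offerThread.start();
        pollThread.start();
        offerThread.join();
        pollThread.join();
    }
}
```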
a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index c7205b3200f1c..53a1c3c75d301 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -753,7 +753,8 @@ void doTestRawField(XContent source, boolean useStream) throws Exception { generator.writeEndObject(); } - XContentParser parser = xcontentType().xContent().createParser(NamedXContentRegistry.EMPTY, os.toByteArray()); + XContentParser parser = xcontentType().xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, os.toByteArray()); assertEquals(Token.START_OBJECT, parser.nextToken()); assertEquals(Token.FIELD_NAME, parser.nextToken()); assertEquals("bar", parser.currentName()); @@ -787,7 +788,8 @@ void doTestRawValue(XContent source) throws Exception { generator.writeRawValue(new BytesArray(rawData)); } - XContentParser parser = xcontentType().xContent().createParser(NamedXContentRegistry.EMPTY, os.toByteArray()); + XContentParser parser = xcontentType().xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, os.toByteArray()); assertEquals(Token.START_OBJECT, parser.nextToken()); assertEquals(Token.FIELD_NAME, parser.nextToken()); assertEquals("foo", parser.currentName()); @@ -803,7 +805,8 @@ void doTestRawValue(XContent source) throws Exception { generator.writeEndObject(); } - parser = xcontentType().xContent().createParser(NamedXContentRegistry.EMPTY, os.toByteArray()); + parser = xcontentType().xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, os.toByteArray()); assertEquals(Token.START_OBJECT, parser.nextToken()); assertEquals(Token.FIELD_NAME, parser.nextToken()); assertEquals("test", parser.currentName()); @@ -831,7 +834,8 @@ protected void doTestBigInteger(JsonGenerator generator, ByteArrayOutputStream o generator.flush(); byte[] serialized = os.toByteArray(); - XContentParser parser = xcontentType().xContent().createParser(NamedXContentRegistry.EMPTY, serialized); + XContentParser parser = xcontentType().xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, serialized); Map map = parser.map(); assertEquals("bar", map.get("foo")); assertEquals(bigInteger, map.get("bigint")); @@ -1011,7 +1015,7 @@ public void testNamedObject() throws IOException { new NamedXContentRegistry.Entry(Object.class, new ParseField("str"), p -> p.text()))); XContentBuilder b = XContentBuilder.builder(xcontentType().xContent()); b.value("test"); - XContentParser p = xcontentType().xContent().createParser(registry, b.bytes()); + XContentParser p = xcontentType().xContent().createParser(registry, LoggingDeprecationHandler.INSTANCE, b.bytes().streamInput()); assertEquals(test1, p.namedObject(Object.class, "test1", null)); assertEquals(test2, p.namedObject(Object.class, "test2", null)); assertEquals(test2, p.namedObject(Object.class, "deprecated", null)); @@ -1030,7 +1034,8 @@ public void testNamedObject() throws IOException { assertEquals("Unknown namedObject category [java.lang.String]", e.getMessage()); } { - XContentParser emptyRegistryParser = xcontentType().xContent().createParser(NamedXContentRegistry.EMPTY, new byte[] {}); + XContentParser emptyRegistryParser = xcontentType().xContent() + 
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, new byte[] {}); Exception e = expectThrows(ElasticsearchException.class, () -> emptyRegistryParser.namedObject(String.class, "doesn't matter", null)); assertEquals("namedObject is not supported for this parser", e.getMessage()); diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java index baa2b3bcb36e6..7b6f14518fecc 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java @@ -34,13 +34,14 @@ import java.util.Collections; import java.util.List; +import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasSize; public class ObjectParserTests extends ESTestCase { public void testBasics() throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, + XContentParser parser = createParser(JsonXContent.jsonXContent, "{\n" + " \"test\" : \"foo\",\n" + " \"test_number\" : 2,\n" @@ -449,7 +450,7 @@ public void setString_or_null(String string_or_null) { } public void testParseNamedObject() throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, + XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"named\": {\n" + " \"a\": {}" + "}}"); @@ -460,7 +461,7 @@ public void testParseNamedObject() throws IOException { } public void testParseNamedObjectInOrder() throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, + XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"named\": [\n" + " {\"a\": {}}" + "]}"); @@ -471,7 +472,7 @@ public void testParseNamedObjectInOrder() throws IOException { } public void testParseNamedObjectTwoFieldsInArray() throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, + XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"named\": [\n" + " {\"a\": {}, \"b\": {}}" + "]}"); @@ -483,7 +484,7 @@ public void testParseNamedObjectTwoFieldsInArray() throws IOException { } public void testParseNamedObjectNoFieldsInArray() throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, + XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"named\": [\n" + " {}" + "]}"); @@ -495,7 +496,7 @@ public void testParseNamedObjectNoFieldsInArray() throws IOException { } public void testParseNamedObjectJunkInArray() throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, + XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"named\": [\n" + " \"junk\"" + "]}"); @@ -594,6 +595,59 @@ class TestStruct { assertEquals(s.test, "foo"); } + public void testArraysOfGenericValues() throws IOException { + XContentParser parser = createParser(JsonXContent.jsonXContent, + "{\n" + + " \"test_array\": [ 1, null, \"3\", 4.2],\n" + + " \"int_array\": [ 1, 2, 3]\n" + + "}"); + class TestStruct { + List testArray = new ArrayList<>(); + + List ints = new ArrayList<>(); + + public void setInts(List ints) { + this.ints = ints; + } + + public void setArray(List testArray) { + this.testArray = testArray; + } + } + ObjectParser objectParser = new ObjectParser<>("foo"); + TestStruct s = new TestStruct(); + + objectParser.declareFieldArray(TestStruct::setArray, (p, c) -> 
XContentParserUtils.parseFieldsValue(p), + new ParseField("test_array"), ValueType.VALUE_ARRAY); + objectParser.declareIntArray(TestStruct::setInts, new ParseField("int_array")); + objectParser.parse(parser, s, null); + assertEquals(s.testArray, Arrays.asList(1, null, "3", 4.2)); + assertEquals(s.ints, Arrays.asList(1, 2, 3)); + + parser = createParser(JsonXContent.jsonXContent, "{\"test_array\": 42}"); + s = new TestStruct(); + objectParser.parse(parser, s, null); + assertEquals(s.testArray, Collections.singletonList(42)); + + parser = createParser(JsonXContent.jsonXContent, "{\"test_array\": [null]}"); + s = new TestStruct(); + objectParser.parse(parser, s, null); + assertThat(s.testArray, hasSize(1)); + assertNull(s.testArray.get(0)); + + parser = createParser(JsonXContent.jsonXContent, "{\"test_array\": null}"); + s = new TestStruct(); + objectParser.parse(parser, s, null); + assertThat(s.testArray, hasSize(1)); + assertNull(s.testArray.get(0)); + + // Make sure that we didn't break the null handling in arrays that shouldn't support nulls + XContentParser parser2 = createParser(JsonXContent.jsonXContent, "{\"int_array\": [1, null, 3]}"); + TestStruct s2 = new TestStruct(); + ParsingException ex = expectThrows(ParsingException.class, () -> objectParser.parse(parser2, s2, null)); + assertThat(ex.getMessage(), startsWith("[foo] failed to parse field [int_array]")); + } + static class NamedObjectHolder { public static final ObjectParser PARSER = new ObjectParser<>("named_object_holder", NamedObjectHolder::new); diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java index 62ce4c58f9c9c..f550e26024d06 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java @@ -166,7 +166,8 @@ public void testParseTypedKeysObject() throws IOException { BytesReference bytes = toXContent((builder, params) -> builder.startObject("name").field("field", 0).endObject(), xContentType, randomBoolean()); - try (XContentParser parser = xContentType.xContent().createParser(namedXContentRegistry, bytes)) { + try (XContentParser parser = xContentType.xContent() + .createParser(namedXContentRegistry, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes.streamInput())) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); @@ -181,7 +182,8 @@ public void testParseTypedKeysObject() throws IOException { bytes = toXContent((builder, params) -> builder.startObject("type" + delimiter + "name").field("bool", true).endObject(), xContentType, randomBoolean()); - try (XContentParser parser = xContentType.xContent().createParser(namedXContentRegistry, bytes)) { + try (XContentParser parser = xContentType.xContent() + .createParser(namedXContentRegistry, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes.streamInput())) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); @@ 
-200,7 +202,8 @@ public void testParseTypedKeysObject() throws IOException { return builder; }, xContentType, randomBoolean()); - try (XContentParser parser = xContentType.xContent().createParser(namedXContentRegistry, bytes)) { + try (XContentParser parser = xContentType.xContent() + .createParser(namedXContentRegistry, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes.streamInput())) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); @@ -226,7 +229,8 @@ public void testParseTypedKeysObjectErrors() throws IOException { { BytesReference bytes = toXContent((builder, params) -> builder.startObject("name").field("field", 0).endObject(), xContentType, randomBoolean()); - try (XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, bytes)) { + try (XContentParser parser = xContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes.streamInput())) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); ParsingException exception = expectThrows(ParsingException.class, @@ -238,7 +242,8 @@ public void testParseTypedKeysObjectErrors() throws IOException { { BytesReference bytes = toXContent((builder, params) -> builder.startObject("").field("field", 0).endObject(), xContentType, randomBoolean()); - try (XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, bytes)) { + try (XContentParser parser = xContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes.streamInput())) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java index 219bd12daeaea..3dc1437e4434e 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.xcontent.support; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -93,7 +94,8 @@ public void testToXContent() throws IOException { expectThrows(IOException.class, () -> XContentHelper.toXContent(toXContent, xContentType, randomBoolean())); } else { BytesReference bytes = XContentHelper.toXContent(toXContent, xContentType, randomBoolean()); - try (XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, bytes)) { + try (XContentParser parser = xContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes.streamInput())) { 
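// note: DeprecationHandler.THROW_UNSUPPORTED_OPERATION makes these test parsers fail fast if they ever hit
// deprecated syntax, whereas production parsing paths typically pass LoggingDeprecationHandler.INSTANCE,
// which logs a warning instead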
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); assertTrue(parser.nextToken().isValue()); diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractXContentFilteringTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractXContentFilteringTestCase.java index b7ab56f452827..3f6ec53f4f69f 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractXContentFilteringTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractXContentFilteringTestCase.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.xcontent.support.filtering; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -74,8 +75,12 @@ static void assertXContentBuilderAsString(final XContentBuilder expected, final static void assertXContentBuilderAsBytes(final XContentBuilder expected, final XContentBuilder actual) { try { XContent xContent = XContentFactory.xContent(actual.contentType()); - XContentParser jsonParser = xContent.createParser(NamedXContentRegistry.EMPTY, expected.bytes()); - XContentParser testParser = xContent.createParser(NamedXContentRegistry.EMPTY, actual.bytes()); + XContentParser jsonParser = + xContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, expected.bytes().streamInput()); + XContentParser testParser = + xContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, actual.bytes().streamInput()); while (true) { XContentParser.Token token1 = jsonParser.nextToken(); diff --git a/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index e82c5fafbf31a..543b5e39026fb 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -74,7 +74,7 @@ public class MetaDataStateFormatTests extends ESTestCase { * Ensure we can read a pre-generated cluster state. 
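* The state fixture read here lives under the test resources and was presumably generated by an earlier version, which is what makes this a backwards-compatibility check.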
*/ public void testReadClusterState() throws URISyntaxException, IOException { - final MetaDataStateFormat format = new MetaDataStateFormat(randomFrom(XContentType.values()), "global-") { + final MetaDataStateFormat format = new MetaDataStateFormat("global-") { @Override public void toXContent(XContentBuilder builder, MetaData state) throws IOException { @@ -103,7 +103,7 @@ public void testReadWriteState() throws IOException { dirs[i] = createTempDir(); } final long id = addDummyFiles("foo-", dirs); - Format format = new Format(randomFrom(XContentType.values()), "foo-"); + Format format = new Format("foo-"); DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), randomBoolean()); int version = between(0, Integer.MAX_VALUE/2); format.write(state, dirs); @@ -145,7 +145,7 @@ public void testVersionMismatch() throws IOException { } final long id = addDummyFiles("foo-", dirs); - Format format = new Format(randomFrom(XContentType.values()), "foo-"); + Format format = new Format("foo-"); DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), randomBoolean()); int version = between(0, Integer.MAX_VALUE/2); format.write(state, dirs); @@ -169,7 +169,7 @@ public void testCorruption() throws IOException { dirs[i] = createTempDir(); } final long id = addDummyFiles("foo-", dirs); - Format format = new Format(randomFrom(XContentType.values()), "foo-"); + Format format = new Format("foo-"); DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), randomBoolean()); int version = between(0, Integer.MAX_VALUE/2); format.write(state, dirs); @@ -244,17 +244,16 @@ public void testLoadState() throws IOException { meta.add(randomMeta()); } Set corruptedFiles = new HashSet<>(); - MetaDataStateFormat format = metaDataFormat(randomFrom(XContentType.values())); + MetaDataStateFormat format = metaDataFormat(); for (int i = 0; i < dirs.length; i++) { dirs[i] = createTempDir(); Files.createDirectories(dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME)); for (int j = 0; j < numLegacy; j++) { - XContentType type = format.format(); if (randomBoolean() && (j < numStates - 1 || dirs.length > 0 && i != 0)) { Path file = dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-"+j); Files.createFile(file); // randomly create 0-byte files -- there is extra logic to skip them } else { - try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(type, + try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(MetaDataStateFormat.FORMAT, Files.newOutputStream(dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + j)))) { xcontentBuilder.startObject(); MetaData.Builder.toXContent(meta.get(j), xcontentBuilder, ToXContent.EMPTY_PARAMS); @@ -309,8 +308,8 @@ public void testLoadState() throws IOException { } } - private static MetaDataStateFormat metaDataFormat(XContentType format) { - return new MetaDataStateFormat(format, MetaData.GLOBAL_STATE_FILE_PREFIX) { + private static MetaDataStateFormat metaDataFormat() { + return new MetaDataStateFormat(MetaData.GLOBAL_STATE_FILE_PREFIX) { @Override public void toXContent(XContentBuilder builder, MetaData state) throws IOException { MetaData.Builder.toXContent(state, builder, ToXContent.EMPTY_PARAMS); @@ -347,8 +346,8 @@ private IndexMetaData.Builder indexBuilder(String index) throws IOException { private 
class Format extends MetaDataStateFormat { - Format(XContentType format, String prefix) { - super(format, prefix); + Format(String prefix) { + super(prefix); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java index df6aa962f317a..bd2170dc1eee8 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -194,25 +194,46 @@ public void testRefreshActuallyWorks() throws Exception { IndexService.AsyncRefreshTask refreshTask = indexService.getRefreshTask(); assertEquals(1000, refreshTask.getInterval().millis()); assertTrue(indexService.getRefreshTask().mustReschedule()); - - // now disable - IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).build(); - indexService.updateMetaData(metaData); - client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); IndexShard shard = indexService.getShard(0); - try (Engine.Searcher searcher = shard.acquireSearcher("test")) { - TopDocs search = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(0, search.totalHits); - } - // refresh every millisecond - metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1ms")).build(); + client().prepareIndex("test", "test", "0").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + // now disable the refresh + IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData()) + .settings(Settings.builder().put(indexService.getMetaData().getSettings()) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).build(); + // when we update we reschedule the existing task AND fire off an async refresh to make sure we make everything visible + // before that. This is why we need to wait for the refresh task to be unscheduled and the first doc to be visible indexService.updateMetaData(metaData); + assertTrue(refreshTask.isClosed()); + refreshTask = indexService.getRefreshTask(); assertBusy(() -> { + // this one either becomes visible due to a concurrently running scheduled refresh OR due to the force refresh + // we are running on updateMetaData if the interval changes try (Engine.Searcher searcher = shard.acquireSearcher("test")) { TopDocs search = searcher.searcher().search(new MatchAllDocsQuery(), 10); assertEquals(1, search.totalHits); - } catch (IOException e) { - fail(e.getMessage()); + } + }); + assertFalse(refreshTask.isClosed()); + // refresh every millisecond + client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + metaData = IndexMetaData.builder(indexService.getMetaData()) + .settings(Settings.builder().put(indexService.getMetaData().getSettings()) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1ms")).build(); + indexService.updateMetaData(metaData); + assertTrue(refreshTask.isClosed()); + assertBusy(() -> { + // this one becomes visible due to the force refresh we are running on updateMetaData if the interval changes + try (Engine.Searcher searcher = shard.acquireSearcher("test")) { + TopDocs search = searcher.searcher().search(new MatchAllDocsQuery(), 10); +
assertEquals(2, search.totalHits); + } + }); + client().prepareIndex("test", "test", "2").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + assertBusy(() -> { + // this one becomes visible due to the scheduled refresh + try (Engine.Searcher searcher = shard.acquireSearcher("test")) { + TopDocs search = searcher.searcher().search(new MatchAllDocsQuery(), 10); + assertEquals(3, search.totalHits); } }); } diff --git a/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java b/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java index 301d4e3cfa360..e9eb5d8b83d2e 100644 --- a/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java @@ -26,7 +26,6 @@ import org.apache.logging.log4j.core.filter.RegexFilter; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; @@ -72,8 +71,8 @@ public void testUpdateAutoThrottleSettings() throws Exception { MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings"); mockAppender.start(); final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings"); - ServerLoggers.addAppender(settingsLogger, mockAppender); - ServerLoggers.setLevel(settingsLogger, Level.TRACE); + Loggers.addAppender(settingsLogger, mockAppender); + Loggers.setLevel(settingsLogger, Level.TRACE); try { Settings.Builder builder = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) @@ -92,9 +91,9 @@ public void testUpdateAutoThrottleSettings() throws Exception { assertTrue(mockAppender.sawUpdateAutoThrottle); assertEquals(settings.getMergeSchedulerConfig().isAutoThrottle(), false); } finally { - ServerLoggers.removeAppender(settingsLogger, mockAppender); + Loggers.removeAppender(settingsLogger, mockAppender); mockAppender.stop(); - ServerLoggers.setLevel(settingsLogger, (Level) null); + Loggers.setLevel(settingsLogger, (Level) null); } } @@ -103,8 +102,8 @@ public void testUpdateMergeMaxThreadCount() throws Exception { MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings"); mockAppender.start(); final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings"); - ServerLoggers.addAppender(settingsLogger, mockAppender); - ServerLoggers.setLevel(settingsLogger, Level.TRACE); + Loggers.addAppender(settingsLogger, mockAppender); + Loggers.setLevel(settingsLogger, Level.TRACE); try { Settings.Builder builder = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) @@ -124,9 +123,9 @@ public void testUpdateMergeMaxThreadCount() throws Exception { // Make sure we log the change: assertTrue(mockAppender.sawUpdateMaxThreadCount); } finally { - ServerLoggers.removeAppender(settingsLogger, mockAppender); + Loggers.removeAppender(settingsLogger, mockAppender); mockAppender.stop(); - ServerLoggers.setLevel(settingsLogger, (Level) null); + Loggers.setLevel(settingsLogger, (Level) null); } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index f663504da9fa7..4588010fe9c63 100644 --- 
a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -131,10 +131,15 @@ public void testAcquireIndexCommit() throws Exception { assertThat(snapshot.getUserData(), equalTo(commitList.get(commitList.size() - 1).getUserData())); } } - randomSubsetOf(snapshottingCommits).forEach(snapshot -> { + final List releasingSnapshots = randomSubsetOf(snapshottingCommits); + for (IndexCommit snapshot : releasingSnapshots) { snapshottingCommits.remove(snapshot); - indexPolicy.releaseCommit(snapshot); - }); + final long pendingSnapshots = snapshottingCommits.stream().filter(snapshot::equals).count(); + final IndexCommit lastCommit = commitList.get(commitList.size() - 1); + final IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get()); + assertThat(indexPolicy.releaseCommit(snapshot), + equalTo(pendingSnapshots == 0 && snapshot.equals(lastCommit) == false && snapshot.equals(safeCommit) == false)); + } // Snapshotting commits must not be deleted. snapshottingCommits.forEach(snapshot -> assertThat(snapshot.isDeleted(), equalTo(false))); // We don't need to retain translog for snapshotting commits. diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index bb688e59fa653..300bad30da373 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -81,7 +81,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.uid.Versions; @@ -109,8 +108,8 @@ import org.elasticsearch.index.mapper.RootObjectMapper; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; -import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.LocalCheckpointTracker; +import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.ShardId; @@ -1021,25 +1020,25 @@ public void testCommitAdvancesMinTranslogForRecovery() throws IOException { } engine.flush(); - assertThat(engine.getTranslog().currentFileGeneration(), equalTo(2L)); - assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 2L : 1L)); - assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(2L)); + assertThat(engine.getTranslog().currentFileGeneration(), equalTo(3L)); + assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 3L : 2L)); + assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(3L)); engine.flush(); - assertThat(engine.getTranslog().currentFileGeneration(), equalTo(2L)); - assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 
2L : 1L)); - assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(2L)); - - engine.flush(true, true); assertThat(engine.getTranslog().currentFileGeneration(), equalTo(3L)); - assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 3L : 1L)); + assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 3L : 2L)); assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(3L)); - globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); engine.flush(true, true); assertThat(engine.getTranslog().currentFileGeneration(), equalTo(4L)); - assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(4L)); + assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 4L : 2L)); assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(4L)); + + globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); + engine.flush(true, true); + assertThat(engine.getTranslog().currentFileGeneration(), equalTo(5L)); + assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(5L)); + assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(5L)); } public void testSyncedFlush() throws IOException { @@ -1808,8 +1807,8 @@ public void testIndexWriterInfoStream() throws IllegalAccessException, IOExcepti Logger rootLogger = LogManager.getRootLogger(); Level savedLevel = rootLogger.getLevel(); - ServerLoggers.addAppender(rootLogger, mockAppender); - ServerLoggers.setLevel(rootLogger, Level.DEBUG); + Loggers.addAppender(rootLogger, mockAppender); + Loggers.setLevel(rootLogger, Level.DEBUG); rootLogger = LogManager.getRootLogger(); try { @@ -1820,15 +1819,15 @@ public void testIndexWriterInfoStream() throws IllegalAccessException, IOExcepti assertFalse(mockAppender.sawIndexWriterMessage); // Again, with TRACE, which should log IndexWriter output: - ServerLoggers.setLevel(rootLogger, Level.TRACE); + Loggers.setLevel(rootLogger, Level.TRACE); engine.index(indexForDoc(doc)); engine.flush(); assertTrue(mockAppender.sawIndexWriterMessage); } finally { - ServerLoggers.removeAppender(rootLogger, mockAppender); + Loggers.removeAppender(rootLogger, mockAppender); mockAppender.stop(); - ServerLoggers.setLevel(rootLogger, savedLevel); + Loggers.setLevel(rootLogger, savedLevel); } } @@ -1958,9 +1957,9 @@ public void testSeqNoAndCheckpoints() throws IOException { // this test writes documents to the engine while concurrently flushing/commit // and ensuring that the commit points contain the correct sequence number data public void testConcurrentWritesAndCommits() throws Exception { - List commits = new ArrayList<>(); try (Store store = createStore(); InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(), null))) { + final List commits = new ArrayList<>(); final int numIndexingThreads = scaledRandomIntBetween(2, 4); final int numDocsPerThread = randomIntBetween(500, 1000); @@ -2000,7 +1999,7 @@ public void testConcurrentWritesAndCommits() throws Exception { boolean doneIndexing; do { doneIndexing = doneLatch.await(sleepTime, TimeUnit.MILLISECONDS); - commits.add(engine.acquireIndexCommit(false, true)); + commits.add(engine.acquireLastIndexCommit(true)); if (commits.size() 
> commitLimit) { // don't keep on piling up too many commits IOUtils.close(commits.remove(randomIntBetween(0, commits.size()-1))); // we increase the wait time to make sure that, if things are slow, we eventually wait for the threads to finish. @@ -2045,8 +2044,6 @@ public void testConcurrentWritesAndCommits() throws Exception { prevLocalCheckpoint = localCheckpoint; prevMaxSeqNo = maxSeqNo; } - } finally { - IOUtils.close(commits); } } @@ -2098,8 +2095,8 @@ public void testIndexWriterIFDInfoStream() throws IllegalAccessException, IOExce final Logger iwIFDLogger = Loggers.getLogger("org.elasticsearch.index.engine.Engine.IFD"); - ServerLoggers.addAppender(iwIFDLogger, mockAppender); - ServerLoggers.setLevel(iwIFDLogger, Level.DEBUG); + Loggers.addAppender(iwIFDLogger, mockAppender); + Loggers.setLevel(iwIFDLogger, Level.DEBUG); try { // First, with DEBUG, which should NOT log IndexWriter output: @@ -2110,16 +2107,16 @@ public void testIndexWriterIFDInfoStream() throws IllegalAccessException, IOExce assertFalse(mockAppender.sawIndexWriterIFDMessage); // Again, with TRACE, which should only log IndexWriter IFD output: - ServerLoggers.setLevel(iwIFDLogger, Level.TRACE); + Loggers.setLevel(iwIFDLogger, Level.TRACE); engine.index(indexForDoc(doc)); engine.flush(); assertFalse(mockAppender.sawIndexWriterMessage); assertTrue(mockAppender.sawIndexWriterIFDMessage); } finally { - ServerLoggers.removeAppender(iwIFDLogger, mockAppender); + Loggers.removeAppender(iwIFDLogger, mockAppender); mockAppender.stop(); - ServerLoggers.setLevel(iwIFDLogger, (Level) null); + Loggers.setLevel(iwIFDLogger, (Level) null); } } @@ -2475,9 +2472,11 @@ public void testRecoverFromForeignTranslog() throws IOException { Translog.TranslogGeneration generation = engine.getTranslog().getGeneration(); engine.close(); + final Path badTranslogLog = createTempDir(); + final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId); Translog translog = new Translog( - new TranslogConfig(shardId, createTempDir(), INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), - null, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); + new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED); translog.add(new Translog.Index("test", "SomeBogusId", 0, "{}".getBytes(Charset.forName("UTF-8")))); assertEquals(generation.translogFileGeneration, translog.currentFileGeneration()); translog.close(); @@ -2699,7 +2698,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog()); Map userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals("2", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); } } @@ -2710,14 +2709,14 @@ public void testCurrentTranslogIDisCommitted() throws IOException { assertTrue(engine.isRecovering()); Map userData = engine.getLastCommittedSegmentInfos().getUserData(); if (i == 0) { - assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals("2", userData.get(Translog.TRANSLOG_GENERATION_KEY)); } else { - assertEquals("3",
userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals("4", userData.get(Translog.TRANSLOG_GENERATION_KEY)); } assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); engine.recoverFromTranslog(); userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals("4", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); } } @@ -2726,10 +2725,10 @@ public void testCurrentTranslogIDisCommitted() throws IOException { { try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG))) { Map userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals("2", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog()); - assertEquals(1, engine.getTranslog().currentFileGeneration()); + assertEquals(2, engine.getTranslog().currentFileGeneration()); assertEquals(0L, engine.getTranslog().uncommittedOperations()); } } @@ -2739,11 +2738,11 @@ public void testCurrentTranslogIDisCommitted() throws IOException { for (int i = 0; i < 2; i++) { try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) { Map userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals("2", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); engine.recoverFromTranslog(); userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("no changes - nothing to commit", "1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals("no changes - nothing to commit", "2", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); } } @@ -4115,6 +4114,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s } }) { int numDocs = scaledRandomIntBetween(10, 100); + final String translogUUID = engine.getTranslog().getTranslogUUID(); for (int docId = 0; docId < numDocs; docId++) { ParseContext.Document document = testDocumentWithTextField(); document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); @@ -4124,7 +4124,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s engine.getTranslog().sync(); } if (frequently()) { - final long lastSyncedGlobalCheckpoint = Translog.readGlobalCheckpoint(translogPath); + final long lastSyncedGlobalCheckpoint = Translog.readGlobalCheckpoint(translogPath, translogUUID); engine.flush(randomBoolean(), true); final List commits = DirectoryReader.listCommits(store.directory()); // Keep only one safe commit as the oldest commit. 
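The translog generation expectations above move up by one ("1" becomes "2", "3" becomes "4") because a translog is now seeded on disk via Translog.createEmptyTranslog and then reopened, which rolls to a fresh generation before the first operation is appended. A minimal sketch of that bootstrap sequence, assembled from calls that appear elsewhere in this patch (the asserted generation is inferred from the updated test expectations, not from the Translog implementation itself):

    final Path translogPath = createTempDir();
    // seed an empty translog on disk (generation 1) and remember its UUID
    final String translogUUID =
        Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId);
    // reopening the translog rolls to a new generation, so the first commit records generation 2
    final Translog translog = new Translog(
        new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE),
        translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED);
    assertEquals(2L, translog.currentFileGeneration());
    translog.close();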
@@ -4210,13 +4210,17 @@ public void testAcquireIndexCommit() throws Exception { for (int i = 0; i < numDocs; i++) { index(engine, i); } - final boolean inSync = randomBoolean(); - if (inSync) { + if (randomBoolean()) { globalCheckpoint.set(numDocs - 1); } final boolean flushFirst = randomBoolean(); final boolean safeCommit = randomBoolean(); - Engine.IndexCommitRef commit = engine.acquireIndexCommit(safeCommit, flushFirst); + final Engine.IndexCommitRef snapshot; + if (safeCommit) { + snapshot = engine.acquireSafeIndexCommit(); + } else { + snapshot = engine.acquireLastIndexCommit(flushFirst); + } int moreDocs = between(1, 20); for (int i = 0; i < moreDocs; i++) { index(engine, numDocs + i); @@ -4224,11 +4228,11 @@ globalCheckpoint.set(numDocs + moreDocs - 1); engine.flush(); // check that we can still read the commit that we captured - try (IndexReader reader = DirectoryReader.open(commit.getIndexCommit())) { - assertThat(reader.numDocs(), equalTo(flushFirst && (safeCommit == false || inSync) ? numDocs : 0)); + try (IndexReader reader = DirectoryReader.open(snapshot.getIndexCommit())) { + assertThat(reader.numDocs(), equalTo(flushFirst && safeCommit == false ? numDocs : 0)); } assertThat(DirectoryReader.listCommits(engine.store.directory()), hasSize(2)); - commit.close(); + snapshot.close(); // check it's cleaned up engine.flush(true, true); assertThat(DirectoryReader.listCommits(engine.store.directory()), hasSize(1)); @@ -4284,7 +4288,7 @@ public void testOpenIndexCreateTranslogKeepOnlyLastCommit() throws Exception { assertThat(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY), equalTo(lastCommit.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY))); // Translog tags should be fresh. assertThat(userData.get(Translog.TRANSLOG_UUID_KEY), not(equalTo(lastCommit.get(Translog.TRANSLOG_UUID_KEY)))); - assertThat(userData.get(Translog.TRANSLOG_GENERATION_KEY), equalTo("1")); + assertThat(userData.get(Translog.TRANSLOG_GENERATION_KEY), equalTo("2")); } } @@ -4313,6 +4317,37 @@ public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { } } + public void testCleanupCommitsWhenReleaseSnapshot() throws Exception { + IOUtils.close(engine, store); + store = createStore(); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { + final int numDocs = scaledRandomIntBetween(10, 100); + for (int docId = 0; docId < numDocs; docId++) { + index(engine, docId); + if (frequently()) { + engine.flush(randomBoolean(), randomBoolean()); + } + } + engine.flush(false, randomBoolean()); + int numSnapshots = between(1, 10); + final List snapshots = new ArrayList<>(); + for (int i = 0; i < numSnapshots; i++) { + snapshots.add(engine.acquireSafeIndexCommit()); // taking snapshots from the safe commit. + } + globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); + engine.syncTranslog(); + final List commits = DirectoryReader.listCommits(store.directory()); + for (int i = 0; i < numSnapshots - 1; i++) { + snapshots.get(i).close(); + // pending snapshots - should not release any commit.
+ assertThat(DirectoryReader.listCommits(store.directory()), equalTo(commits)); + } + snapshots.get(numSnapshots - 1).close(); // release the last snapshot - delete all except the last commit + assertThat(DirectoryReader.listCommits(store.directory()), hasSize(1)); + } + } + public void testShouldPeriodicallyFlush() throws Exception { assertThat("Empty engine does not need flushing", engine.shouldPeriodicallyFlush(), equalTo(false)); int numDocs = between(10, 100); @@ -4345,4 +4380,60 @@ public void testShouldPeriodicallyFlush() throws Exception { assertThat(engine.getLastCommittedSegmentInfos(), not(sameInstance(lastCommitInfo))); assertThat(engine.getTranslog().uncommittedOperations(), equalTo(0)); } + + + public void testStressUpdateSameDocWhileGettingIt() throws IOException, InterruptedException { + final int iters = randomIntBetween(1, 15); + for (int i = 0; i < iters; i++) { + // this is a reproduction of https://github.com/elastic/elasticsearch/issues/28714 + try (Store store = createStore(); InternalEngine engine = createEngine(store, createTempDir())) { + final IndexSettings indexSettings = engine.config().getIndexSettings(); + final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData()) + .settings(Settings.builder().put(indexSettings.getSettings()) + .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), TimeValue.timeValueMillis(1))).build(); + engine.engineConfig.getIndexSettings().updateIndexMetaData(indexMetaData); + engine.onSettingsChanged(); + ParsedDocument document = testParsedDocument(Integer.toString(0), null, testDocumentWithTextField(), SOURCE, null); + final Engine.Index doc = new Engine.Index(newUid(document), document, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, + Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false); + // first index an append only document and then delete it, such that we have it in the tombstones + engine.index(doc); + engine.delete(new Engine.Delete(doc.type(), doc.id(), doc.uid())); + + // now index more append only docs and refresh so we re-enable the optimization for unsafe version map + ParsedDocument document1 = testParsedDocument(Integer.toString(1), null, testDocumentWithTextField(), SOURCE, null); + engine.index(new Engine.Index(newUid(document1), document1, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, + Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false)); + engine.refresh("test"); + ParsedDocument document2 = testParsedDocument(Integer.toString(2), null, testDocumentWithTextField(), SOURCE, null); + engine.index(new Engine.Index(newUid(document2), document2, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, + Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false)); + engine.refresh("test"); + ParsedDocument document3 = testParsedDocument(Integer.toString(3), null, testDocumentWithTextField(), SOURCE, null); + final Engine.Index doc3 = new Engine.Index(newUid(document3), document3, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, + Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false); + engine.index(doc3); + engine.engineConfig.setEnableGcDeletes(true); + // once we are here the version map is unsafe again and we need to do a refresh inside the get calls to ensure we + // de-optimize. We also enabled GCDeletes which now causes pruning tombstones inside that refresh that is done internally + // to ensure we de-optimize. One get call will prune and the other will try to lock the version map concurrently while + holding the lock that pruneTombstones needs and we have a deadlock + CountDownLatch awaitStarted = new CountDownLatch(1); + Thread thread = new Thread(() -> { + awaitStarted.countDown(); + try (Engine.GetResult getResult = engine.get(new Engine.Get(true, doc3.type(), doc3.id(), doc3.uid()), + engine::acquireSearcher)) { + assertTrue(getResult.exists()); + } + }); + thread.start(); + awaitStarted.await(); + try (Engine.GetResult getResult = engine.get(new Engine.Get(true, doc.type(), doc.id(), doc.uid()), + engine::acquireSearcher)) { + assertFalse(getResult.exists()); + } + thread.join(); + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java index 96ed042354bb2..8bfe256fe0b8a 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.Assertions; import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -37,6 +36,9 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.StreamSupport; public class LiveVersionMapTests extends ESTestCase { @@ -84,19 +86,21 @@ private BytesRef uid(String string) { public void testBasics() throws IOException { LiveVersionMap map = new LiveVersionMap(); try (Releasable r = map.acquireLock(uid("test"))) { - map.putUnderLock(uid("test"), new VersionValue(1, 1, 1)); - assertEquals(new VersionValue(1, 1, 1), map.getUnderLock(uid("test"))); + map.putUnderLock(uid("test"), new VersionValue(1,1,1)); + assertEquals(new VersionValue(1,1,1), map.getUnderLock(uid("test"))); map.beforeRefresh(); - assertEquals(new VersionValue(1, 1, 1), map.getUnderLock(uid("test"))); + assertEquals(new VersionValue(1,1,1), map.getUnderLock(uid("test"))); map.afterRefresh(randomBoolean()); assertNull(map.getUnderLock(uid("test"))); - map.putUnderLock(uid("test"), new DeleteVersionValue(1, 1, 1, Long.MAX_VALUE)); - assertEquals(new DeleteVersionValue(1, 1, 1, Long.MAX_VALUE), map.getUnderLock(uid("test"))); + + + map.putUnderLock(uid("test"), new DeleteVersionValue(1,1,1,1)); + assertEquals(new DeleteVersionValue(1,1,1,1), map.getUnderLock(uid("test"))); map.beforeRefresh(); - assertEquals(new DeleteVersionValue(1, 1, 1, Long.MAX_VALUE), map.getUnderLock(uid("test"))); + assertEquals(new DeleteVersionValue(1,1,1,1), map.getUnderLock(uid("test"))); map.afterRefresh(randomBoolean()); - assertEquals(new DeleteVersionValue(1, 1, 1, Long.MAX_VALUE), map.getUnderLock(uid("test"))); - map.removeTombstoneUnderLock(uid("test")); + assertEquals(new DeleteVersionValue(1,1,1,1), map.getUnderLock(uid("test"))); + map.pruneTombstones(2, 0); assertNull(map.getUnderLock(uid("test"))); } } @@ -109,6 +113,7 @@ public void testConcurrently() throws IOException, InterruptedException { } List keyList = new ArrayList<>(keySet); ConcurrentHashMap values = new ConcurrentHashMap<>(); + ConcurrentHashMap deletes = new ConcurrentHashMap<>(); LiveVersionMap map = new LiveVersionMap(); int
numThreads = randomIntBetween(2, 5); @@ -116,6 +121,8 @@ CountDownLatch startGun = new CountDownLatch(numThreads); CountDownLatch done = new CountDownLatch(numThreads); int randomValuesPerThread = randomIntBetween(5000, 20000); + AtomicLong clock = new AtomicLong(0); + AtomicLong lastPrunedTimestamp = new AtomicLong(-1); for (int j = 0; j < threads.length; j++) { threads[j] = new Thread(() -> { startGun.countDown(); @@ -128,33 +135,39 @@ try { for (int i = 0; i < randomValuesPerThread; ++i) { BytesRef bytesRef = randomFrom(random(), keyList); + final long clockTick = clock.get(); try (Releasable r = map.acquireLock(bytesRef)) { VersionValue versionValue = values.computeIfAbsent(bytesRef, v -> new VersionValue(randomLong(), randomLong(), randomLong())); boolean isDelete = versionValue instanceof DeleteVersionValue; if (isDelete) { map.removeTombstoneUnderLock(bytesRef); + deletes.remove(bytesRef); } if (isDelete == false && rarely()) { versionValue = new DeleteVersionValue(versionValue.version + 1, versionValue.seqNo + 1, - versionValue.term, Long.MAX_VALUE); + versionValue.term, clock.getAndIncrement()); + deletes.put(bytesRef, (DeleteVersionValue) versionValue); } else { versionValue = new VersionValue(versionValue.version + 1, versionValue.seqNo + 1, versionValue.term); } values.put(bytesRef, versionValue); map.putUnderLock(bytesRef, versionValue); } + if (rarely()) { + map.pruneTombstones(clockTick, 0); + // timestamp we pruned the deletes + lastPrunedTimestamp.updateAndGet(prev -> Math.max(clockTick, prev)); // make sure we track the latest + } } } finally { done.countDown(); } }); threads[j].start(); - - } do { - Map valueMap = new HashMap<>(map.getAllCurrent()); + final Map valueMap = new HashMap<>(map.getAllCurrent()); map.beforeRefresh(); valueMap.forEach((k, v) -> { try (Releasable r = map.acquireLock(k)) { @@ -190,13 +203,33 @@ assertNotNull(versionValue); assertEquals(v, versionValue); }); + Runnable assertTombstones = () -> + map.getAllTombstones().entrySet().forEach(e -> { + VersionValue versionValue = values.get(e.getKey()); + assertNotNull(versionValue); + assertEquals(e.getValue(), versionValue); + assertTrue(versionValue instanceof DeleteVersionValue); + }); + assertTombstones.run(); + map.beforeRefresh(); + assertTombstones.run(); + map.afterRefresh(false); + assertTombstones.run(); - map.getAllTombstones().forEach(e -> { - VersionValue versionValue = values.get(e.getKey()); - assertNotNull(versionValue); - assertEquals(e.getValue(), versionValue); - assertTrue(versionValue instanceof DeleteVersionValue); + deletes.entrySet().forEach(e -> { + try (Releasable r = map.acquireLock(e.getKey())) { + VersionValue value = map.getUnderLock(e.getKey()); + // here we keep track of the deletes and ensure that all deletes that are not visible anymore i.e.
not in the map + // have a timestamp that is smaller than or equal to the maximum timestamp that we pruned on + if (value == null) { + assertTrue(e.getValue().time + " > " + lastPrunedTimestamp.get(), e.getValue().time <= lastPrunedTimestamp.get()); + } else { + assertEquals(value, e.getValue()); + } + } }); + map.pruneTombstones(clock.incrementAndGet(), 0); + assertEquals(0, StreamSupport.stream(map.getAllTombstones().entrySet().spliterator(), false).count()); } public void testCarryOnSafeAccess() throws IOException { @@ -258,4 +291,84 @@ public void testRefreshTransition() throws IOException { assertTrue(map.isSafeAccessRequired()); } } + + public void testAddAndDeleteRefreshConcurrently() throws IOException, InterruptedException { + LiveVersionMap map = new LiveVersionMap(); + int numIters = randomIntBetween(1000, 5000); + AtomicBoolean done = new AtomicBoolean(false); + AtomicLong version = new AtomicLong(); + CountDownLatch start = new CountDownLatch(2); + BytesRef uid = uid("1"); + VersionValue initialVersion = new VersionValue(version.incrementAndGet(), 1, 1); + try (Releasable ignore = map.acquireLock(uid)) { + map.putUnderLock(uid, initialVersion); + } + Thread t = new Thread(() -> { + start.countDown(); + try { + start.await(); + VersionValue nextVersionValue = initialVersion; + for (int i = 0; i < numIters; i++) { + try (Releasable ignore = map.acquireLock(uid)) { + VersionValue underLock = map.getUnderLock(uid); + if (underLock != null) { + assertEquals(underLock, nextVersionValue); + } else { + underLock = nextVersionValue; + } + if (underLock.isDelete()) { + nextVersionValue = new VersionValue(version.incrementAndGet(), 1, 1); + } else if (randomBoolean()) { + nextVersionValue = new VersionValue(version.incrementAndGet(), 1, 1); + } else { + nextVersionValue = new DeleteVersionValue(version.incrementAndGet(), 1, 1, 0); + } + map.putUnderLock(uid, nextVersionValue); + } + } + } catch (Exception e) { + throw new AssertionError(e); + } finally { + done.set(true); + } + }); + t.start(); + start.countDown(); + while(done.get() == false) { + map.beforeRefresh(); + Thread.yield(); + map.afterRefresh(false); + } + t.join(); + + try (Releasable ignore = map.acquireLock(uid)) { + VersionValue underLock = map.getUnderLock(uid); + if (underLock != null) { + assertEquals(version.get(), underLock.version); + } + } + } + + public void testPruneTombstonesWhileLocked() throws InterruptedException, IOException { + LiveVersionMap map = new LiveVersionMap(); + BytesRef uid = uid("1"); + try (Releasable ignore = map.acquireLock(uid)) { + map.putUnderLock(uid, new DeleteVersionValue(0, 0, 0, 0)); + map.beforeRefresh(); // refresh otherwise we won't prune since it's tracked by the current map + map.afterRefresh(false); + Thread thread = new Thread(() -> { + map.pruneTombstones(Long.MAX_VALUE, 0); + }); + thread.start(); + thread.join(); + assertEquals(1, map.getAllTombstones().size()); + } + Thread thread = new Thread(() -> { + map.pruneTombstones(Long.MAX_VALUE, 0); + }); + thread.start(); + thread.join(); + assertEquals(0, map.getAllTombstones().size()); + } } diff --git a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 4cb9e155c23ba..b4b16537ec22c 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
@@ -536,7 +536,7 @@ public void onFailure(Exception e) { listener.onFailure(e); } }, - ThreadPool.Names.INDEX); + ThreadPool.Names.INDEX, request); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java index ab5192dfc3e32..3d961d7f422c0 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java @@ -35,6 +35,7 @@ import org.junit.BeforeClass; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Set; @@ -48,10 +49,15 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; +import java.util.stream.Collectors; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; @@ -122,7 +128,7 @@ public void onResponse(Releasable releasable) { public void run() { latch.countDown(); try { - permits.acquire(future, threadPoolName, forceExecution); + permits.acquire(future, threadPoolName, forceExecution, ""); } catch (DummyException dummyException) { // ok, notify future assertTrue(failingListener); @@ -176,7 +182,7 @@ public void run() { public void testOperationsInvokedImmediatelyIfNoBlock() throws ExecutionException, InterruptedException { PlainActionFuture future = new PlainActionFuture<>(); - permits.acquire(future, ThreadPool.Names.GENERIC, true); + permits.acquire(future, ThreadPool.Names.GENERIC, true, ""); assertTrue(future.isDone()); future.get().close(); } @@ -184,7 +190,7 @@ public void testOperationsIfClosed() throws ExecutionException, InterruptedException { PlainActionFuture future = new PlainActionFuture<>(); permits.close(); - permits.acquire(future, ThreadPool.Names.GENERIC, true); + permits.acquire(future, ThreadPool.Names.GENERIC, true, ""); ExecutionException exception = expectThrows(ExecutionException.class, future::get); assertThat(exception.getCause(), instanceOf(IndexShardClosedException.class)); } @@ -198,7 +204,7 @@ public void testBlockIfClosed() throws ExecutionException, InterruptedException public void testOperationsDelayedIfBlock() throws ExecutionException, InterruptedException, TimeoutException { PlainActionFuture future = new PlainActionFuture<>(); try (Releasable ignored = blockAndWait()) { - permits.acquire(future, ThreadPool.Names.GENERIC, true); + permits.acquire(future, ThreadPool.Names.GENERIC, true, ""); assertFalse(future.isDone()); } future.get(1, TimeUnit.HOURS).close(); @@ -245,8 +251,8 @@ public void onResponse(Releasable releasable) { context.putHeader("foo", "bar"); context.putTransient("bar", "baz"); // test both with and without an executor name - permits.acquire(future, ThreadPool.Names.GENERIC, true); - permits.acquire(future2, null, true); + permits.acquire(future, ThreadPool.Names.GENERIC, true, ""); + permits.acquire(future2, null, true, ""); }
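+        // the new trailing argument to acquire(...) is a free-form debug label recorded with each permit; it is left empty at most call sites in this file and only asserted on in testPermitTraceCapturing below, where getActiveOperations() must surface the labels of all outstanding permits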
assertFalse(future.isDone()); } @@ -329,7 +335,7 @@ public void onFailure(Exception e) { } }, ThreadPool.Names.GENERIC, - false)); + false, "")); thread.start(); assertFalse(delayed.get()); releaseBlock.countDown(); @@ -387,7 +393,7 @@ public void onFailure(Exception e) { } }, ThreadPool.Names.GENERIC, - false); + false, ""); }); secondOperationThread.start(); @@ -436,7 +442,7 @@ public void onFailure(Exception e) { } }, ThreadPool.Names.GENERIC, - false); + false, ""); }); thread.start(); threads.add(thread); @@ -490,12 +496,12 @@ public void onFailure(Exception e) { public void testActiveOperationsCount() throws ExecutionException, InterruptedException { PlainActionFuture future1 = new PlainActionFuture<>(); - permits.acquire(future1, ThreadPool.Names.GENERIC, true); + permits.acquire(future1, ThreadPool.Names.GENERIC, true, ""); assertTrue(future1.isDone()); assertThat(permits.getActiveOperationsCount(), equalTo(1)); PlainActionFuture future2 = new PlainActionFuture<>(); - permits.acquire(future2, ThreadPool.Names.GENERIC, true); + permits.acquire(future2, ThreadPool.Names.GENERIC, true, ""); assertTrue(future2.isDone()); assertThat(permits.getActiveOperationsCount(), equalTo(2)); @@ -511,7 +517,7 @@ public void testActiveOperationsCount() throws ExecutionException, InterruptedEx } PlainActionFuture future3 = new PlainActionFuture<>(); - permits.acquire(future3, ThreadPool.Names.GENERIC, true); + permits.acquire(future3, ThreadPool.Names.GENERIC, true, ""); assertTrue(future3.isDone()); assertThat(permits.getActiveOperationsCount(), equalTo(1)); future3.get().close(); @@ -594,7 +600,7 @@ public void onFailure(Exception e) { } }, ThreadPool.Names.GENERIC, - false)); + false, "")); assertThat(e, hasToString(containsString("failed to obtain permit but operations are not delayed"))); permits.semaphore.release(IndexShardOperationPermits.TOTAL_PERMITS); } @@ -645,8 +651,37 @@ public void onFailure(Exception e) { } }, ThreadPool.Names.GENERIC, - false); + false, ""); }; } + public void testPermitTraceCapturing() throws ExecutionException, InterruptedException { + final PlainActionFuture listener1 = new PlainActionFuture<>(); + permits.acquire(listener1, null, false, "listener1"); + final PlainActionFuture listener2 = new PlainActionFuture<>(); + permits.acquire(listener2, null, false, "listener2"); + + assertThat(permits.getActiveOperationsCount(), equalTo(2)); + List messages = permits.getActiveOperations().stream().collect(Collectors.toList()); + assertThat(messages, hasSize(2)); + assertThat(messages, containsInAnyOrder(Arrays.asList(containsString("listener1"), containsString("listener2")))); + + if (randomBoolean()) { + listener1.get().close(); + assertThat(permits.getActiveOperationsCount(), equalTo(1)); + messages = permits.getActiveOperations().stream().collect(Collectors.toList()); + assertThat(messages, hasSize(1)); + assertThat(messages, contains(containsString("listener2"))); + listener2.get().close(); + } else { + listener2.get().close(); + assertThat(permits.getActiveOperationsCount(), equalTo(1)); + messages = permits.getActiveOperations().stream().collect(Collectors.toList()); + assertThat(messages, hasSize(1)); + assertThat(messages, contains(containsString("listener1"))); + listener1.get().close(); + } + assertThat(permits.getActiveOperationsCount(), equalTo(0)); + assertThat(permits.getActiveOperations(), emptyIterable()); + } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java 
b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index f629355beadc9..36740f3aa3cc1 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -151,7 +151,6 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThan; @@ -283,14 +282,14 @@ public void testClosesPreventsNewOperations() throws InterruptedException, Execu closeShards(indexShard); assertThat(indexShard.getActiveOperationsCount(), equalTo(0)); try { - indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.INDEX); + indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.INDEX, ""); fail("we should not be able to increment anymore"); } catch (IndexShardClosedException e) { // expected } try { indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, null, - ThreadPool.Names.INDEX); + ThreadPool.Names.INDEX, ""); fail("we should not be able to increment anymore"); } catch (IndexShardClosedException e) { // expected @@ -301,7 +300,7 @@ public void testRejectOperationPermitWithHigherTermWhenNotStarted() throws IOExc IndexShard indexShard = newShard(false); expectThrows(IndexShardNotStartedException.class, () -> indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm() + randomIntBetween(1, 100), - SequenceNumbers.UNASSIGNED_SEQ_NO, null, ThreadPool.Names.INDEX)); + SequenceNumbers.UNASSIGNED_SEQ_NO, null, ThreadPool.Names.INDEX, "")); closeShards(indexShard); } @@ -314,6 +313,7 @@ public void testPrimaryPromotionDelaysOperations() throws IOException, BrokenBar final CountDownLatch operationLatch = new CountDownLatch(1); final List threads = new ArrayList<>(); for (int i = 0; i < operations; i++) { + final String id = "t_" + i; final Thread thread = new Thread(() -> { try { barrier.await(); @@ -340,7 +340,7 @@ public void onFailure(Exception e) { throw new RuntimeException(e); } }, - ThreadPool.Names.INDEX); + ThreadPool.Names.INDEX, id); }); thread.start(); threads.add(thread); @@ -370,6 +370,7 @@ public void onFailure(Exception e) { final AtomicLong counter = new AtomicLong(); final List delayedThreads = new ArrayList<>(); for (int i = 0; i < delayedOperations; i++) { + final String id = "d_" + i; final Thread thread = new Thread(() -> { try { delayedOperationsBarrier.await(); @@ -390,7 +391,7 @@ public void onFailure(Exception e) { throw new RuntimeException(e); } }, - ThreadPool.Names.INDEX); + ThreadPool.Names.INDEX, id); }); thread.start(); delayedThreads.add(thread); @@ -504,7 +505,7 @@ public void onFailure(Exception e) { throw new RuntimeException(e); } }, - ThreadPool.Names.GENERIC); + ThreadPool.Names.GENERIC, ""); latch.await(); assertThat(indexShard.getLocalCheckpoint(), equalTo((long) maxSeqNo)); @@ -548,7 +549,7 @@ public void onFailure(Exception e) { throw new RuntimeException(e); } }, - ThreadPool.Names.GENERIC); + ThreadPool.Names.GENERIC, ""); latch.await(); assertThat(indexShard.getTranslog().getGeneration().translogFileGeneration, equalTo(currentTranslogGeneration + 1)); @@ -581,7 +582,7 @@ public void testOperationPermitsOnPrimaryShards() throws InterruptedException, E assertEquals(0, indexShard.getActiveOperationsCount()); if 
(indexShard.routingEntry().isRelocationTarget() == false) { try { - indexShard.acquireReplicaOperationPermit(primaryTerm, indexShard.getGlobalCheckpoint(), null, ThreadPool.Names.INDEX); + indexShard.acquireReplicaOperationPermit(primaryTerm, indexShard.getGlobalCheckpoint(), null, ThreadPool.Names.INDEX, ""); fail("shard shouldn't accept operations as replica"); } catch (IllegalStateException ignored) { @@ -600,14 +601,14 @@ public void testOperationPermitsOnPrimaryShards() throws InterruptedException, E private Releasable acquirePrimaryOperationPermitBlockingly(IndexShard indexShard) throws ExecutionException, InterruptedException { PlainActionFuture fut = new PlainActionFuture<>(); - indexShard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.INDEX); + indexShard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.INDEX, ""); return fut.get(); } private Releasable acquireReplicaOperationPermitBlockingly(IndexShard indexShard, long opPrimaryTerm) throws ExecutionException, InterruptedException { PlainActionFuture fut = new PlainActionFuture<>(); - indexShard.acquireReplicaOperationPermit(opPrimaryTerm, indexShard.getGlobalCheckpoint(), fut, ThreadPool.Names.INDEX); + indexShard.acquireReplicaOperationPermit(opPrimaryTerm, indexShard.getGlobalCheckpoint(), fut, ThreadPool.Names.INDEX, ""); return fut.get(); } @@ -654,7 +655,8 @@ public void testOperationPermitOnReplicaShards() throws Exception { assertEquals(0, indexShard.getActiveOperationsCount()); if (shardRouting.primary() == false) { final IllegalStateException e = - expectThrows(IllegalStateException.class, () -> indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.INDEX)); + expectThrows(IllegalStateException.class, + () -> indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.INDEX, "")); assertThat(e, hasToString(containsString("shard " + shardRouting + " is not a primary"))); } @@ -691,7 +693,7 @@ public void onFailure(Exception e) { }; indexShard.acquireReplicaOperationPermit(primaryTerm - 1, SequenceNumbers.UNASSIGNED_SEQ_NO, onLockAcquired, - ThreadPool.Names.INDEX); + ThreadPool.Names.INDEX, ""); assertFalse(onResponse.get()); assertTrue(onFailure.get()); @@ -763,7 +765,7 @@ private void finish() { newPrimaryTerm, newGlobalCheckPoint, listener, - ThreadPool.Names.SAME); + ThreadPool.Names.SAME, ""); } catch (Exception e) { listener.onFailure(e); } @@ -902,7 +904,7 @@ public void onFailure(Exception e) { } }, - ThreadPool.Names.SAME); + ThreadPool.Names.SAME, ""); latch.await(); @@ -956,7 +958,7 @@ public void onFailure(final Exception e) { } }, - ThreadPool.Names.SAME); + ThreadPool.Names.SAME, ""); latch.await(); if (globalCheckpointOnReplica == SequenceNumbers.UNASSIGNED_SEQ_NO @@ -1008,7 +1010,7 @@ public void onFailure(Exception e) { latch.countDown(); } }, - ThreadPool.Names.INDEX); + ThreadPool.Names.INDEX, ""); }; final long firstIncrement = 1 + (randomBoolean() ? 
0 : 1); @@ -1146,12 +1148,12 @@ public void testShardStats() throws IOException { assertEquals(shard.shardPath().getRootStatePath().toString(), stats.getStatePath()); assertEquals(shard.shardPath().isCustomDataPath(), stats.isCustomDataPath()); - if (randomBoolean() || true) { // try to serialize it to ensure values survive the serialization - BytesStreamOutput out = new BytesStreamOutput(); - stats.writeTo(out); - StreamInput in = out.bytes().streamInput(); - stats = ShardStats.readShardStats(in); - } + // try to serialize it to ensure values survive the serialization + BytesStreamOutput out = new BytesStreamOutput(); + stats.writeTo(out); + StreamInput in = out.bytes().streamInput(); + stats = ShardStats.readShardStats(in); + XContentBuilder builder = jsonBuilder(); builder.startObject(); stats.toXContent(builder, EMPTY_PARAMS); @@ -1369,7 +1371,7 @@ public void onResponse(Releasable releasable) { super.onResponse(releasable); } }; - shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.INDEX); + shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.INDEX, "i_" + i); onLockAcquiredActions.add(onLockAcquired); } @@ -2926,9 +2928,14 @@ public void testSegmentMemoryTrackedWithRandomSearchers() throws Exception { if (randomBoolean() && searchers.size() > 1) { // Close one of the searchers at random - Engine.Searcher searcher = searchers.remove(0); - logger.debug("--> {} closing searcher {}", threadName, searcher.source()); - IOUtils.close(searcher); + synchronized (searchers) { + // re-check because it could have decremented after the check + if (searchers.size() > 1) { + Engine.Searcher searcher = searchers.remove(0); + logger.debug("--> {} closing searcher {}", threadName, searcher.source()); + IOUtils.close(searcher); + } + } } } catch (Exception e) { logger.warn("--> got exception: ", e); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java index 5a279ffd97f15..4077d033da9cd 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java @@ -116,7 +116,8 @@ public static Set corruptTranslogFiles(Logger logger, Random random, Colle private static long minTranslogGenUsedInRecovery(Path translogPath) throws IOException { try (NIOFSDirectory directory = new NIOFSDirectory(translogPath.getParent().resolve("index"))) { List commits = DirectoryReader.listCommits(directory); - long globalCheckpoint = Translog.readGlobalCheckpoint(translogPath); + final String translogUUID = commits.get(commits.size() - 1).getUserData().get(Translog.TRANSLOG_UUID_KEY); + long globalCheckpoint = Translog.readGlobalCheckpoint(translogPath, translogUUID); IndexCommit recoveringCommit = CombinedDeletionPolicy.findSafeCommitPoint(commits, globalCheckpoint); return Long.parseLong(recoveringCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 25ecfe3b1ba63..abe72b3c5d558 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -36,6 +36,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Randomness; +import 
diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
index 25ecfe3b1ba63..abe72b3c5d558 100644
--- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
+++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
@@ -36,6 +36,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Randomness;
+import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Tuple;
@@ -120,6 +121,8 @@
 import static org.hamcrest.Matchers.hasToString;
 import static org.hamcrest.Matchers.isIn;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.stub;

 @LuceneTestCase.SuppressFileSystems("ExtrasFS")
 public class TranslogTests extends ESTestCase {
@@ -147,11 +150,19 @@ protected void afterIfSuccessful() throws Exception {
     }

-    protected Translog createTranslog(TranslogConfig config, String translogUUID) throws IOException {
+    protected Translog createTranslog(TranslogConfig config) throws IOException {
+        String translogUUID =
+            Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
         return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()),
-            () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+            () -> SequenceNumbers.NO_OPS_PERFORMED);
     }

+    protected Translog openTranslog(TranslogConfig config, String translogUUID) throws IOException {
+        return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()),
+            () -> SequenceNumbers.NO_OPS_PERFORMED);
+    }
+
+
     private void markCurrentGenAsCommitted(Translog translog) throws IOException {
         long genToCommit = translog.currentFileGeneration();
         long genToRetain = randomLongBetween(translog.getDeletionPolicy().getMinTranslogGenerationForRecovery(), genToCommit);
@@ -194,10 +205,11 @@ public void tearDown() throws Exception {
     }

     private Translog create(Path path) throws IOException {
-        globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO);
+        globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
         final TranslogConfig translogConfig = getTranslogConfig(path);
         final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings());
-        return new Translog(translogConfig, null, deletionPolicy, () -> globalCheckpoint.get());
+        final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId);
+        return new Translog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get());
     }

     private TranslogConfig getTranslogConfig(final Path path) {
@@ -220,7 +232,7 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings setting
         }

         final IndexSettings indexSettings =
-                IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings);
+            IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings);
         return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize);
     }

@@ -357,6 +369,29 @@ protected TranslogStats stats() throws IOException {
         return stats;
     }

+    public void testFindEarliestLastModifiedAge() throws IOException {
+        final int numberOfReaders = scaledRandomIntBetween(1, 10);
+        long fixedTime = randomLongBetween(0, 10000000000000000L);
+        long[] periods = new long[numberOfReaders + 1];
+        long period = randomLongBetween(10000, 1000000);
+        periods[numberOfReaders] = period;
+        TranslogWriter w = mock(TranslogWriter.class);
+        stub(w.getLastModifiedTime()).toReturn(fixedTime - period);
+        assertThat(Translog.findEarliestLastModifiedAge(fixedTime, new ArrayList<>(), w), equalTo(period));
+
+        for (int i = 0; i < numberOfReaders; i++) {
+            periods[i] = randomLongBetween(10000, 1000000);
+        }
+        List readers = new ArrayList<>();
+        for (long l : periods) {
+            TranslogReader r = mock(TranslogReader.class);
+            stub(r.getLastModifiedTime()).toReturn(fixedTime - l);
+            readers.add(r);
+        }
+        assertThat(Translog.findEarliestLastModifiedAge(fixedTime, readers, w), equalTo
+            (LongStream.of(periods).max().orElse(0L)));
+    }
+
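The new testFindEarliestLastModifiedAge above uses Mockito's legacy stub(...).toReturn(...) style. If that reads unfamiliar, when(...).thenReturn(...) is the equivalent and more common form. A self-contained sketch against a hypothetical Clocked interface (not an Elasticsearch type):

```java
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class StubbingSketch {
    /** Hypothetical stand-in for TranslogReader/TranslogWriter. */
    interface Clocked {
        long getLastModifiedTime();
    }

    public static void main(String[] args) {
        Clocked reader = mock(Clocked.class);
        // equivalent to: stub(reader.getLastModifiedTime()).toReturn(1_000L);
        when(reader.getLastModifiedTime()).thenReturn(1_000L);
        long age = 1_500L - reader.getLastModifiedTime();
        System.out.println(age); // prints 500
    }
}
```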
",\"earliest_last_modified_age\":" + stats.getEarliestLastModifiedAge() + "}}")); } } @@ -440,6 +481,7 @@ public void testStats() throws IOException { assertThat(stats.getTranslogSizeInBytes(), equalTo(expectedSizeInBytes)); assertThat(stats.getUncommittedOperations(), equalTo(0)); assertThat(stats.getUncommittedSizeInBytes(), equalTo(firstOperationPosition)); + assertThat(stats.getEarliestLastModifiedAge(), greaterThan(1L)); } } @@ -469,12 +511,12 @@ public void testUncommittedOperations() throws Exception { } public void testTotalTests() { - final TranslogStats total = new TranslogStats(); + final TranslogStats total = new TranslogStats(0, 0, 0, 0, 1); final int n = randomIntBetween(0, 16); final List statsList = new ArrayList<>(n); for (int i = 0; i < n; i++) { final TranslogStats stats = new TranslogStats(randomIntBetween(1, 4096), randomIntBetween(1, 1 << 20), - randomIntBetween(1, 1 << 20), randomIntBetween(1, 4096)); + randomIntBetween(1, 1 << 20), randomIntBetween(1, 4096), randomIntBetween(1, 1 << 20)); statsList.add(stats); total.add(stats); } @@ -491,22 +533,30 @@ public void testTotalTests() { assertThat( total.getUncommittedSizeInBytes(), equalTo(statsList.stream().mapToLong(TranslogStats::getUncommittedSizeInBytes).sum())); + assertThat( + total.getEarliestLastModifiedAge(), + equalTo(1L)); } public void testNegativeNumberOfOperations() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(-1, 1, 1, 1)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(-1, 1, 1, 1, 1)); assertThat(e, hasToString(containsString("numberOfOperations must be >= 0"))); - e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, -1, 1)); + e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, -1, 1, 1)); assertThat(e, hasToString(containsString("uncommittedOperations must be >= 0"))); } public void testNegativeSizeInBytes() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, -1, 1, 1)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, -1, 1, 1, 1)); assertThat(e, hasToString(containsString("translogSizeInBytes must be >= 0"))); - e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, 1, -1)); + e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, 1, -1, 1)); assertThat(e, hasToString(containsString("uncommittedSizeInBytes must be >= 0"))); } + public void testOldestEntryInSeconds() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, 1, 1, -1)); + assertThat(e, hasToString(containsString("earliestLastModifiedAge must be >= 0"))); + } + public void testSnapshot() throws IOException { ArrayList ops = new ArrayList<>(); try (Translog.Snapshot snapshot = translog.newSnapshot()) { @@ -521,7 +571,7 @@ public void testSnapshot() throws IOException { } try (Translog.Snapshot snapshot = translog.newSnapshot(); - Translog.Snapshot snapshot1 = translog.newSnapshot()) { + Translog.Snapshot snapshot1 = translog.newSnapshot()) { assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); assertThat(snapshot.totalOperations(), equalTo(1)); @@ -1235,15 +1285,15 @@ public void testBasicRecovery() throws IOException { translog.close(); if (translogGeneration == null) { - translog = createTranslog(config, null); + translog = createTranslog(config); assertEquals(0, 
translog.stats().estimatedNumberOfOperations()); - assertEquals(1, translog.currentFileGeneration()); + assertEquals(2, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); try(Translog.Snapshot snapshot = translog.newSnapshot()) { assertNull(snapshot.next()); } } else { - translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); + translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED); assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) { @@ -1269,7 +1319,8 @@ public void testRecoveryUncommitted() throws IOException { if (op == prepareOp) { translogGeneration = translog.getGeneration(); translog.rollGeneration(); - assertEquals("expected this to be the first commit", 1L, translogGeneration.translogFileGeneration); + assertEquals("expected this to be the first roll (1 gen is on creation, 2 when opened)", + 2L, translogGeneration.translogFileGeneration); assertNotNull(translogGeneration.translogUUID); } } @@ -1281,7 +1332,7 @@ public void testRecoveryUncommitted() throws IOException { TranslogConfig config = translog.getConfig(); final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); @@ -1295,9 +1346,10 @@ public void testRecoveryUncommitted() throws IOException { } } if (randomBoolean()) { // recover twice - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { assertNotNull(translogGeneration); - assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration()); + assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", + translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) { int upTo = sync ? 
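The widened TranslogStats constructor rejects a negative earliestLastModifiedAge, which the tests above exercise. For reference, a minimal sketch of the fail-fast validation pattern together with the assertThrows test idiom; every name here is illustrative, not the Elasticsearch API:

```java
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import org.junit.Test;

public class FailFastValidationSketch {
    /** Illustrative stats holder that validates its single counter. */
    static final class Stats {
        final long earliestLastModifiedAge;

        Stats(long earliestLastModifiedAge) {
            if (earliestLastModifiedAge < 0) {
                throw new IllegalArgumentException("earliestLastModifiedAge must be >= 0");
            }
            this.earliestLastModifiedAge = earliestLastModifiedAge;
        }
    }

    @Test
    public void negativeAgeIsRejected() {
        IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> new Stats(-1));
        assertTrue(e.getMessage().contains("must be >= 0"));
    }
}
```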
     public void testSnapshot() throws IOException {
         ArrayList ops = new ArrayList<>();
         try (Translog.Snapshot snapshot = translog.newSnapshot()) {
@@ -521,7 +571,7 @@ public void testSnapshot() throws IOException {
         }

         try (Translog.Snapshot snapshot = translog.newSnapshot();
-                Translog.Snapshot snapshot1 = translog.newSnapshot()) {
+             Translog.Snapshot snapshot1 = translog.newSnapshot()) {

             assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
             assertThat(snapshot.totalOperations(), equalTo(1));
@@ -1235,15 +1285,15 @@ public void testBasicRecovery() throws IOException {
         translog.close();
         if (translogGeneration == null) {
-            translog = createTranslog(config, null);
+            translog = createTranslog(config);
             assertEquals(0, translog.stats().estimatedNumberOfOperations());
-            assertEquals(1, translog.currentFileGeneration());
+            assertEquals(2, translog.currentFileGeneration());
             assertFalse(translog.syncNeeded());
             try(Translog.Snapshot snapshot = translog.newSnapshot()) {
                 assertNull(snapshot.next());
             }
         } else {
-            translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+            translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED);
             assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration());
             assertFalse(translog.syncNeeded());
             try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) {
@@ -1269,7 +1319,8 @@ public void testRecoveryUncommitted() throws IOException {
             if (op == prepareOp) {
                 translogGeneration = translog.getGeneration();
                 translog.rollGeneration();
-                assertEquals("expected this to be the first commit", 1L, translogGeneration.translogFileGeneration);
+                assertEquals("expected this to be the first roll (1 gen is on creation, 2 when opened)",
+                    2L, translogGeneration.translogFileGeneration);
                 assertNotNull(translogGeneration.translogUUID);
             }
         }
@@ -1281,7 +1332,7 @@ public void testRecoveryUncommitted() throws IOException {
         TranslogConfig config = translog.getConfig();
         final String translogUUID = translog.getTranslogUUID();
         final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
-        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
             assertNotNull(translogGeneration);
             assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
             assertFalse(translog.syncNeeded());
@@ -1295,9 +1346,10 @@ public void testRecoveryUncommitted() throws IOException {
             }
         }
         if (randomBoolean()) { // recover twice
-            try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+            try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
                 assertNotNull(translogGeneration);
-                assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
+                assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice",
+                    translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
                 assertFalse(translog.syncNeeded());
                 try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) {
                     int upTo = sync ? translogOperations : prepareOp;
@@ -1323,7 +1375,8 @@ public void testRecoveryUncommittedFileExists() throws IOException {
             if (op == prepareOp) {
                 translogGeneration = translog.getGeneration();
                 translog.rollGeneration();
-                assertEquals("expected this to be the first commit", 1L, translogGeneration.translogFileGeneration);
+                assertEquals("expected this to be the first roll (1 gen is on creation, 2 when opened)",
+                    2L, translogGeneration.translogFileGeneration);
                 assertNotNull(translogGeneration.translogUUID);
             }
         }
@@ -1339,7 +1392,7 @@ public void testRecoveryUncommittedFileExists() throws IOException {
         final String translogUUID = translog.getTranslogUUID();
         final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();

-        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
             assertNotNull(translogGeneration);
             assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
             assertFalse(translog.syncNeeded());
@@ -1354,9 +1407,10 @@
         }

         if (randomBoolean()) { // recover twice
-            try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+            try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
                 assertNotNull(translogGeneration);
-                assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
+                assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice",
+                    translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
                 assertFalse(translog.syncNeeded());
                 try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) {
                     int upTo = sync ? translogOperations : prepareOp;
@@ -1381,7 +1435,8 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException {
             if (op == prepareOp) {
                 translogGeneration = translog.getGeneration();
                 translog.rollGeneration();
-                assertEquals("expected this to be the first commit", 1L, translogGeneration.translogFileGeneration);
+                assertEquals("expected this to be the first roll (1 gen is on creation, 2 when opened)",
+                    2L, translogGeneration.translogFileGeneration);
                 assertNotNull(translogGeneration.translogUUID);
             }
         }
@@ -1391,19 +1446,19 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException {
         TranslogConfig config = translog.getConfig();
         Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
         Checkpoint read = Checkpoint.read(ckp);
-        Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0, SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
+        Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0, SequenceNumbers.NO_OPS_PERFORMED, 0);
         Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
         final String translogUUID = translog.getTranslogUUID();
         final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
-        try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+        try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
             fail("corrupted");
         } catch (IllegalStateException ex) {
-            assertEquals("Checkpoint file translog-2.ckp already exists but has corrupted content expected: Checkpoint{offset=3123, " +
-                "numOps=55, generation=2, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-2, minTranslogGeneration=1} but got: Checkpoint{offset=0, numOps=0, " +
-                "generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-2, minTranslogGeneration=0}", ex.getMessage());
+            assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3123, " +
+                "numOps=55, generation=3, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-1, minTranslogGeneration=1} but got: Checkpoint{offset=0, numOps=0, " +
+                "generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1, minTranslogGeneration=0}", ex.getMessage());
         }
         Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
-        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
             assertNotNull(translogGeneration);
             assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
             assertFalse(translog.syncNeeded());
@@ -1480,12 +1535,12 @@ public void testOpenForeignTranslog() throws IOException {
         final String foreignTranslog = randomRealisticUnicodeOfCodepointLengthBetween(1,
             translogGeneration.translogUUID.length());
         try {
-            new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+            new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED);
             fail("translog doesn't belong to this UUID");
         } catch (TranslogCorruptedException ex) {
         }
-        this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+        this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
         try (Translog.Snapshot snapshot = this.translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) {
             for (int i = firstUncommitted; i < translogOperations; i++) {
                 Translog.Operation next = snapshot.next();
@@ -1671,7 +1726,7 @@ public void testFailFlush() throws IOException {
         translog.close(); // we are closed
         final String translogUUID = translog.getTranslogUUID();
         final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
-        try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+        try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
             assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration());
             assertFalse(tlog.syncNeeded());
@@ -1807,7 +1862,7 @@ protected void afterAdd() throws IOException {
             }
         }
         try (Translog tlog =
-                 new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+                 new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED);
              Translog.Snapshot snapshot = tlog.newSnapshot()) {
             if (writtenOperations.size() != snapshot.totalOperations()) {
                 for (int i = 0; i < threadCount; i++) {
@@ -1853,7 +1908,7 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException {
         final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1);
         deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE));
         deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration);
-        translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+        translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
         assertThat(translog.getMinFileGeneration(), equalTo(1L));
         // no trimming done yet, just recovered
         for (long gen = 1; gen < translog.currentFileGeneration(); gen++) {
@@ -1909,7 +1964,7 @@ public void testRecoveryFromFailureOnTrimming() throws IOException {
             final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1);
             deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE));
             deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration);
-            try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+            try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
                 // we don't know when things broke exactly
                 assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L));
                 assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(comittedGeneration));
@@ -1957,25 +2012,28 @@ public void onceFailedFailAlways() {
     private Translog getFailableTranslog(final FailSwitch fail, final TranslogConfig config, final boolean partialWrites,
                                          final boolean throwUnknownException, String translogUUID,
                                          final TranslogDeletionPolicy deletionPolicy) throws IOException {
-        return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO) {
+        final ChannelFactory channelFactory = (file, openOption) -> {
+            FileChannel channel = FileChannel.open(file, openOption);
+            boolean success = false;
+            try {
+                final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation
+                ThrowingFileChannel throwingFileChannel = new ThrowingFileChannel(fail, isCkpFile ? false : partialWrites, throwUnknownException, channel);
+                success = true;
+                return throwingFileChannel;
+            } finally {
+                if (success == false) {
+                    IOUtils.closeWhileHandlingException(channel);
+                }
+            }
+        };
+        if (translogUUID == null) {
+            translogUUID = Translog.createEmptyTranslog(
+                config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, channelFactory);
+        }
+        return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED) {
             @Override
             ChannelFactory getChannelFactory() {
-                final ChannelFactory factory = super.getChannelFactory();
-
-                return (file, openOption) -> {
-                    FileChannel channel = factory.open(file, openOption);
-                    boolean success = false;
-                    try {
-                        final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation
-                        ThrowingFileChannel throwingFileChannel = new ThrowingFileChannel(fail, isCkpFile ? false : partialWrites, throwUnknownException, channel);
-                        success = true;
-                        return throwingFileChannel;
-                    } finally {
-                        if (success == false) {
-                            IOUtils.closeWhileHandlingException(channel);
-                        }
-                    }
-                };
+                return channelFactory;
             }

             @Override
@@ -2079,11 +2137,11 @@ private static final class UnknownException extends RuntimeException {
     public void testFailWhileCreateWriteWithRecoveredTLogs() throws IOException {
         Path tempDir = createTempDir();
         TranslogConfig config = getTranslogConfig(tempDir);
-        Translog translog = createTranslog(config, null);
+        Translog translog = createTranslog(config);
         translog.add(new Translog.Index("test", "boom", 0, "boom".getBytes(Charset.forName("UTF-8"))));
         translog.close();
         try {
-            new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO) {
+            new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED) {
                 @Override
                 protected TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint)
                     throws IOException {
@@ -2106,7 +2164,7 @@ public void testRecoverWithUnbackedNextGen() throws IOException {
         Checkpoint read = Checkpoint.read(ckp);
         Files.copy(ckp, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)));
         Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
-        try (Translog tlog = createTranslog(config, translog.getTranslogUUID());
+        try (Translog tlog = openTranslog(config, translog.getTranslogUUID());
             Translog.Snapshot snapshot = tlog.newSnapshot()) {
             assertFalse(tlog.syncNeeded());
@@ -2117,7 +2175,7 @@
             tlog.add(new Translog.Index("test", "" + 1, 1, Integer.toString(2).getBytes(Charset.forName("UTF-8"))));
         }

-        try (Translog tlog = createTranslog(config, translog.getTranslogUUID());
+        try (Translog tlog = openTranslog(config, translog.getTranslogUUID());
             Translog.Snapshot snapshot = tlog.newSnapshot()) {
             assertFalse(tlog.syncNeeded());
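The hoisted ChannelFactory in getFailableTranslog above keeps the close-on-failure idiom: if wrapping the freshly opened channel throws, the raw channel must not leak. The same pattern in plain NIO, free of Elasticsearch types; wrap() is a placeholder for a decorator such as ThrowingFileChannel:

```java
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

final class CloseOnFailure {
    static FileChannel openWrapped(Path file) throws IOException {
        FileChannel channel = FileChannel.open(file, StandardOpenOption.READ);
        boolean success = false;
        try {
            FileChannel wrapped = wrap(channel); // may throw
            success = true;
            return wrapped;
        } finally {
            if (success == false) {
                channel.close(); // don't leak the raw channel when wrapping fails
            }
        }
    }

    private static FileChannel wrap(FileChannel delegate) {
        return delegate; // stand-in for a decorating wrapper
    }
}
```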
@@ -2141,7 +2199,7 @@ public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException {
         Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));

         try {
-            Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+            Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED);
             fail("file already exists?");
         } catch (TranslogException ex) {
             // all is well
@@ -2163,7 +2221,7 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException {
         Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
         // we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition
         Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog"));
-        try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+        try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
             assertFalse(tlog.syncNeeded());
             try (Translog.Snapshot snapshot = tlog.newSnapshot()) {
                 for (int i = 0; i < 1; i++) {
@@ -2176,7 +2234,7 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException {
         }

         try {
-            Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+            Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
             fail("file already exists?");
         } catch (TranslogException ex) {
             // all is well
@@ -2282,7 +2340,11 @@ public void testWithRandomException() throws IOException {
         TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy();
         deletionPolicy.setTranslogGenerationOfLastCommit(minGenForRecovery);
         deletionPolicy.setMinTranslogGenerationForRecovery(minGenForRecovery);
-        try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+        if (generationUUID == null) {
+            // we never managed to successfully create a translog, make it
+            generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
+        }
+        try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
              Translog.Snapshot snapshot = translog.newSnapshotFromGen(minGenForRecovery)) {
             assertEquals(syncedDocs.size(), snapshot.totalOperations());
             for (int i = 0; i < syncedDocs.size(); i++) {
@@ -2347,14 +2409,14 @@ public void testPendingDelete() throws IOException {
         final String translogUUID = translog.getTranslogUUID();
         final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings());
         translog.close();
-        translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+        translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
         translog.add(new Translog.Index("test", "2", 1, new byte[]{2}));
         translog.rollGeneration();
         Closeable lock = translog.acquireRetentionLock();
         translog.add(new Translog.Index("test", "3", 2, new byte[]{3}));
         translog.close();
         IOUtils.close(lock);
-        translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+        translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
     }

     public static Translog.Location randomTranslogLocation() {
@@ -2382,7 +2444,7 @@ public void testTranslogOpSerialization() throws Exception {
             null);
         Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm,
-                1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false);
+            1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false);
         Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, true);
         Translog.Index index = new Translog.Index(eIndex, eIndexResult);
@@ -2393,7 +2455,7 @@
         assertEquals(index, serializedIndex);

         Engine.Delete eDelete = new Engine.Delete(doc.type(), doc.id(), newUid(doc), randomSeqNum, randomPrimaryTerm,
-                2, VersionType.INTERNAL, Origin.PRIMARY, 0);
+            2, VersionType.INTERNAL, Origin.PRIMARY, 0);
         Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, true);
         Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult);
@@ -2615,6 +2677,22 @@ public void testAcquiredLockIsPassedToDeletionPolicy() throws IOException {
         }
     }

+    public void testReadGlobalCheckpoint() throws Exception {
+        final String translogUUID = translog.getTranslogUUID();
+        globalCheckpoint.set(randomNonNegativeLong());
+        final int operations = randomIntBetween(1, 100);
+        for (int i = 0; i < operations; i++) {
+            translog.add(new Translog.NoOp(randomNonNegativeLong(), 0, "test'"));
+            if (rarely()) {
+                translog.rollGeneration();
+            }
+        }
+        rollAndCommit(translog);
+        translog.close();
+        assertThat(Translog.readGlobalCheckpoint(translogDir, translogUUID), equalTo(globalCheckpoint.get()));
+        expectThrows(TranslogCorruptedException.class, () -> Translog.readGlobalCheckpoint(translogDir, UUIDs.randomBase64UUID()));
+    }
+
     public void testSnapshotReadOperationInReverse() throws Exception {
         final Deque> views = new ArrayDeque<>();
         views.push(new ArrayList<>());
diff --git a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java
index 3d7ef82ea1256..6561001ad7d86 100644
--- a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java
@@ -27,7 +27,6 @@
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.shard.ShardNotFoundException;
@@ -38,7 +37,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ExecutionException;
-import java.util.stream.Collectors;

 public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase {
@@ -115,7 +113,7 @@ public void testSyncFailsIfOperationIsInFlight() throws InterruptedException, Ex
         SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
         final ShardId shardId = shard.shardId();
         PlainActionFuture fut = new PlainActionFuture<>();
-        shard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.INDEX);
+        shard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.INDEX, "");
         try (Releasable operationLock = fut.get()) {
             SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>();
             flushService.attemptSyncedFlush(shardId, listener);
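Throughout these hunks the listener-based permit API is bridged to blocking code with a PlainActionFuture, now carrying a trailing free-form debug string that identifies the caller. A sketch of that helper, mirroring acquirePrimaryOperationPermitBlockingly earlier in this diff; the helper class name and the debugInfo value are ours:

```java
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.concurrent.ExecutionException;

final class PermitHelper {
    /** Blocks until a primary operation permit is granted; the caller must close the Releasable. */
    static Releasable acquireBlocking(IndexShard shard, String debugInfo)
            throws ExecutionException, InterruptedException {
        PlainActionFuture<Releasable> fut = new PlainActionFuture<>();
        shard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.INDEX, debugInfo);
        return fut.get();
    }
}
```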
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java
index 13d5ea189f069..d65d40e5bcdaa 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java
@@ -19,8 +19,20 @@
 package org.elasticsearch.indices.recovery;

+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.NoMergePolicy;
+import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.IndexShardTestCase;
+import org.elasticsearch.index.translog.Translog;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;

 import static org.hamcrest.Matchers.equalTo;
@@ -74,6 +86,24 @@ public void testGetStartingSeqNo() throws Exception {
                 assertThat(PeerRecoveryTargetService.getStartingSeqNo(logger, recoveryTarget), equalTo(initDocs + moreDocs));
                 recoveryTarget.decRef();
             }
+            // Different translogUUID, fallback to file-based
+            {
+                replica.close("test", false);
+                final List commits = DirectoryReader.listCommits(replica.store().directory());
+                IndexWriterConfig iwc = new IndexWriterConfig(null)
+                    .setCommitOnClose(false)
+                    .setMergePolicy(NoMergePolicy.INSTANCE)
+                    .setOpenMode(IndexWriterConfig.OpenMode.APPEND);
+                try (IndexWriter writer = new IndexWriter(replica.store().directory(), iwc)) {
+                    final Map userData = new HashMap<>(commits.get(commits.size() - 1).getUserData());
+                    userData.put(Translog.TRANSLOG_UUID_KEY, UUIDs.randomBase64UUID());
+                    writer.setLiveCommitData(userData.entrySet());
+                    writer.commit();
+                }
+                final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null);
+                assertThat(PeerRecoveryTargetService.getStartingSeqNo(logger, recoveryTarget), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO));
+                recoveryTarget.decRef();
+            }
         } finally {
             closeShards(replica);
         }
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
index 7ab6925ce57b9..9d0008dce5185 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
@@ -88,6 +88,7 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyObject;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -396,11 +397,11 @@ public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOE
         when(shard.seqNoStats()).thenReturn(mock(SeqNoStats.class));
         when(shard.segmentStats(anyBoolean())).thenReturn(mock(SegmentsStats.class));
         when(shard.state()).thenReturn(IndexShardState.RELOCATED);
-        when(shard.acquireIndexCommit(anyBoolean(), anyBoolean())).thenReturn(mock(Engine.IndexCommitRef.class));
+        when(shard.acquireSafeIndexCommit()).thenReturn(mock(Engine.IndexCommitRef.class));
         doAnswer(invocation -> {
             ((ActionListener)invocation.getArguments()[0]).onResponse(() -> {});
             return null;
-        }).when(shard).acquirePrimaryOperationPermit(any(), anyString());
+        }).when(shard).acquirePrimaryOperationPermit(any(), anyString(), anyObject());
         final AtomicBoolean phase1Called = new AtomicBoolean();
         final AtomicBoolean prepareTargetForTranslogCalled = new AtomicBoolean();
         final AtomicBoolean phase2Called = new AtomicBoolean();
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
index 69176b03942f6..a496664c0260b 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
@@ -32,7 +32,6 @@
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.VersionType;
@@ -44,7 +43,6 @@
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.translog.SnapshotMatchers;
 import org.elasticsearch.index.translog.Translog;
-import org.elasticsearch.index.translog.TranslogConfig;

 import java.util.HashMap;
 import java.util.List;
@@ -52,7 +50,6 @@
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Future;

-import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
@@ -186,7 +183,6 @@ public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception {
             shards.indexDocs(nonFlushedDocs);

             IndexShard replica = shards.getReplicas().get(0);
-            final String translogUUID = replica.getTranslog().getTranslogUUID();
             final String historyUUID = replica.getHistoryUUID();
             Translog.TranslogGeneration translogGeneration = replica.getTranslog().getGeneration();
             shards.removeReplica(replica);
@@ -204,13 +200,8 @@ public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception {
             final String historyUUIDtoUse = UUIDs.randomBase64UUID(random());
             if (randomBoolean()) {
                 // create a new translog
-                final TranslogConfig translogConfig =
-                    new TranslogConfig(replica.shardId(), replica.shardPath().resolveTranslog(), replica.indexSettings(),
-                        BigArrays.NON_RECYCLING_INSTANCE);
-                try (Translog translog = new Translog(translogConfig, null, createTranslogDeletionPolicy(), () -> flushedDocs)) {
-                    translogUUIDtoUse = translog.getTranslogUUID();
-                    translogGenToUse = translog.currentFileGeneration();
-                }
+                translogUUIDtoUse = Translog.createEmptyTranslog(replica.shardPath().resolveTranslog(), flushedDocs, replica.shardId());
+                translogGenToUse = 1;
             } else {
                 translogUUIDtoUse = translogGeneration.translogUUID;
                 translogGenToUse = translogGeneration.translogFileGeneration;
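The new PeerRecoveryTargetServiceTests block above forces the file-based recovery fallback by rewriting the translog UUID stored in the newest Lucene commit. That commit-user-data rewrite is a reusable trick; a sketch assuming a Directory that already holds at least one commit (the class and method names are ours):

```java
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.store.Directory;

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class CommitUserDataRewrite {
    /** Replaces one key in the newest commit's user data and commits the change. */
    static void rewrite(Directory dir, String key, String value) throws IOException {
        List<IndexCommit> commits = DirectoryReader.listCommits(dir);
        IndexWriterConfig iwc = new IndexWriterConfig(null)
            .setCommitOnClose(false)
            .setMergePolicy(NoMergePolicy.INSTANCE)         // keep existing segments untouched
            .setOpenMode(IndexWriterConfig.OpenMode.APPEND);
        try (IndexWriter writer = new IndexWriter(dir, iwc)) {
            Map<String, String> userData = new HashMap<>(commits.get(commits.size() - 1).getUserData());
            userData.put(key, value);
            writer.setLiveCommitData(userData.entrySet());
            writer.commit();
        }
    }
}
```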
source.put("processed", true); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, source); + IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, source); assertThat(simulateDocumentBaseResult.getIngestDocument().getSourceAndMetadata(), equalTo(ingestDocument.getSourceAndMetadata())); assertThat(simulateDocumentBaseResult.getFailure(), nullValue()); } diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java index 9df2a38c6f14b..04285b3432e12 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java @@ -76,7 +76,7 @@ public void setTestIngestDocument() { list.add(null); document.put("list", list); - ingestDocument = new IngestDocument("index", "type", "id", null, null, document); + ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, document); } public void testSimpleGetFieldValue() { diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineConfigurationTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineConfigurationTests.java index d9f9f181b31b6..eb1b7814ab851 100644 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineConfigurationTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/PipelineConfigurationTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.ContextParser; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -60,7 +61,8 @@ public void testParser() throws IOException { bytes = builder.bytes(); } - XContentParser xContentParser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, bytes); + XContentParser xContentParser = xContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes.streamInput()); PipelineConfiguration parsed = parser.parse(xContentParser, null); assertEquals(xContentType, parsed.getXContentType()); assertEquals("{}", XContentHelper.convertToJson(parsed.getConfig(), false, parsed.getXContentType())); diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java index 44c8e78bef703..3247761a548f0 100644 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.VersionType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.hamcrest.CustomTypeSafeMatcher; @@ -157,10 +158,18 @@ public void testExecuteEmptyPipeline() throws Exception { public void testExecutePropagateAllMetaDataUpdates() throws Exception { CompoundProcessor processor = mock(CompoundProcessor.class); 
when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); + long newVersion = randomLong(); + String versionType = randomFrom("internal", "external", "external_gt", "external_gte"); doAnswer((InvocationOnMock invocationOnMock) -> { IngestDocument ingestDocument = (IngestDocument) invocationOnMock.getArguments()[0]; for (IngestDocument.MetaData metaData : IngestDocument.MetaData.values()) { - ingestDocument.setFieldValue(metaData.getFieldName(), "update" + metaData.getFieldName()); + if (metaData == IngestDocument.MetaData.VERSION) { + ingestDocument.setFieldValue(metaData.getFieldName(), newVersion); + } else if (metaData == IngestDocument.MetaData.VERSION_TYPE) { + ingestDocument.setFieldValue(metaData.getFieldName(), versionType); + } else { + ingestDocument.setFieldValue(metaData.getFieldName(), "update" + metaData.getFieldName()); + } } return null; }).when(processor).execute(any()); @@ -175,12 +184,13 @@ public void testExecutePropagateAllMetaDataUpdates() throws Exception { verify(processor).execute(any()); verify(failureHandler, never()).accept(any()); verify(completionHandler, times(1)).accept(true); - assertThat(indexRequest.index(), equalTo("update_index")); assertThat(indexRequest.type(), equalTo("update_type")); assertThat(indexRequest.id(), equalTo("update_id")); assertThat(indexRequest.routing(), equalTo("update_routing")); assertThat(indexRequest.parent(), equalTo("update_parent")); + assertThat(indexRequest.version(), equalTo(newVersion)); + assertThat(indexRequest.versionType(), equalTo(VersionType.fromString(versionType))); } public void testExecuteFailure() throws Exception { @@ -188,13 +198,15 @@ public void testExecuteFailure() throws Exception { when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap())); + doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", + indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); @SuppressWarnings("unchecked") Consumer failureHandler = mock(Consumer.class); @SuppressWarnings("unchecked") Consumer completionHandler = mock(Consumer.class); executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); - verify(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap())); + verify(processor).execute(eqID("_index", "_type", "_id", + indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); verify(failureHandler, times(1)).accept(any(RuntimeException.class)); verify(completionHandler, never()).accept(anyBoolean()); } @@ -207,7 +219,8 @@ public void testExecuteSuccessWithOnFailure() throws Exception { CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.singletonList(processor), Collections.singletonList(new CompoundProcessor(onFailureProcessor))); when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor)); - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); + IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id") + .source(Collections.emptyMap()).setPipeline("_id"); doThrow(new 
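The metadata-propagation test above drives a mocked processor with doAnswer so the stub can inspect and mutate the document handed to it. The same technique reduced to a self-contained sketch, with a hypothetical DocProcessor interface standing in for the ingest processor:

```java
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.HashMap;
import java.util.Map;

public class DoAnswerSketch {
    /** Hypothetical processor that mutates the document handed to it. */
    interface DocProcessor {
        void execute(Map<String, Object> doc) throws Exception;
    }

    public static void main(String[] args) throws Exception {
        DocProcessor processor = mock(DocProcessor.class);
        doAnswer(invocation -> {
            @SuppressWarnings("unchecked")
            Map<String, Object> doc = (Map<String, Object>) invocation.getArguments()[0];
            doc.put("_version", 7L); // the stub mutates its argument, like the test above
            return null;
        }).when(processor).execute(any());

        Map<String, Object> doc = new HashMap<>();
        processor.execute(doc);
        System.out.println(doc.get("_version")); // prints 7
    }
}
```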

     public void testExecuteFailure() throws Exception {
@@ -188,13 +198,15 @@
         when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class)));
         when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor));
         IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
-        doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
+        doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id",
+            indexRequest.version(), indexRequest.versionType(), Collections.emptyMap()));
         @SuppressWarnings("unchecked")
         Consumer failureHandler = mock(Consumer.class);
         @SuppressWarnings("unchecked")
         Consumer completionHandler = mock(Consumer.class);
         executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler);
-        verify(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
+        verify(processor).execute(eqID("_index", "_type", "_id",
+            indexRequest.version(), indexRequest.versionType(), Collections.emptyMap()));
         verify(failureHandler, times(1)).accept(any(RuntimeException.class));
         verify(completionHandler, never()).accept(anyBoolean());
     }
@@ -207,7 +219,8 @@ public void testExecuteSuccessWithOnFailure() throws Exception {
         CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.singletonList(processor),
             Collections.singletonList(new CompoundProcessor(onFailureProcessor)));
         when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor));
-        IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
+        IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id")
+            .source(Collections.emptyMap()).setPipeline("_id");
         doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
         @SuppressWarnings("unchecked")
         Consumer failureHandler = mock(Consumer.class);
@@ -225,14 +238,17 @@ public void testExecuteFailureWithOnFailure() throws Exception {
             Collections.singletonList(new CompoundProcessor(onFailureProcessor)));
         when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor));
         IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
-        doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
-        doThrow(new RuntimeException()).when(onFailureProcessor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
+        doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id",
+            indexRequest.version(), indexRequest.versionType(), Collections.emptyMap()));
+        doThrow(new RuntimeException()).when(onFailureProcessor).execute(eqID("_index", "_type", "_id", indexRequest.version(),
+            indexRequest.versionType(), Collections.emptyMap()));
         @SuppressWarnings("unchecked")
         Consumer failureHandler = mock(Consumer.class);
         @SuppressWarnings("unchecked")
         Consumer completionHandler = mock(Consumer.class);
         executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler);
-        verify(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
+        verify(processor).execute(eqID("_index", "_type", "_id",
+            indexRequest.version(), indexRequest.versionType(), Collections.emptyMap()));
         verify(failureHandler, times(1)).accept(any(RuntimeException.class));
         verify(completionHandler, never()).accept(anyBoolean());
     }
@@ -246,15 +262,19 @@ public void testExecuteFailureWithNestedOnFailure() throws Exception {
             Collections.singletonList(onFailureOnFailureProcessor))));
         when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor));
         IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
-        doThrow(new RuntimeException()).when(onFailureOnFailureProcessor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
-        doThrow(new RuntimeException()).when(onFailureProcessor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
-        doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
+        doThrow(new RuntimeException()).when(onFailureOnFailureProcessor).execute(eqID("_index", "_type", "_id",
+            indexRequest.version(), indexRequest.versionType(), Collections.emptyMap()));
+        doThrow(new RuntimeException()).when(onFailureProcessor).execute(eqID("_index", "_type", "_id",
+            indexRequest.version(), indexRequest.versionType(), Collections.emptyMap()));
+        doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id",
+            indexRequest.version(), indexRequest.versionType(), Collections.emptyMap()));
         @SuppressWarnings("unchecked")
         Consumer failureHandler = mock(Consumer.class);
         @SuppressWarnings("unchecked")
         Consumer completionHandler = mock(Consumer.class);
         executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler);
-        verify(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap()));
+        verify(processor).execute(eqID("_index", "_type", "_id",
+            indexRequest.version(), indexRequest.versionType(), Collections.emptyMap()));
         verify(failureHandler, times(1)).accept(any(RuntimeException.class));
         verify(completionHandler, never()).accept(anyBoolean());
     }
@@ -380,12 +400,20 @@ private IngestDocument eqID(String index, String type, String id, Map source) {
+        return argThat(new IngestDocumentMatcher(index, type, id, version, versionType, source));
+    }
+
     private class IngestDocumentMatcher extends ArgumentMatcher {
         private final IngestDocument ingestDocument;

         IngestDocumentMatcher(String index, String type, String id, Map source) {
-            this.ingestDocument = new IngestDocument(index, type, id, null, null, source);
+            this.ingestDocument = new IngestDocument(index, type, id, null, null, null, null, source);
+        }
+
+        IngestDocumentMatcher(String index, String type, String id, Long version, VersionType versionType, Map source) {
+            this.ingestDocument = new IngestDocument(index, type, id, null, null, version, versionType, source);
         }

         @Override
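The eqID helper above pairs a custom ArgumentMatcher with argThat so stubbing and verification can compare rich objects field by field. A reduced sketch in the same Mockito 1.x style this file uses; ExpectedDoc is a hypothetical stand-in for IngestDocument:

```java
import org.mockito.ArgumentMatcher;

import java.util.Objects;

public class MatcherSketch {
    /** Hypothetical value type standing in for IngestDocument. */
    static final class ExpectedDoc {
        final String index;
        final Long version;

        ExpectedDoc(String index, Long version) {
            this.index = index;
            this.version = version;
        }
    }

    /** Mockito 1.x-style matcher: ArgumentMatcher is a class and matches(Object) is overridden. */
    static class DocMatcher extends ArgumentMatcher<ExpectedDoc> {
        private final ExpectedDoc expected;

        DocMatcher(ExpectedDoc expected) {
            this.expected = expected;
        }

        @Override
        public boolean matches(Object argument) {
            if (argument instanceof ExpectedDoc == false) {
                return false;
            }
            ExpectedDoc actual = (ExpectedDoc) argument;
            return Objects.equals(expected.index, actual.index)
                && Objects.equals(expected.version, actual.version);
        }
    }
    // usage inside a stub or verification: argThat(new DocMatcher(new ExpectedDoc("index", 1L)))
}
```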
times(1)).accept(any(RuntimeException.class)); verify(completionHandler, never()).accept(anyBoolean()); } @@ -380,12 +400,20 @@ private IngestDocument eqID(String index, String type, String id, Map source) { + return argThat(new IngestDocumentMatcher(index, type, id, version, versionType, source)); + } + private class IngestDocumentMatcher extends ArgumentMatcher { private final IngestDocument ingestDocument; IngestDocumentMatcher(String index, String type, String id, Map source) { - this.ingestDocument = new IngestDocument(index, type, id, null, null, source); + this.ingestDocument = new IngestDocument(index, type, id, null, null, null, null, source); + } + + IngestDocumentMatcher(String index, String type, String id, Long version, VersionType versionType, Map source) { + this.ingestDocument = new IngestDocument(index, type, id, null, null, version, versionType, source); } @Override diff --git a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index 3de6b54c0c391..0f8f1ac7017e6 100644 --- a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -144,15 +144,15 @@ private static NodeInfo createNodeInfo() { List plugins = new ArrayList<>(); for (int i = 0; i < numPlugins; i++) { plugins.add(new PluginInfo(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10), - randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10), Collections.emptyList(), - randomBoolean(), randomBoolean())); + randomAlphaOfLengthBetween(3, 10), VersionUtils.randomVersion(random()), "1.8", + randomAlphaOfLengthBetween(3, 10), Collections.emptyList(), randomBoolean(), randomBoolean())); } int numModules = randomIntBetween(0, 5); List modules = new ArrayList<>(); for (int i = 0; i < numModules; i++) { modules.add(new PluginInfo(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10), - randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10), Collections.emptyList(), - randomBoolean(), randomBoolean())); + randomAlphaOfLengthBetween(3, 10), VersionUtils.randomVersion(random()), "1.8", + randomAlphaOfLengthBetween(3, 10), Collections.emptyList(), randomBoolean(), randomBoolean())); } pluginsAndModules = new PluginsAndModules(plugins, modules); } diff --git a/server/src/test/java/org/elasticsearch/plugins/MetaPluginInfoTests.java b/server/src/test/java/org/elasticsearch/plugins/MetaPluginInfoTests.java index 2b7f50056a9c8..c54a13bd30267 100644 --- a/server/src/test/java/org/elasticsearch/plugins/MetaPluginInfoTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/MetaPluginInfoTests.java @@ -113,7 +113,7 @@ public void testExtractAllPluginsWithDuplicates() throws Exception { "classname", "FakePlugin"); IllegalStateException exc = - expectThrows(IllegalStateException.class, () -> PluginInfo.extractAllPlugins(pluginDir)); + expectThrows(IllegalStateException.class, () -> PluginsService.findPluginDirs(pluginDir)); assertThat(exc.getMessage(), containsString("duplicate plugin")); assertThat(exc.getMessage(), endsWith("plugin1")); } diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java index a767dad204efc..88f6c7d83ae46 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java +++ 
b/server/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java @@ -103,20 +103,6 @@ public void testReadFromPropertiesJavaVersionMissing() throws Exception { assertThat(e.getMessage(), containsString("[java.version] is missing")); } - public void testReadFromPropertiesJavaVersionIncompatible() throws Exception { - String pluginName = "fake-plugin"; - Path pluginDir = createTempDir().resolve(pluginName); - PluginTestUtil.writePluginProperties(pluginDir, - "description", "fake desc", - "name", pluginName, - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", "1000000.0", - "classname", "FakePlugin", - "version", "1.0"); - IllegalStateException e = expectThrows(IllegalStateException.class, () -> PluginInfo.readFromProperties(pluginDir)); - assertThat(e.getMessage(), containsString(pluginName + " requires Java")); - } - public void testReadFromPropertiesBadJavaVersionFormat() throws Exception { String pluginName = "fake-plugin"; Path pluginDir = createTempDir().resolve(pluginName); @@ -143,17 +129,6 @@ public void testReadFromPropertiesBogusElasticsearchVersion() throws Exception { assertThat(e.getMessage(), containsString("version needs to contain major, minor, and revision")); } - public void testReadFromPropertiesOldElasticsearchVersion() throws Exception { - Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties(pluginDir, - "description", "fake desc", - "name", "my_plugin", - "version", "1.0", - "elasticsearch.version", Version.V_5_0_0.toString()); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); - assertThat(e.getMessage(), containsString("was designed for version [5.0.0]")); - } - public void testReadFromPropertiesJvmMissingClassname() throws Exception { Path pluginDir = createTempDir().resolve("fake-plugin"); PluginTestUtil.writePluginProperties(pluginDir, @@ -209,7 +184,7 @@ public void testExtendedPluginsEmpty() throws Exception { } public void testSerialize() throws Exception { - PluginInfo info = new PluginInfo("c", "foo", "dummy", "dummyclass", + PluginInfo info = new PluginInfo("c", "foo", "dummy", Version.CURRENT, "1.8", "dummyclass", Collections.singletonList("foo"), randomBoolean(), randomBoolean()); BytesStreamOutput output = new BytesStreamOutput(); info.writeTo(output); @@ -222,11 +197,16 @@ public void testSerialize() throws Exception { public void testPluginListSorted() { List plugins = new ArrayList<>(); - plugins.add(new PluginInfo("c", "foo", "dummy", "dummyclass", Collections.emptyList(), randomBoolean(), randomBoolean())); - plugins.add(new PluginInfo("b", "foo", "dummy", "dummyclass", Collections.emptyList(),randomBoolean(), randomBoolean())); - plugins.add(new PluginInfo( "e", "foo", "dummy", "dummyclass", Collections.emptyList(),randomBoolean(), randomBoolean())); - plugins.add(new PluginInfo("a", "foo", "dummy", "dummyclass", Collections.emptyList(),randomBoolean(), randomBoolean())); - plugins.add(new PluginInfo("d", "foo", "dummy", "dummyclass", Collections.emptyList(),randomBoolean(), randomBoolean())); + plugins.add(new PluginInfo("c", "foo", "dummy", Version.CURRENT, "1.8", "dummyclass", + Collections.emptyList(), randomBoolean(), randomBoolean())); + plugins.add(new PluginInfo("b", "foo", "dummy", Version.CURRENT, "1.8", "dummyclass", + Collections.emptyList(), randomBoolean(), randomBoolean())); + plugins.add(new PluginInfo( "e", "foo", "dummy", Version.CURRENT, "1.8", "dummyclass", + Collections.emptyList(), 
randomBoolean(), randomBoolean())); + plugins.add(new PluginInfo("a", "foo", "dummy", Version.CURRENT, "1.8", "dummyclass", + Collections.emptyList(), randomBoolean(), randomBoolean())); + plugins.add(new PluginInfo("d", "foo", "dummy", Version.CURRENT, "1.8", "dummyclass", + Collections.emptyList(), randomBoolean(), randomBoolean())); PluginsAndModules pluginsInfo = new PluginsAndModules(plugins, Collections.emptyList()); final List infos = pluginsInfo.getPluginInfos(); diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index 1bb9d675988d8..36e1266c51118 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -285,7 +285,8 @@ public OneParameterIncorrectType(Object object) { public void testSortBundlesCycleSelfReference() throws Exception { Path pluginDir = createTempDir(); - PluginInfo info = new PluginInfo("foo", "desc", "1.0", "MyPlugin", Collections.singletonList("foo"), false, false); + PluginInfo info = new PluginInfo("foo", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.singletonList("foo"), false, false); PluginsService.Bundle bundle = new PluginsService.Bundle(info, pluginDir); IllegalStateException e = expectThrows(IllegalStateException.class, () -> PluginsService.sortBundles(Collections.singleton(bundle)) @@ -296,13 +297,17 @@ public void testSortBundlesCycleSelfReference() throws Exception { public void testSortBundlesCycle() throws Exception { Path pluginDir = createTempDir(); Set bundles = new LinkedHashSet<>(); // control iteration order, so we know the beginning of the cycle - PluginInfo info = new PluginInfo("foo", "desc", "1.0", "MyPlugin", Arrays.asList("bar", "other"), false, false); + PluginInfo info = new PluginInfo("foo", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Arrays.asList("bar", "other"), false, false); bundles.add(new PluginsService.Bundle(info, pluginDir)); - PluginInfo info2 = new PluginInfo("bar", "desc", "1.0", "MyPlugin", Collections.singletonList("baz"), false, false); + PluginInfo info2 = new PluginInfo("bar", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.singletonList("baz"), false, false); bundles.add(new PluginsService.Bundle(info2, pluginDir)); - PluginInfo info3 = new PluginInfo("baz", "desc", "1.0", "MyPlugin", Collections.singletonList("foo"), false, false); + PluginInfo info3 = new PluginInfo("baz", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.singletonList("foo"), false, false); bundles.add(new PluginsService.Bundle(info3, pluginDir)); - PluginInfo info4 = new PluginInfo("other", "desc", "1.0", "MyPlugin", Collections.emptyList(), false, false); + PluginInfo info4 = new PluginInfo("other", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.emptyList(), false, false); bundles.add(new PluginsService.Bundle(info4, pluginDir)); IllegalStateException e = expectThrows(IllegalStateException.class, () -> PluginsService.sortBundles(bundles)); @@ -311,7 +316,8 @@ public void testSortBundlesCycle() throws Exception { public void testSortBundlesSingle() throws Exception { Path pluginDir = createTempDir(); - PluginInfo info = new PluginInfo("foo", "desc", "1.0", "MyPlugin", Collections.emptyList(), false, false); + PluginInfo info = new PluginInfo("foo", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.emptyList(), false, false);
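
All of the sortBundles cases in these hunks exercise one behavior: plugins are ordered so that every dependency precedes its dependents, a self- or mutual-reference is reported as a cycle, and an unknown name is reported as missing. A minimal standalone sketch of that ordering, assuming a plain name-to-dependencies map rather than the real PluginsService.Bundle type (error messages are illustrative, not the actual ones):

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

final class BundleOrder {
    // Order plugin names so that every dependency comes before its dependents.
    static List<String> sort(Map<String, List<String>> extendedPlugins) {
        List<String> sorted = new ArrayList<>();
        Set<String> visiting = new HashSet<>();
        Set<String> done = new HashSet<>();
        for (String plugin : extendedPlugins.keySet()) {
            visit(plugin, extendedPlugins, visiting, done, sorted);
        }
        return sorted;
    }

    private static void visit(String plugin, Map<String, List<String>> extendedPlugins,
                              Set<String> visiting, Set<String> done, List<String> sorted) {
        if (done.contains(plugin)) {
            return; // already placed by an earlier traversal
        }
        if (visiting.add(plugin) == false) {
            // revisiting a node that is still on the stack means a cycle
            throw new IllegalStateException("cycle found in plugin dependencies: " + plugin);
        }
        for (String dep : extendedPlugins.getOrDefault(plugin, Collections.emptyList())) {
            if (extendedPlugins.containsKey(dep) == false) {
                throw new IllegalArgumentException("missing plugin [" + dep + "]");
            }
            visit(dep, extendedPlugins, visiting, done, sorted);
        }
        visiting.remove(plugin);
        done.add(plugin);
        sorted.add(plugin);
    }
}
```

For example, sorting {foo=[bar], bar=[]} yields [bar, foo], while a map where foo depends on itself trips the cycle check, matching testSortBundlesCycleSelfReference.
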
PluginsService.Bundle bundle = new PluginsService.Bundle(info, pluginDir); List sortedBundles = PluginsService.sortBundles(Collections.singleton(bundle)); assertThat(sortedBundles, Matchers.contains(bundle)); @@ -320,13 +326,16 @@ public void testSortBundlesSingle() throws Exception { public void testSortBundlesNoDeps() throws Exception { Path pluginDir = createTempDir(); Set bundles = new LinkedHashSet<>(); // control iteration order - PluginInfo info1 = new PluginInfo("foo", "desc", "1.0", "MyPlugin", Collections.emptyList(), false, false); + PluginInfo info1 = new PluginInfo("foo", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.emptyList(), false, false); PluginsService.Bundle bundle1 = new PluginsService.Bundle(info1, pluginDir); bundles.add(bundle1); - PluginInfo info2 = new PluginInfo("bar", "desc", "1.0", "MyPlugin", Collections.emptyList(), false, false); + PluginInfo info2 = new PluginInfo("bar", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.emptyList(), false, false); PluginsService.Bundle bundle2 = new PluginsService.Bundle(info2, pluginDir); bundles.add(bundle2); - PluginInfo info3 = new PluginInfo("baz", "desc", "1.0", "MyPlugin", Collections.emptyList(), false, false); + PluginInfo info3 = new PluginInfo("baz", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.emptyList(), false, false); PluginsService.Bundle bundle3 = new PluginsService.Bundle(info3, pluginDir); bundles.add(bundle3); List sortedBundles = PluginsService.sortBundles(bundles); @@ -335,7 +344,8 @@ public void testSortBundlesNoDeps() throws Exception { public void testSortBundlesMissingDep() throws Exception { Path pluginDir = createTempDir(); - PluginInfo info = new PluginInfo("foo", "desc", "1.0", "MyPlugin", Collections.singletonList("dne"), false, false); + PluginInfo info = new PluginInfo("foo", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.singletonList("dne"), false, false); PluginsService.Bundle bundle = new PluginsService.Bundle(info, pluginDir); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginsService.sortBundles(Collections.singleton(bundle)) @@ -346,16 +356,20 @@ public void testSortBundlesMissingDep() throws Exception { public void testSortBundlesCommonDep() throws Exception { Path pluginDir = createTempDir(); Set bundles = new LinkedHashSet<>(); // control iteration order - PluginInfo info1 = new PluginInfo("grandparent", "desc", "1.0", "MyPlugin", Collections.emptyList(), false, false); + PluginInfo info1 = new PluginInfo("grandparent", "desc", "1.0",Version.CURRENT, "1.8", + "MyPlugin", Collections.emptyList(), false, false); PluginsService.Bundle bundle1 = new PluginsService.Bundle(info1, pluginDir); bundles.add(bundle1); - PluginInfo info2 = new PluginInfo("foo", "desc", "1.0", "MyPlugin", Collections.singletonList("common"), false, false); + PluginInfo info2 = new PluginInfo("foo", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.singletonList("common"), false, false); PluginsService.Bundle bundle2 = new PluginsService.Bundle(info2, pluginDir); bundles.add(bundle2); - PluginInfo info3 = new PluginInfo("bar", "desc", "1.0", "MyPlugin", Collections.singletonList("common"), false, false); + PluginInfo info3 = new PluginInfo("bar", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.singletonList("common"), false, false); PluginsService.Bundle bundle3 = new PluginsService.Bundle(info3, pluginDir); bundles.add(bundle3); - PluginInfo info4 = new 
PluginInfo("common", "desc", "1.0", "MyPlugin", Collections.singletonList("grandparent"), false, false); + PluginInfo info4 = new PluginInfo("common", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.singletonList("grandparent"), false, false); PluginsService.Bundle bundle4 = new PluginsService.Bundle(info4, pluginDir); bundles.add(bundle4); List sortedBundles = PluginsService.sortBundles(bundles); @@ -365,10 +379,12 @@ public void testSortBundlesCommonDep() throws Exception { public void testSortBundlesAlreadyOrdered() throws Exception { Path pluginDir = createTempDir(); Set bundles = new LinkedHashSet<>(); // control iteration order - PluginInfo info1 = new PluginInfo("dep", "desc", "1.0", "MyPlugin", Collections.emptyList(), false, false); + PluginInfo info1 = new PluginInfo("dep", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.emptyList(), false, false); PluginsService.Bundle bundle1 = new PluginsService.Bundle(info1, pluginDir); bundles.add(bundle1); - PluginInfo info2 = new PluginInfo("myplugin", "desc", "1.0", "MyPlugin", Collections.singletonList("dep"), false, false); + PluginInfo info2 = new PluginInfo("myplugin", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.singletonList("dep"), false, false); PluginsService.Bundle bundle2 = new PluginsService.Bundle(info2, pluginDir); bundles.add(bundle2); List sortedBundles = PluginsService.sortBundles(bundles); @@ -426,7 +442,8 @@ public void testJarHellDuplicateCodebaseWithDep() throws Exception { makeJar(dupJar); Map> transitiveDeps = new HashMap<>(); transitiveDeps.put("dep", Collections.singleton(dupJar.toUri().toURL())); - PluginInfo info1 = new PluginInfo("myplugin", "desc", "1.0", "MyPlugin", Collections.singletonList("dep"), false, false); + PluginInfo info1 = new PluginInfo("myplugin", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.singletonList("dep"), false, false); PluginsService.Bundle bundle = new PluginsService.Bundle(info1, pluginDir); IllegalStateException e = expectThrows(IllegalStateException.class, () -> PluginsService.checkBundleJarHell(bundle, transitiveDeps)); @@ -444,7 +461,8 @@ public void testJarHellDuplicateCodebaseAcrossDeps() throws Exception { Map> transitiveDeps = new HashMap<>(); transitiveDeps.put("dep1", Collections.singleton(dupJar.toUri().toURL())); transitiveDeps.put("dep2", Collections.singleton(dupJar.toUri().toURL())); - PluginInfo info1 = new PluginInfo("myplugin", "desc", "1.0", "MyPlugin", Arrays.asList("dep1", "dep2"), false, false); + PluginInfo info1 = new PluginInfo("myplugin", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Arrays.asList("dep1", "dep2"), false, false); PluginsService.Bundle bundle = new PluginsService.Bundle(info1, pluginDir); IllegalStateException e = expectThrows(IllegalStateException.class, () -> PluginsService.checkBundleJarHell(bundle, transitiveDeps)); @@ -460,7 +478,8 @@ public void testJarHellDuplicateClassWithCore() throws Exception { Path pluginDir = createTempDir(); Path pluginJar = pluginDir.resolve("plugin.jar"); makeJar(pluginJar, Level.class); - PluginInfo info1 = new PluginInfo("myplugin", "desc", "1.0", "MyPlugin", Collections.emptyList(), false, false); + PluginInfo info1 = new PluginInfo("myplugin", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.emptyList(), false, false); PluginsService.Bundle bundle = new PluginsService.Bundle(info1, pluginDir); IllegalStateException e = expectThrows(IllegalStateException.class, () -> PluginsService.checkBundleJarHell(bundle, new 
HashMap<>())); @@ -478,7 +497,8 @@ public void testJarHellDuplicateClassWithDep() throws Exception { makeJar(depJar, DummyClass1.class); Map> transitiveDeps = new HashMap<>(); transitiveDeps.put("dep", Collections.singleton(depJar.toUri().toURL())); - PluginInfo info1 = new PluginInfo("myplugin", "desc", "1.0", "MyPlugin", Collections.singletonList("dep"), false, false); + PluginInfo info1 = new PluginInfo("myplugin", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Collections.singletonList("dep"), false, false); PluginsService.Bundle bundle = new PluginsService.Bundle(info1, pluginDir); IllegalStateException e = expectThrows(IllegalStateException.class, () -> PluginsService.checkBundleJarHell(bundle, transitiveDeps)); @@ -500,7 +520,8 @@ public void testJarHellDuplicateClassAcrossDeps() throws Exception { Map> transitiveDeps = new HashMap<>(); transitiveDeps.put("dep1", Collections.singleton(dep1Jar.toUri().toURL())); transitiveDeps.put("dep2", Collections.singleton(dep2Jar.toUri().toURL())); - PluginInfo info1 = new PluginInfo("myplugin", "desc", "1.0", "MyPlugin", Arrays.asList("dep1", "dep2"), false, false); + PluginInfo info1 = new PluginInfo("myplugin", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Arrays.asList("dep1", "dep2"), false, false); PluginsService.Bundle bundle = new PluginsService.Bundle(info1, pluginDir); IllegalStateException e = expectThrows(IllegalStateException.class, () -> PluginsService.checkBundleJarHell(bundle, transitiveDeps)); @@ -522,7 +543,8 @@ public void testJarHellTransitiveMap() throws Exception { Map> transitiveDeps = new HashMap<>(); transitiveDeps.put("dep1", Collections.singleton(dep1Jar.toUri().toURL())); transitiveDeps.put("dep2", Collections.singleton(dep2Jar.toUri().toURL())); - PluginInfo info1 = new PluginInfo("myplugin", "desc", "1.0", "MyPlugin", Arrays.asList("dep1", "dep2"), false, false); + PluginInfo info1 = new PluginInfo("myplugin", "desc", "1.0", Version.CURRENT, "1.8", + "MyPlugin", Arrays.asList("dep1", "dep2"), false, false); PluginsService.Bundle bundle = new PluginsService.Bundle(info1, pluginDir); PluginsService.checkBundleJarHell(bundle, transitiveDeps); Set deps = transitiveDeps.get("myplugin"); @@ -568,4 +590,147 @@ public void testNonExtensibleDep() throws Exception { IllegalStateException e = expectThrows(IllegalStateException.class, () -> newPluginsService(settings)); assertEquals("Plugin [myplugin] cannot extend non-extensible plugin [nonextensible]", e.getMessage()); } + + public void testIncompatibleElasticsearchVersion() throws Exception { + PluginInfo info = new PluginInfo("my_plugin", "desc", "1.0", Version.V_5_0_0, + "1.8", "FakePlugin", Collections.emptyList(), false, false); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginsService.verifyCompatibility(info)); + assertThat(e.getMessage(), containsString("was built for Elasticsearch version 5.0.0")); + } + + public void testIncompatibleJavaVersion() throws Exception { + PluginInfo info = new PluginInfo("my_plugin", "desc", "1.0", Version.CURRENT, + "1000000.0", "FakePlugin", Collections.emptyList(), false, false); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> PluginsService.verifyCompatibility(info)); + assertThat(e.getMessage(), containsString("my_plugin requires Java")); + } + + public void testFindPluginDirs() throws IOException { + final Path plugins = createTempDir(); + + final Path fake = plugins.resolve("fake"); + + PluginTestUtil.writePluginProperties( + fake, + "description", 
"description", + "name", "fake", + "version", "1.0.0", + "elasticsearch.version", Version.CURRENT.toString(), + "java.version", System.getProperty("java.specification.version"), + "classname", "test.DummyPlugin"); + + try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { + Files.copy(jar, fake.resolve("plugin.jar")); + } + + final Path fakeMeta = plugins.resolve("fake-meta"); + + PluginTestUtil.writeMetaPluginProperties(fakeMeta, "description", "description", "name", "fake-meta"); + + final Path fakeMetaCore = fakeMeta.resolve("fake-meta-core"); + PluginTestUtil.writePluginProperties( + fakeMetaCore, + "description", "description", + "name", "fake-meta-core", + "version", "1.0.0", + "elasticsearch.version", Version.CURRENT.toString(), + "java.version", System.getProperty("java.specification.version"), + "classname", "test.DummyPlugin"); + try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { + Files.copy(jar, fakeMetaCore.resolve("plugin.jar")); + } + + assertThat(PluginsService.findPluginDirs(plugins), containsInAnyOrder(fake, fakeMetaCore)); + } + + public void testMissingMandatoryPlugin() { + final Settings settings = + Settings.builder() + .put("path.home", createTempDir()) + .put("plugin.mandatory", "fake") + .build(); + final IllegalStateException e = expectThrows(IllegalStateException.class, () -> newPluginsService(settings)); + assertThat(e, hasToString(containsString("missing mandatory plugins [fake]"))); + } + + public void testExistingMandatoryClasspathPlugin() { + final Settings settings = + Settings.builder() + .put("path.home", createTempDir()) + .put("plugin.mandatory", "org.elasticsearch.plugins.PluginsServiceTests$FakePlugin") + .build(); + newPluginsService(settings, FakePlugin.class); + } + + public static class FakePlugin extends Plugin { + + public FakePlugin() { + + } + + } + + public void testExistingMandatoryInstalledPlugin() throws IOException { + // This test opens a child classloader, reading a jar under the test temp + // dir (a dummy plugin). Classloaders are closed by GC, so when test teardown + // occurs the jar is deleted while the classloader is still open. However, on + // windows, files cannot be deleted when they are still open by a process. + assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); + final Path pathHome = createTempDir(); + final Path plugins = pathHome.resolve("plugins"); + final Path fake = plugins.resolve("fake"); + + PluginTestUtil.writePluginProperties( + fake, + "description", "description", + "name", "fake", + "version", "1.0.0", + "elasticsearch.version", Version.CURRENT.toString(), + "java.version", System.getProperty("java.specification.version"), + "classname", "test.DummyPlugin"); + try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { + Files.copy(jar, fake.resolve("plugin.jar")); + } + + final Settings settings = + Settings.builder() + .put("path.home", pathHome) + .put("plugin.mandatory", "fake") + .build(); + newPluginsService(settings); + } + + public void testExistingMandatoryMetaPlugin() throws IOException { + // This test opens a child classloader, reading a jar under the test temp + // dir (a dummy plugin). Classloaders are closed by GC, so when test teardown + // occurs the jar is deleted while the classloader is still open. However, on + // windows, files cannot be deleted when they are still open by a process. 
+ assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); + final Path pathHome = createTempDir(); + final Path plugins = pathHome.resolve("plugins"); + final Path fakeMeta = plugins.resolve("fake-meta"); + + PluginTestUtil.writeMetaPluginProperties(fakeMeta, "description", "description", "name", "fake-meta"); + + final Path fake = fakeMeta.resolve("fake"); + PluginTestUtil.writePluginProperties( + fake, + "description", "description", + "name", "fake", + "version", "1.0.0", + "elasticsearch.version", Version.CURRENT.toString(), + "java.version", System.getProperty("java.specification.version"), + "classname", "test.DummyPlugin"); + try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { + Files.copy(jar, fake.resolve("plugin.jar")); + } + + final Settings settings = + Settings.builder() + .put("path.home", pathHome) + .put("plugin.mandatory", "fake-meta") + .build(); + newPluginsService(settings); + } + } diff --git a/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java b/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java index 2200c013a52e4..801ed758cb228 100644 --- a/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -37,14 +38,16 @@ public void testFromXContentLoading() throws Exception { // failure to load to old namespace scripts with the same id but different langs XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject().field("lang0#id0", "script0").field("lang1#id0", "script1").endObject(); - XContentParser parser0 = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, builder.bytes()); + XContentParser parser0 = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, builder.bytes().streamInput()); expectThrows(IllegalArgumentException.class, () -> ScriptMetaData.fromXContent(parser0)); // failure to load a new namespace script and old namespace script with the same id but different langs builder = XContentFactory.jsonBuilder(); builder.startObject().field("lang0#id0", "script0") .startObject("id0").field("lang", "lang1").field("source", "script1").endObject().endObject(); - XContentParser parser1 = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, builder.bytes()); + XContentParser parser1 = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, builder.bytes().streamInput()); expectThrows(IllegalArgumentException.class, () -> ScriptMetaData.fromXContent(parser1)); // failure to load a new namespace script and old namespace script with the same id but different langs with additional scripts @@ -52,14 +55,16 @@ public void testFromXContentLoading() throws Exception { builder.startObject().field("lang0#id0", "script0").field("lang0#id1", "script1") .startObject("id1").field("lang", "lang0").field("source", "script0").endObject() .startObject("id0").field("lang", "lang1").field("source", 
"script1").endObject().endObject(); - XContentParser parser2 = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, builder.bytes()); + XContentParser parser2 = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, builder.bytes().streamInput()); expectThrows(IllegalArgumentException.class, () -> ScriptMetaData.fromXContent(parser2)); // okay to load the same script from the new and old namespace if the lang is the same builder = XContentFactory.jsonBuilder(); builder.startObject().field("lang0#id0", "script0") .startObject("id0").field("lang", "lang0").field("source", "script1").endObject().endObject(); - XContentParser parser3 = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, builder.bytes()); + XContentParser parser3 = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, builder.bytes().streamInput()); ScriptMetaData.fromXContent(parser3); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 032bb8d59140c..c50fb89f334af 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -121,9 +121,7 @@ public void setupSuiteScopeCluster() throws Exception { // random cities with no location for (String cityName : Arrays.asList("london", "singapour", "tokyo", "milan")) { - if (randomBoolean() || true) { - cities.add(indexCity("idx-multi", cityName)); - } + cities.add(indexCity("idx-multi", cityName)); } indexRandom(true, cities); prepareCreate("empty_bucket_idx") diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index cbd58bd4acdff..63026a1255745 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -30,8 +30,11 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; @@ -44,6 +47,9 @@ import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.mapper.TypeFieldMapper; +import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.SearchHit; @@ -59,9 +65,14 @@ import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import 
org.elasticsearch.search.aggregations.bucket.global.InternalGlobal; +import org.elasticsearch.search.aggregations.bucket.nested.InternalNested; +import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregator; import org.elasticsearch.search.aggregations.metrics.tophits.InternalTopHits; import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.search.sort.ScoreSortBuilder; import java.io.IOException; import java.net.InetAddress; @@ -74,6 +85,7 @@ import java.util.function.BiFunction; import java.util.function.Function; +import static org.elasticsearch.index.mapper.SeqNoFieldMapper.PRIMARY_TERM_NAME; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; @@ -999,6 +1011,81 @@ public void testGlobalAggregationWithScore() throws IOException { } } + public void testWithNestedAggregations() throws IOException { + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < 10; i++) { + int[] nestedValues = new int[i]; + for (int j = 0; j < i; j++) { + nestedValues[j] = j; + } + indexWriter.addDocuments(generateDocsWithNested(Integer.toString(i), i, nestedValues)); + } + indexWriter.commit(); + for (Aggregator.SubAggCollectionMode mode : Aggregator.SubAggCollectionMode.values()) { + for (boolean withScore : new boolean[]{true, false}) { + NestedAggregationBuilder nested = new NestedAggregationBuilder("nested", "nested_object") + .subAggregation(new TermsAggregationBuilder("terms", ValueType.LONG) + .field("nested_value") + // force the breadth_first mode + .collectMode(mode) + .order(BucketOrder.key(true)) + .subAggregation( + new TopHitsAggregationBuilder("top_hits") + .sort(withScore ? 
new ScoreSortBuilder() : new FieldSortBuilder("_doc")) + .storedField("_none_") + ) + ); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); + fieldType.setHasDocValues(true); + fieldType.setName("nested_value"); + try (IndexReader indexReader = wrap(DirectoryReader.open(directory))) { + InternalNested result = search(newSearcher(indexReader, false, true), + // match root document only + new DocValuesFieldExistsQuery(PRIMARY_TERM_NAME), nested, fieldType); + InternalMultiBucketAggregation terms = result.getAggregations().get("terms"); + assertThat(terms.getBuckets().size(), equalTo(9)); + int ptr = 9; + for (MultiBucketsAggregation.Bucket bucket : terms.getBuckets()) { + InternalTopHits topHits = bucket.getAggregations().get("top_hits"); + assertThat(topHits.getHits().totalHits, equalTo((long) ptr)); + if (withScore) { + assertThat(topHits.getHits().getMaxScore(), equalTo(1f)); + } else { + assertThat(topHits.getHits().getMaxScore(), equalTo(Float.NaN)); + } + --ptr; + } + } + } + } + } + } + } + + private final SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); + private List generateDocsWithNested(String id, int value, int[] nestedValues) { + List documents = new ArrayList<>(); + + for (int nestedValue : nestedValues) { + Document document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "docs#" + id, UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "__nested_object", TypeFieldMapper.Defaults.FIELD_TYPE)); + document.add(new SortedNumericDocValuesField("nested_value", nestedValue)); + documents.add(document); + } + + Document document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "docs#" + id, UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "docs", TypeFieldMapper.Defaults.FIELD_TYPE)); + document.add(new SortedNumericDocValuesField("value", value)); + document.add(sequenceIDFields.primaryTerm); + documents.add(document); + + return documents; + } + + private IndexReader createIndexWithLongs() throws IOException { Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index debd51a2633b4..3846168009dc6 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -69,7 +69,6 @@ protected Collection> nodePlugins() { } private static double[] randomPercents(long minValue, long maxValue) { - final int length = randomIntBetween(1, 20); final double[] percents = new double[length]; for (int i = 0; i < percents.length; ++i) { @@ -97,7 +96,7 @@ private static PercentileRanksAggregationBuilder randomCompression(PercentileRan return builder; } - private void assertConsistent(double[] pcts, PercentileRanks values, long minValue, long maxValue) { + private void assertConsistent(double[] pcts, PercentileRanks values, long minValue) { final List percentileList = CollectionUtils.iterableAsArrayList(values); assertEquals(pcts.length, percentileList.size()); for (int i = 0; i < pcts.length; ++i) { @@ -109,9 +108,6 @@ private void assertConsistent(double[] pcts, PercentileRanks 
values, long minVal if (percentile.getPercent() == 0) { assertThat(percentile.getValue(), lessThanOrEqualTo((double) minValue)); } - if (percentile.getPercent() == 100) { - assertThat(percentile.getValue(), greaterThanOrEqualTo((double) maxValue)); - } } for (int i = 1; i < percentileList.size(); ++i) { @@ -193,7 +189,7 @@ public void testSingleValuedField() throws Exception { assertHitCount(searchResponse, 10); final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue, maxValue); + assertConsistent(pcts, values, minValue); } @Override @@ -233,7 +229,7 @@ public void testSingleValuedFieldOutsideRange() throws Exception { assertHitCount(searchResponse, 10); final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue, maxValue); + assertConsistent(pcts, values, minValue); } @Override @@ -248,7 +244,7 @@ public void testSingleValuedFieldPartiallyUnmapped() throws Exception { assertHitCount(searchResponse, 10); final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue, maxValue); + assertConsistent(pcts, values, minValue); } @Override @@ -266,7 +262,7 @@ public void testSingleValuedFieldWithValueScript() throws Exception { assertHitCount(searchResponse, 10); final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue - 1, maxValue - 1); + assertConsistent(pcts, values, minValue - 1); } @Override @@ -286,7 +282,7 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { assertHitCount(searchResponse, 10); final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue - 1, maxValue - 1); + assertConsistent(pcts, values, minValue - 1); } @Override @@ -301,7 +297,7 @@ public void testMultiValuedField() throws Exception { assertHitCount(searchResponse, 10); final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues, maxValues); + assertConsistent(pcts, values, minValues); } @Override @@ -319,7 +315,7 @@ public void testMultiValuedFieldWithValueScript() throws Exception { assertHitCount(searchResponse, 10); final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1, maxValues - 1); + assertConsistent(pcts, values, minValues - 1); } public void testMultiValuedFieldWithValueScriptReverse() throws Exception { @@ -336,7 +332,7 @@ public void testMultiValuedFieldWithValueScriptReverse() throws Exception { assertHitCount(searchResponse, 10); final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, -maxValues, -minValues); + assertConsistent(pcts, values, -maxValues); } @Override @@ -356,7 +352,7 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { assertHitCount(searchResponse, 10); final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1, maxValues - 1); + assertConsistent(pcts, values, minValues - 1); } @Override @@ -373,7 +369,7 @@ public void testScriptSingleValued() throws Exception { assertHitCount(searchResponse, 10); final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - 
assertConsistent(pcts, values, minValue, maxValue); + assertConsistent(pcts, values, minValue); } @Override @@ -394,7 +390,7 @@ public void testScriptSingleValuedWithParams() throws Exception { assertHitCount(searchResponse, 10); final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue - 1, maxValue - 1); + assertConsistent(pcts, values, minValue - 1); } @Override @@ -412,7 +408,7 @@ public void testScriptMultiValued() throws Exception { assertHitCount(searchResponse, 10); final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues, maxValues); + assertConsistent(pcts, values, minValues); } @Override @@ -431,7 +427,7 @@ public void testScriptMultiValuedWithParams() throws Exception { assertHitCount(searchResponse, 10); final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1, maxValues - 1); + assertConsistent(pcts, values, minValues - 1); } public void testOrderBySubAggregation() { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java index 1605e3710e25b..85ab361a8b337 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java @@ -74,12 +74,12 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { }, tdigest -> { assertEquals(7L, tdigest.state.size()); assertEquals(7L, tdigest.state.centroidCount()); - assertEquals(4.0d, tdigest.percentile(75), 0.0d); - assertEquals("4.0", tdigest.percentileAsString(75)); + assertEquals(4.5d, tdigest.percentile(75), 0.0d); + assertEquals("4.5", tdigest.percentileAsString(75)); assertEquals(2.0d, tdigest.percentile(50), 0.0d); assertEquals("2.0", tdigest.percentileAsString(50)); - assertEquals(1.0d, tdigest.percentile(20), 0.0d); - assertEquals("1.0", tdigest.percentileAsString(20)); + assertEquals(1.0d, tdigest.percentile(22), 0.0d); + assertEquals("1.0", tdigest.percentileAsString(22)); }); } @@ -97,14 +97,14 @@ public void testSomeMatchesNumericDocValues() throws IOException { assertEquals(tdigest.state.centroidCount(), 7L); assertEquals(8.0d, tdigest.percentile(100), 0.0d); assertEquals("8.0", tdigest.percentileAsString(100)); - assertEquals(5.48d, tdigest.percentile(86), 0.0d); - assertEquals("5.48", tdigest.percentileAsString(86)); + assertEquals(6.98d, tdigest.percentile(88), 0.0d); + assertEquals("6.98", tdigest.percentileAsString(88)); assertEquals(1.0d, tdigest.percentile(33), 0.0d); assertEquals("1.0", tdigest.percentileAsString(33)); assertEquals(1.0d, tdigest.percentile(25), 0.0d); assertEquals("1.0", tdigest.percentileAsString(25)); - assertEquals(0.06d, tdigest.percentile(1), 0.0d); - assertEquals("0.06", tdigest.percentileAsString(1)); + assertEquals(0.0d, tdigest.percentile(1), 0.0d); + assertEquals("0.0", tdigest.percentileAsString(1)); }); } @@ -124,7 +124,7 @@ public void testQueryFiltering() throws IOException { assertEquals(4L, tdigest.state.centroidCount()); assertEquals(2.0d, tdigest.percentile(100), 0.0d); assertEquals(1.0d, tdigest.percentile(50), 0.0d); - 
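
The updated expectations just below (for example, percentile(25) moving from 0.75 to 0.5 in testQueryFiltering) track the upgraded t-digest: with one sample per centroid, quantiles are interpolated between centroid midpoints rather than pinned to sample values. A hedged sketch of that midpoint interpolation, assuming the four samples [0, 1, 1, 2] implied by the testQueryFiltering assertions; this is not the real TDigestState code:

```java
final class MidpointPercentile {
    static double percentile(double[] sorted, double percent) {
        int n = sorted.length;
        // Singleton centroids sit at fractional positions i + 0.5 of n.
        double index = percent / 100.0 * n - 0.5;
        if (index <= 0) {
            return sorted[0];
        }
        if (index >= n - 1) {
            return sorted[n - 1];
        }
        int lo = (int) index;
        double frac = index - lo;
        return sorted[lo] + frac * (sorted[lo + 1] - sorted[lo]);
    }

    public static void main(String[] args) {
        double[] docs = {0, 1, 1, 2};
        System.out.println(percentile(docs, 25));  // 0.5, the new expectation
        System.out.println(percentile(docs, 50));  // 1.0
        System.out.println(percentile(docs, 100)); // 2.0
    }
}
```

The same shift explains dropping the maxValue bound in the percentile-ranks assertions: under interpolation, rank 100 no longer guarantees a value at or above the maximum sample.
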
assertEquals(0.75d, tdigest.percentile(25), 0.0d); + assertEquals(0.5d, tdigest.percentile(25), 0.0d); }); testCase(LongPoint.newRangeQuery("row", 100, 110), docs, tdigest -> { diff --git a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java index f68d3a3583503..782a16f793b91 100644 --- a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java +++ b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -159,7 +160,8 @@ private IndexMetaData add(IndexMetaData indexMetaData, String alias, @Nullable C public QueryBuilder aliasFilter(IndexMetaData indexMetaData, String... aliasNames) { CheckedFunction filterParser = bytes -> { - try (XContentParser parser = XContentFactory.xContent(bytes).createParser(xContentRegistry(), bytes)) { + try (XContentParser parser = XContentFactory.xContent(bytes) + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes)) { return parseInnerQueryBuilder(parser); } }; diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index 06d738cfb6016..16365d829a83b 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -181,6 +181,37 @@ public void testPostFilterDisablesCountOptimization() throws Exception { dir.close(); } + public void testTerminateAfterWithFilter() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig() + .setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + Document doc = new Document(); + for (int i = 0; i < 10; i++) { + doc.add(new StringField("foo", Integer.toString(i), Store.NO)); + } + w.addDocument(doc); + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + IndexSearcher contextSearcher = new IndexSearcher(reader); + TestSearchContext context = new TestSearchContext(null, indexShard); + context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + context.terminateAfter(1); + context.setSize(10); + for (int i = 0; i < 10; i++) { + context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", Integer.toString(i))))); + QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); + assertEquals(1, context.queryResult().topDocs().totalHits); + assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); + } + reader.close(); + dir.close(); + } + + public void testMinScoreDisablesCountOptimization() throws Exception { Directory dir = newDirectory(); final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); @@ -346,6 +377,8 @@ public void testTerminateAfterEarlyTermination() throws 
Exception { assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); + assertThat(collector.getTotalHits(), equalTo(1)); + context.queryCollectors().clear(); } { context.setSize(0); diff --git a/server/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java b/server/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java index 3de0d78be5f93..6ba6eb5515b5f 100644 --- a/server/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java @@ -236,7 +236,7 @@ public void testSimpleTerminateAfterCount() throws Exception { refresh(); SearchResponse searchResponse; - for (int i = 1; i <= max; i++) { + for (int i = 1; i < max; i++) { searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)) .setTerminateAfter(i).execute().actionGet(); diff --git a/server/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java b/server/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java index 70c4396ce8867..7a57d2c3e672f 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContent; @@ -174,7 +175,8 @@ public void testUnknownSuggestionTypeThrows() throws IOException { + "\"collate_match\":true}]" + "}]" + "}"; - try (XContentParser parser = xContent.createParser(xContentRegistry(), suggestionString)) { + try (XContentParser parser = xContent.createParser(xContentRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, suggestionString)) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser::getTokenLocation); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index e7dccf702fe26..a51fec8254482 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -480,7 +480,7 @@ public void testFetchShardsSkipUnavailable() throws Exception { AtomicReference failReference = new AtomicReference<>(); connection.fetchSearchShards(searchShardsRequest, new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); - assertTrue(responseLatch.await(1, TimeUnit.SECONDS)); + assertTrue(responseLatch.await(5, TimeUnit.SECONDS)); assertNull(failReference.get()); assertNotNull(reference.get()); ClusterSearchShardsResponse response = reference.get(); @@ -590,17 +590,28 @@ public void run() { CountDownLatch latch = new CountDownLatch(numConnectionAttempts); for (int i = 0; i < numConnectionAttempts; i++) { 
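
The rewritten listener just below must complete at most once per connection attempt, count the latch down whenever it does, and treat only RejectedExecutionException (the pool shutting down) and a post-shutdown ExecutionCancelledException as benign. A standalone sketch of the failure-path discipline using plain JDK types; the class name is hypothetical and Elasticsearch's ActionListener is deliberately not used:

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical stand-in for the test's failure handler, not ES API.
final class OnceFailureHandler {
    private final AtomicBoolean executed = new AtomicBoolean(false);
    private final CountDownLatch latch;

    OnceFailureHandler(CountDownLatch latch) {
        this.latch = latch;
    }

    void onFailure(Exception e) {
        if (executed.compareAndSet(false, true) == false) {
            throw new AssertionError("handler completed more than once");
        }
        latch.countDown();
        if (e instanceof RejectedExecutionException == false) {
            throw new AssertionError(e); // anything else is a real test failure
        }
    }
}
```
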
AtomicBoolean executed = new AtomicBoolean(false); - ActionListener listener = ActionListener.wrap(x -> { - assertTrue(executed.compareAndSet(false, true)); - latch.countDown();}, x -> { - assertTrue(executed.compareAndSet(false, true)); - latch.countDown(); - if (x instanceof RejectedExecutionException) { - // that's fine - } else { - throw new AssertionError(x); - } - }); + ActionListener listener = ActionListener.wrap( + x -> { + assertTrue(executed.compareAndSet(false, true)); + latch.countDown();}, + x -> { + /* + * This can occur on a thread submitted to the thread pool while we are closing the + * remote cluster connection at the end of the test. + */ + if (x instanceof CancellableThreads.ExecutionCancelledException) { + // we should already be shutting down + assertTrue(executed.get()); + return; + } + + assertTrue(executed.compareAndSet(false, true)); + latch.countDown(); + + if (!(x instanceof RejectedExecutionException)) { + throw new AssertionError(x); + } + }); connection.updateSeedNodes(seedNodes, listener); } latch.await(); diff --git a/settings.gradle b/settings.gradle index e86f0f5c64498..420b4104d621d 100644 --- a/settings.gradle +++ b/settings.gradle @@ -1,3 +1,5 @@ +import org.elasticsearch.gradle.Version + String dirName = rootProject.projectDir.name rootProject.name = dirName @@ -13,11 +15,15 @@ List projects = [ 'client:client-benchmark-noop-api-plugin', 'client:benchmark', 'benchmarks', - 'distribution:integ-test-zip', - 'distribution:zip', - 'distribution:tar', - 'distribution:deb', - 'distribution:rpm', + 'distribution:archives:integ-test-zip', + 'distribution:archives:zip', + 'distribution:archives:tar', + 'distribution:packages:deb', + 'distribution:packages:rpm', + 'distribution:bwc:next-minor-snapshot', + 'distribution:bwc:staged-minor-snapshot', + 'distribution:bwc:next-bugfix-snapshot', + 'distribution:bwc:maintenance-bugfix-snapshot', 'distribution:tools:launchers', 'distribution:tools:plugin-cli', 'server', @@ -36,7 +42,7 @@ List projects = [ * of the dir hierarchy to have a build.gradle. Otherwise we would have to iterate * all files/directories in the source tree to find all projects. */ -void addSubProjects(String path, File dir, List projects, List branches) { +void addSubProjects(String path, File dir) { if (dir.isDirectory() == false) return; if (dir.name == 'buildSrc') return; if (new File(dir, 'build.gradle').exists() == false) return; @@ -44,44 +50,28 @@ void addSubProjects(String path, File dir, List projects, List b final String projectName = "${path}:${dir.name}" include projectName - - if (dir.name == 'bwc-snapshot-dummy-projects') { - for (final String branch : branches) { - final String snapshotProjectName = "${projectName}:bwc-snapshot-${branch}" - projects.add(snapshotProjectName) - include snapshotProjectName - project("${snapshotProjectName}").projectDir = dir - } - // TODO do we want to assert that there's nothing else in the bwc directory? 
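
The simplified addSubProjects above replaces the per-branch bwc-snapshot dummy projects with a plain recursive walk: skip non-directories and buildSrc, include any directory that has a build.gradle, then recurse into its children. An illustrative Java mirror of that traversal (Gradle's include and project calls are elided, so this only collects the project paths):

```java
import java.io.File;
import java.util.ArrayList;
import java.util.List;

final class SubProjects {
    // Mirror of the simplified addSubProjects: prune non-projects, collect
    // the project path, then recurse into the directory's children.
    static List<String> find(String path, File dir) {
        List<String> projects = new ArrayList<>();
        if (dir.isDirectory() == false) return projects;
        if (dir.getName().equals("buildSrc")) return projects;
        if (new File(dir, "build.gradle").exists() == false) return projects;
        String projectName = path + ":" + dir.getName();
        projects.add(projectName);
        File[] children = dir.listFiles();
        if (children != null) {
            for (File child : children) {
                projects.addAll(find(projectName, child));
            }
        }
        return projects;
    }
}
```
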
- } else { - if (path.isEmpty() || path.startsWith(':example-plugins')) { + if (path.isEmpty() || path.startsWith(':example-plugins')) { project(projectName).projectDir = dir } for (File subdir : dir.listFiles()) { - addSubProjects(projectName, subdir, projects, branches) + addSubProjects(projectName, subdir) } } -} + // include example plugins first, so adding plugin dirs below won't muck with :example-plugins File examplePluginsDir = new File(rootProject.projectDir, 'plugins/examples') for (File example : examplePluginsDir.listFiles()) { if (example.isDirectory() == false) continue; if (example.name.startsWith('build') || example.name.startsWith('.')) continue; - addSubProjects(':example-plugins', example, projects, []) + addSubProjects(':example-plugins', example) } project(':example-plugins').projectDir = new File(rootProject.projectDir, 'plugins/examples') -addSubProjects('', new File(rootProject.projectDir, 'libs'), projects, []) -addSubProjects('', new File(rootProject.projectDir, 'modules'), projects, []) -addSubProjects('', new File(rootProject.projectDir, 'plugins'), projects, []) -addSubProjects('', new File(rootProject.projectDir, 'qa'), projects, []) - -/* Create projects for building BWC snapshot distributions from the heads of other branches */ -final List branches = ['5.6', '6.0', '6.1', '6.2', '6.x'] -for (final String branch : branches) { - projects.add("distribution:bwc-snapshot-${branch}".toString()) -} +addSubProjects('', new File(rootProject.projectDir, 'libs')) +addSubProjects('', new File(rootProject.projectDir, 'modules')) +addSubProjects('', new File(rootProject.projectDir, 'plugins')) +addSubProjects('', new File(rootProject.projectDir, 'qa')) boolean isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse') if (isEclipse) { @@ -91,18 +81,13 @@ if (isEclipse) { projects << 'libs:elasticsearch-core-tests' projects << 'libs:elasticsearch-nio-tests' projects << 'libs:secure-sm-tests' + projects << 'libs:grok-tests' } include projects.toArray(new String[0]) project(':build-tools').projectDir = new File(rootProject.projectDir, 'buildSrc') -/* The BWC snapshot projects share the same build directory and build file, - * but apply to different backwards compatibility branches. 
*/ -for (final String branch : branches) { - project(":distribution:bwc-snapshot-${branch}").projectDir = new File(rootProject.projectDir, 'distribution/bwc') -} - if (isEclipse) { project(":server").projectDir = new File(rootProject.projectDir, 'server/src/main') project(":server").buildFileName = 'eclipse-build.gradle' @@ -120,13 +105,16 @@ if (isEclipse) { project(":libs:secure-sm").buildFileName = 'eclipse-build.gradle' project(":libs:secure-sm-tests").projectDir = new File(rootProject.projectDir, 'libs/secure-sm/src/test') project(":libs:secure-sm-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:grok").projectDir = new File(rootProject.projectDir, 'libs/grok/src/main') + project(":libs:grok").buildFileName = 'eclipse-build.gradle' + project(":libs:grok-tests").projectDir = new File(rootProject.projectDir, 'libs/grok/src/test') + project(":libs:grok-tests").buildFileName = 'eclipse-build.gradle' } // look for extra plugins for elasticsearch File extraProjects = new File(rootProject.projectDir.parentFile, "${dirName}-extra") if (extraProjects.exists()) { for (File extraProjectDir : extraProjects.listFiles()) { - addSubProjects('', extraProjectDir, projects, branches) + addSubProjects('', extraProjectDir) } } - diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 4e6ef482210b9..193fcb30988c6 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -71,6 +71,6 @@ task namingConventionsMain(type: org.elasticsearch.gradle.precommit.NamingConven precommit.dependsOn namingConventionsMain test.configure { - systemProperty 'tests.gradle_index_compat_versions', versionCollection.versionsIndexCompatibleWithCurrent.join(',') - systemProperty 'tests.gradle_wire_compat_versions', versionCollection.versionsWireCompatibleWithCurrent.join(',') + systemProperty 'tests.gradle_index_compat_versions', bwcVersions.indexCompatible.join(',') + systemProperty 'tests.gradle_wire_compat_versions', bwcVersions.wireCompatible.join(',') } diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index f8c24ead77eba..1e64bd281219c 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -20,12 +20,14 @@ package org.elasticsearch.bootstrap; import com.carrotsearch.randomizedtesting.RandomizedRunner; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.network.IfConfig; import org.elasticsearch.plugins.PluginInfo; import org.elasticsearch.secure_sm.SecureSM; @@ -86,7 +88,8 @@ public class BootstrapForTesting { // check for jar hell try { - JarHell.checkJarHell(); + final Logger logger = ESLoggerFactory.getLogger(JarHell.class); + JarHell.checkJarHell(logger::debug); } catch (Exception e) { throw new RuntimeException("found jar hell in test classpath", e); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java b/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java index 
ec3c9e4dbb430..3e42e3b304e00 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java @@ -76,7 +76,7 @@ public static Settings randomIndexSettings() { return builder.build(); } - private static XContentBuilder randomMapping(String type) throws IOException { + public static XContentBuilder randomMapping(String type) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject().startObject(type); diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 0912045c86595..e10c248dad733 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -65,8 +65,8 @@ import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.LocalCheckpointTracker; +import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.DirectoryService; @@ -263,7 +263,9 @@ protected Translog createTranslog() throws IOException { protected Translog createTranslog(Path translogPath) throws IOException { TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); - return new Translog(translogConfig, null, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); + final String translogUUID = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId); + return new Translog(translogConfig, translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), + () -> SequenceNumbers.NO_OPS_PERFORMED); } protected InternalEngine createEngine(Store store, Path translogPath) throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index dcb4390328498..be20eb0ca2e37 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -621,7 +621,7 @@ protected void snapshotShard(final IndexShard shard, final Snapshot snapshot, final Repository repository) throws IOException { final IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(); - try (Engine.IndexCommitRef indexCommitRef = shard.acquireIndexCommit(false, true)) { + try (Engine.IndexCommitRef indexCommitRef = shard.acquireLastIndexCommit(true)) { Index index = shard.shardId().getIndex(); IndexId indexId = new IndexId(index.getName(), index.getUUID()); diff --git a/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java b/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java index 0def04a79eaf1..cad7b388430bb 100644 --- a/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java +++ b/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java @@ -22,6 +22,7 @@ import 
com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.elasticsearch.index.VersionType; import java.util.ArrayList; import java.util.HashMap; @@ -138,6 +139,9 @@ public static IngestDocument randomIngestDocument(Random random, Map randomSource(Random random) { @@ -219,6 +223,11 @@ public static String randomString(Random random) { return RandomStrings.randomUnicodeOfCodepointLengthBetween(random, 1, 10); } + private static Long randomNonNegativeLong(Random random) { + long randomLong = random.nextLong(); + return randomLong == Long.MIN_VALUE ? 0 : Math.abs(randomLong); + } + private static void addRandomFields(Random random, Map parentNode, int currentDepth) { if (currentDepth > 5) { return; diff --git a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java index f70ebe84d8db8..3697b4ee2438d 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -309,8 +310,9 @@ public static SearchSourceBuilder randomSearchSourceBuilder( } jsonBuilder.endArray(); jsonBuilder.endObject(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, - jsonBuilder.bytes()); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + jsonBuilder.bytes().streamInput()); parser.nextToken(); parser.nextToken(); parser.nextToken(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractDiffableSerializationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractDiffableSerializationTestCase.java index feabca586b887..13fbbf9517aeb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractDiffableSerializationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractDiffableSerializationTestCase.java @@ -41,9 +41,8 @@ public abstract class AbstractDiffableSerializationTestCase> diffReader(); - public void testDiffableSerialization() throws IOException { + public final void testDiffableSerialization() throws IOException { DiffableTestUtils.testDiffableSerialization(this::createTestInstance, this::makeTestChanges, getNamedWriteableRegistry(), instanceReader(), diffReader()); } - } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractDiffableWireSerializationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractDiffableWireSerializationTestCase.java index aa801308825e6..06f8919da58bb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractDiffableWireSerializationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractDiffableWireSerializationTestCase.java @@ -39,9 +39,8 @@ public abstract class 
AbstractDiffableWireSerializationTestCase> diffReader(); - public void testDiffableSerialization() throws IOException { + public final void testDiffableSerialization() throws IOException { DiffableTestUtils.testDiffableSerialization(this::createTestInstance, this::makeTestChanges, getNamedWriteableRegistry(), instanceReader(), diffReader()); } - } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index f8b1572fa09cb..a56180bfc5ecf 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -53,6 +53,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -404,8 +405,9 @@ static List> alterateQueries(Set queries, Set levels = new LinkedList<>(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java index 9eea29b4f46cb..cca2f5c6702dd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java @@ -34,7 +34,7 @@ public abstract class AbstractSerializingTestCase getCopyFunction() { } /** - * Returns a {@link MutateFunction} that can be used to make create a copy + * Returns a {@link MutateFunction} that can be used to create a copy * of the given instance that is different to this instance. This defaults * to null. */ @@ -71,7 +71,7 @@ protected MutateFunction getMutateFunction() { * Tests that the equals and hashcode methods are consistent and copied * versions of the instance have are equal. */ - public void testEqualsAndHashcode() throws IOException { + public final void testEqualsAndHashcode() { for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), getCopyFunction(), getMutateFunction()); } @@ -80,7 +80,7 @@ public void testEqualsAndHashcode() throws IOException { /** * Test serialization and deserialization of the test instance. 
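Marking `testEqualsAndHashcode` as `final` pins the equals/hashCode contract in the base class; subclasses now only supply copy and mutate hooks. Here is a minimal, self-contained sketch of the contract those hooks feed. The class name and signatures are illustrative stand-ins, not the framework's `EqualsHashCodeTestUtils` API:

```java
import java.util.function.UnaryOperator;

// Toy stand-in for the copy/mutate contract enforced by the final test method:
// a copy must be equal with the same hash code, a mutated copy must differ.
public final class EqualsContractSketch {

    static <T> void checkEqualsAndHashCode(T original, UnaryOperator<T> copy, UnaryOperator<T> mutate) {
        T duplicate = copy.apply(original);
        if (duplicate.equals(original) == false || original.equals(duplicate) == false) {
            throw new AssertionError("a copy must be equal to the original, in both directions");
        }
        if (duplicate.hashCode() != original.hashCode()) {
            throw new AssertionError("equal instances must produce the same hash code");
        }
        // the mutate hook is optional, mirroring the null default described above
        if (mutate != null && mutate.apply(original).equals(original)) {
            throw new AssertionError("a mutated copy must not be equal to the original");
        }
    }

    public static void main(String[] args) {
        checkEqualsAndHashCode("abc", s -> new String(s), s -> s + "!");
        System.out.println("equals/hashCode contract holds");
    }
}
```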
*/ - public void testSerialization() throws IOException { + public final void testSerialization() throws IOException { for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { T testInstance = createTestInstance(); assertSerialization(testInstance); diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractStreamableXContentTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractStreamableXContentTestCase.java index 7a51c00ac144e..657bc6d0e25d2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractStreamableXContentTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractStreamableXContentTestCase.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; +import java.util.function.Predicate; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; @@ -36,7 +37,7 @@ public abstract class AbstractStreamableXContentTestCase getRandomFieldsExcludeFilter() { + return field -> false; + } + private T parseInstance(XContentParser parser) throws IOException { T parsedInstance = doParseInstance(parser); assertNull(parser.nextToken()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java index 1166ae170c21a..49e12f82e79ab 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java @@ -58,7 +58,7 @@ protected T mutateInstance(T instance) throws IOException { * Tests that the equals and hashcode methods are consistent and copied * versions of the instance have are equal. */ - public void testEqualsAndHashcode() throws IOException { + public final void testEqualsAndHashcode() { for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), this::copyInstance, this::mutateInstance); } @@ -67,7 +67,7 @@ public void testEqualsAndHashcode() throws IOException { /** * Test serialization and deserialization of the test instance. 
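The new `getRandomFieldsExcludeFilter()` hook defaults to `field -> false`, meaning no field path is shielded from random field insertion. A hypothetical sketch of how such a predicate prunes candidate paths; the paths and the override shown are invented for illustration:

```java
import java.util.Arrays;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;

// Illustrates the Predicate<String> hook: a subclass returns a predicate that
// marks field paths where injecting random content would break the parser roundtrip.
public class ExcludeFilterSketch {
    public static void main(String[] args) {
        // Hypothetical candidate locations where a random field could be injected.
        List<String> paths = Arrays.asList("took", "hits", "hits.total", "_shards.failures");

        // What a subclass might return from getRandomFieldsExcludeFilter().
        Predicate<String> exclude = field -> field.startsWith("_shards") || field.equals("took");

        List<String> insertable = paths.stream().filter(exclude.negate()).collect(Collectors.toList());
        System.out.println(insertable); // prints [hits, hits.total]
    }
}
```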
*/ - public void testSerialization() throws IOException { + public final void testSerialization() throws IOException { for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { T testInstance = createTestInstance(); assertSerialization(testInstance); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index aaed939c310c9..26e9c5e2327e1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -29,6 +29,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; + import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -1095,7 +1096,7 @@ public static void assertEqualsWithErrorMessageFromXConte */ protected final XContentParser createParser(XContentBuilder builder) throws IOException { return builder.generator().contentType().xContent() - .createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, builder.bytes()); + .createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, builder.bytes().streamInput()); } /** @@ -1123,7 +1124,7 @@ protected final XContentParser createParser(XContent xContent, byte[] data) thro * Create a new {@link XContentParser}. */ protected final XContentParser createParser(XContent xContent, BytesReference data) throws IOException { - return xContent.createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, data); + return xContent.createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, data.streamInput()); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 29b0abc51581c..c53364ea27993 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -29,10 +29,8 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; -import org.elasticsearch.action.support.replication.ReplicationTask; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterName; @@ -66,8 +64,6 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; @@ -93,8 +89,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchService; -import org.elasticsearch.tasks.TaskInfo; -import org.elasticsearch.tasks.TaskManager; import 
org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.MockTransportClient; @@ -1106,7 +1100,7 @@ public void beforeIndexDeletion() throws Exception { // test that have ongoing write operations after the test (for example because ttl is used // and not all docs have been purged after the test) and inherit from // ElasticsearchIntegrationTest must override beforeIndexDeletion() to avoid failures. - assertShardIndexCounter(); + assertNoPendingIndexOperations(); //check that shards that have same sync id also contain same number of documents assertSameSyncIdSameDocs(); assertOpenTranslogReferences(); @@ -1136,30 +1130,19 @@ private void assertSameSyncIdSameDocs() { } } - private void assertShardIndexCounter() throws Exception { + private void assertNoPendingIndexOperations() throws Exception { assertBusy(() -> { final Collection nodesAndClients = nodes.values(); for (NodeAndClient nodeAndClient : nodesAndClients) { IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); for (IndexService indexService : indexServices) { for (IndexShard indexShard : indexService) { - int activeOperationsCount = indexShard.getActiveOperationsCount(); - if (activeOperationsCount > 0) { - TaskManager taskManager = getInstance(TransportService.class, nodeAndClient.name).getTaskManager(); - DiscoveryNode localNode = getInstance(ClusterService.class, nodeAndClient.name).localNode(); - List taskInfos = taskManager.getTasks().values().stream() - .filter(task -> task instanceof ReplicationTask) - .map(task -> task.taskInfo(localNode.getId(), true)) - .collect(Collectors.toList()); - ListTasksResponse response = new ListTasksResponse(taskInfos, Collections.emptyList(), Collections.emptyList()); - try { - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint().value(response); - throw new AssertionError("expected index shard counter on shard " + indexShard.shardId() + " on node " + - nodeAndClient.name + " to be 0 but was " + activeOperationsCount + ". 
Current replication tasks on node:\n" + - builder.string()); - } catch (IOException e) { - throw new RuntimeException("caught exception while building response [" + response + "]", e); - } + List operations = indexShard.getActiveOperations(); + if (operations.size() > 0) { + throw new AssertionError( + "shard " + indexShard.shardId() + " on node [" + nodeAndClient.name + "] has pending operations:\n --> " + + operations.stream().collect(Collectors.joining("\n --> ")) + ); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java index d452b33f5778f..9fde8b66a1f96 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java @@ -24,7 +24,10 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Optional; @@ -49,67 +52,71 @@ public class VersionUtils { static Tuple, List> resolveReleasedVersions(Version current, Class versionClass) { List versions = Version.getDeclaredVersions(versionClass); - if (!Booleans.parseBoolean(System.getProperty("build.snapshot", "true"))) { - return Tuple.tuple(versions, Collections.emptyList()); - } - Version last = versions.remove(versions.size() - 1); assert last.equals(current) : "The highest version must be the current one " - + "but was [" + last + "] and current was [" + current + "]"; - - /* In the 5.x series prior to 5.6, unreleased version constants had an - * `_UNRELEASED` suffix, and when making the first release on a minor release - * branch the last, unreleased, version constant from the previous minor branch - * was dropped. After 5.6, there is no `_UNRELEASED` suffix on version constants' - * names and, additionally, they are not dropped when a new minor release branch - * starts. - * - * This means that in 6.x and later series the last release _in each - * minor branch_ is unreleased, whereas in 5.x it's more complicated: There were - * (sometimes, and sometimes multiple) minor branches containing no releases, each - * of which contains a single version constant of the form 5.n.0, and these - * branches always followed a branch that _did_ contain a version of the - * form 5.m.p (p>0). All versions strictly before the last 5.m version are released, - * and all other 5.* versions are unreleased. - */ - - if (current.major == 5 && current.revision != 0) { - /* The current (i.e. latest) version is 5.a.b, b nonzero, which - * means that all other versions are released. */ + + "but was [" + versions.get(versions.size() - 1) + "] and current was [" + current + "]"; + + if (current.revision != 0) { + /* If we are in a stable branch there should be no unreleased version constants + * because we don't expect to release any new versions in older branches. If there + * are extra constants then gradle will yell about it. 
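To make the `current.revision != 0` shortcut concrete, here is a worked example using the same data as the TestReleaseBranch case further down. The string parsing is a toy stand-in for the real `Version` constants:

```java
import java.util.Arrays;
import java.util.List;

// Worked example of the stable-branch rule: when the current constant is a
// patch release (revision != 0), every other constant must already be released.
public class StableBranchRuleSketch {
    public static void main(String[] args) {
        List<String> constants = Arrays.asList("5.3.0", "5.3.1", "5.3.2", "5.4.0", "5.4.1");
        String current = constants.get(constants.size() - 1); // 5.4.1
        int revision = Integer.parseInt(current.substring(current.lastIndexOf('.') + 1));
        if (revision != 0) {
            System.out.println("released   = " + constants.subList(0, constants.size() - 1));
            System.out.println("unreleased = [" + current + "]");
        }
    }
}
```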
*/ return new Tuple<>(unmodifiableList(versions), singletonList(current)); } - final List unreleased = new ArrayList<>(); - unreleased.add(current); - Version prevConsideredVersion = current; - - for (int i = versions.size() - 1; i >= 0; i--) { - Version currConsideredVersion = versions.get(i); - if (currConsideredVersion.major == 5) { - unreleased.add(currConsideredVersion); - versions.remove(i); - if (currConsideredVersion.revision != 0) { - /* Currently considering the latest version in the 5.x series, - * which is (a) unreleased and (b) the only such. So we're done. */ - break; - } - /* ... else we're on a version of the form 5.n.0, and have not yet - * considered a version of the form 5.n.m (m>0), so this entire branch - * is unreleased, so carry on looking for a branch containing releases. - */ - } else if (currConsideredVersion.major != prevConsideredVersion.major - || currConsideredVersion.minor != prevConsideredVersion.minor) { - /* Have moved to the end of a new minor branch, so this is - * an unreleased version. */ - unreleased.add(currConsideredVersion); - versions.remove(i); + /* If we are on a patch release then we know that at least the version before the + * current one is unreleased. If it is released then gradle would be complaining. */ + int unreleasedIndex = versions.size() - 1; + while (true) { + if (unreleasedIndex < 0) { + throw new IllegalArgumentException("Couldn't find first non-alpha release"); } - prevConsideredVersion = currConsideredVersion; - + /* We don't support backwards compatibility for alphas, betas, and rcs. But + * they were released so we add them to the released list. Usually this doesn't + * matter to consumers, but consumers that do care should filter non-release + * versions. */ + if (versions.get(unreleasedIndex).isRelease()) { + break; + } + unreleasedIndex--; } - Collections.reverse(unreleased); - return new Tuple<>(unmodifiableList(versions), unmodifiableList(unreleased)); + Version unreleased = versions.remove(unreleasedIndex); + if (unreleased.revision == 0) { + /* + * If the last unreleased version is itself a patch release then Gradle enforces that there is yet another unreleased version + * before that. However, we have to skip alphas/betas/RCs too (e.g., consider when the version constants are ..., 5.6.3, 5.6.4, + * 6.0.0-alpha1, ..., 6.0.0-rc1, 6.0.0-rc2, 6.0.0, 6.1.0 on the 6.x branch). In this case, we will have pruned 6.0.0 and 6.1.0 as + * unreleased versions, but we also need to prune 5.6.4. At this point though, unreleasedIndex will be pointing to 6.0.0-rc2, so + * we have to skip backwards until we find a non-alpha/beta/RC again. Then we can prune that version as an unreleased version + * too. + */ + do { + unreleasedIndex--; + } while (versions.get(unreleasedIndex).isRelease() == false); + Version earlierUnreleased = versions.remove(unreleasedIndex); + + // This earlierUnreleased is either the snapshot on the next lower minor branch, or it's possible it's a staged release. If it is a + // staged release, remove it and return it in unreleased as well. 
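The 6.x scenario from the comment above can be replayed with plain strings. This sketch assumes only that alphas/betas/RCs are recognizable by their suffix; `isRelease()` here is a toy stand-in for `Version#isRelease`:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Replays the backwards walk: after pruning the GA tip (revision 0), skip the
// prerelease constants until the next GA version, which is also unreleased.
public class PrereleaseSkipSketch {
    static boolean isRelease(String version) {
        return version.contains("-") == false; // "6.0.0-rc2" is a prerelease
    }

    public static void main(String[] args) {
        List<String> versions = new ArrayList<>(Arrays.asList(
            "5.6.3", "5.6.4", "6.0.0-alpha1", "6.0.0-rc1", "6.0.0-rc2", "6.0.0"));

        int unreleasedIndex = versions.size() - 1;
        String unreleased = versions.remove(unreleasedIndex); // 6.0.0, revision == 0
        do {
            unreleasedIndex--; // hop over 6.0.0-rc2, 6.0.0-rc1, 6.0.0-alpha1
        } while (isRelease(versions.get(unreleasedIndex)) == false);

        System.out.println("also unreleased: " + versions.get(unreleasedIndex)); // 5.6.4
        System.out.println("tip unreleased:  " + unreleased);
    }
}
```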
+ if (earlierUnreleased.revision == 0) { + unreleasedIndex--; + Version actualUnreleasedPreviousMinor = versions.remove(unreleasedIndex); + return new Tuple<>(unmodifiableList(versions), unmodifiableList(Arrays.asList(actualUnreleasedPreviousMinor, + earlierUnreleased, unreleased, current))); + } + + return new Tuple<>(unmodifiableList(versions), unmodifiableList(Arrays.asList(earlierUnreleased, unreleased, current))); + } else if (unreleased.major == current.major) { + // need to remove one more of the last major's minor set + do { + unreleasedIndex--; + } while (unreleasedIndex > 0 && versions.get(unreleasedIndex).major == current.major); + if (unreleasedIndex > 0) { + // some of the test cases return very small lists, so it's possible this is just the end of the list; if so, don't include it + Version earlierMajorsMinor = versions.remove(unreleasedIndex); + return new Tuple<>(unmodifiableList(versions), unmodifiableList(Arrays.asList(earlierMajorsMinor, unreleased, current))); + } + } + return new Tuple<>(unmodifiableList(versions), unmodifiableList(Arrays.asList(unreleased, current))); } private static final List RELEASED_VERSIONS; diff --git a/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java b/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java index 063f05544431c..cd556a9115ad3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java @@ -286,7 +286,7 @@ static List getInsertPaths(XContentParser parser, Stack currentP * {@link ObjectPath}. * The key/value arguments can be suppliers that either return fixed or random values. */ - static XContentBuilder insertIntoXContent(XContent xContent, BytesReference original, List paths, Supplier key, + public static XContentBuilder insertIntoXContent(XContent xContent, BytesReference original, List paths, Supplier key, Supplier value) throws IOException { ObjectPath object = ObjectPath.createFromXContent(xContent, original); for (String path : paths) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java index f30c498b21020..bf0b7376b8148 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -133,9 +133,8 @@ public CloseAction flushOrClose(CloseAction originalAction) throws IOException { } } - public AssertingIndexSearcher newSearcher(String source, IndexSearcher searcher, - ReferenceManager manager) throws EngineException { - IndexReader reader = searcher.getIndexReader(); + public AssertingIndexSearcher newSearcher(Engine.Searcher searcher) throws EngineException { + IndexReader reader = searcher.reader(); IndexReader wrappedReader = reader; assert reader != null; if (reader instanceof DirectoryReader && mockContext.wrapReader) { @@ -143,7 +142,7 @@ public AssertingIndexSearcher newSearcher(String source, IndexSearcher searcher, } // this executes basic query checks and asserts that weights are normalized only once etc. 
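Stepping back to the XContentTestUtils change above: making `insertIntoXContent` public exposes its supplier-based key/value arguments to other test classes. A small sketch of why suppliers are used instead of plain values; the field names are invented for illustration:

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;

// Each insertion point can draw a fresh (fixed or random) key and value
// instead of repeating one precomputed constant for every path.
public class SupplierArgumentsSketch {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger();
        Supplier<String> key = () -> "injected_field_" + counter.incrementAndGet(); // fresh per call
        Supplier<Object> value = () -> "x";                                         // fixed per call

        Map<String, Object> target = new LinkedHashMap<>();
        for (int i = 0; i < 3; i++) {
            target.put(key.get(), value.get());
        }
        System.out.println(target); // {injected_field_1=x, injected_field_2=x, injected_field_3=x}
    }
}
```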
final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(mockContext.random, wrappedReader); - assertingIndexSearcher.setSimilarity(searcher.getSimilarity(true)); + assertingIndexSearcher.setSimilarity(searcher.searcher().getSimilarity(true)); assertingIndexSearcher.setQueryCache(filterCache); assertingIndexSearcher.setQueryCachingPolicy(filterCachingPolicy); return assertingIndexSearcher; @@ -183,10 +182,9 @@ public DirectoryReaderWrapper(DirectoryReader in, SubReaderWrapper subReaderWrap } - public Engine.Searcher wrapSearcher(String source, Engine.Searcher engineSearcher, IndexSearcher searcher, - ReferenceManager manager) { - final AssertingIndexSearcher assertingIndexSearcher = newSearcher(source, searcher, manager); - assertingIndexSearcher.setSimilarity(searcher.getSimilarity(true)); + public Engine.Searcher wrapSearcher(String source, Engine.Searcher engineSearcher) { + final AssertingIndexSearcher assertingIndexSearcher = newSearcher(engineSearcher); + assertingIndexSearcher.setSimilarity(engineSearcher.searcher().getSimilarity(true)); // pass the original searcher to the super.newSearcher() method to make sure this is the searcher that will // be released later on. If we wrap an index reader here must not pass the wrapped version to the manager // on release otherwise the reader will be closed too early. - good news, stuff will fail all over the place if we don't get this right here diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java index 92c7b4d9fc0d0..6fdfce83b6504 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java @@ -19,9 +19,7 @@ package org.elasticsearch.test.engine; import org.apache.lucene.index.FilterDirectoryReader; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.ReferenceManager; -import org.apache.lucene.search.SearcherManager; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.InternalEngine; @@ -79,8 +77,8 @@ private void flushAndCloseInternal() throws IOException { } @Override - protected Searcher newSearcher(String source, IndexSearcher searcher, ReferenceManager manager) throws EngineException { - final Searcher engineSearcher = super.newSearcher(source, searcher, manager); - return support().wrapSearcher(source, engineSearcher, searcher, manager); + public Searcher acquireSearcher(String source, SearcherScope scope) { + final Searcher engineSearcher = super.acquireSearcher(source, scope); + return support().wrapSearcher(source, engineSearcher); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index ff31240169ef7..09e849cf7ca6a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -57,6 +57,7 @@ import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; import 
org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -835,9 +836,11 @@ public static void assertToXContentEquivalent(BytesReference expected, BytesRefe //Note that byte[] holding binary values need special treatment as they need to be properly compared item per item. Map actualMap = null; Map expectedMap = null; - try (XContentParser actualParser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, actual)) { + try (XContentParser actualParser = xContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, actual.streamInput())) { actualMap = actualParser.map(); - try (XContentParser expectedParser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, expected)) { + try (XContentParser expectedParser = xContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, expected.streamInput())) { expectedMap = expectedParser.map(); try { assertMapEquals(expectedMap, actualMap); diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java index 60cc6ceeccfa7..e021df52c60fe 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.runner.Description; @@ -107,7 +106,7 @@ private Map processTestLogging(final TestLogging testLogging) { } for (final Map.Entry entry : map.entrySet()) { final Logger logger = resolveLogger(entry.getKey()); - ServerLoggers.setLevel(logger, entry.getValue()); + Loggers.setLevel(logger, entry.getValue()); } return existing; } @@ -146,7 +145,7 @@ private static Map getLoggersAndLevelsFromAnnotation(final TestL private Map reset(final Map map) { for (final Map.Entry previousLogger : map.entrySet()) { final Logger logger = resolveLogger(previousLogger.getKey()); - ServerLoggers.setLevel(logger, previousLogger.getValue()); + Loggers.setLevel(logger, previousLogger.getValue()); } return Collections.emptyMap(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index 01fd3bad0f3e1..f5e834aa90c69 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -19,7 +19,6 @@ package org.elasticsearch.test.rest.yaml; import com.carrotsearch.randomizedtesting.RandomizedTest; - import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; @@ -85,9 +84,9 @@ public ClientYamlTestResponse callApi(String apiName, Map params Map pathParts = new HashMap<>(); Map queryStringParams = new HashMap<>(); - Set apiRequiredPathParts = restApi.getPathParts().entrySet().stream().filter(e -> e.getValue() == true).map(Entry::getKey) + Set apiRequiredPathParts = 
restApi.getPathParts().entrySet().stream().filter(Entry::getValue).map(Entry::getKey) .collect(Collectors.toSet()); - Set apiRequiredParameters = restApi.getParams().entrySet().stream().filter(e -> e.getValue() == true).map(Entry::getKey) + Set apiRequiredParameters = restApi.getParams().entrySet().stream().filter(Entry::getValue).map(Entry::getKey) .collect(Collectors.toSet()); for (Map.Entry entry : params.entrySet()) { @@ -151,7 +150,7 @@ public ClientYamlTestResponse callApi(String apiName, Map params for (String pathPart : restPath.getPathParts()) { try { finalPath.append('/'); - // We append "/" to the path part to handle parts that start with - or other invalid characters + // We prepend "/" to the path part to handle parts that start with - or other invalid characters URI uri = new URI(null, null, null, -1, "/" + pathPart, null, null); //manually escape any slash that each part may contain finalPath.append(uri.getRawPath().substring(1).replaceAll("/", "%2F")); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java index 793a71a95a2a3..245e7956595c1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java @@ -23,6 +23,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Response; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -114,7 +115,8 @@ public String getBodyAsString() { } else { //if the body is in a binary format and gets requested as a string (e.g. to log a test failure), we convert it to json try (XContentBuilder jsonBuilder = XContentFactory.jsonBuilder()) { - try (XContentParser parser = bodyContentType.xContent().createParser(NamedXContentRegistry.EMPTY, body)) { + try (XContentParser parser = bodyContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, body)) { jsonBuilder.copyCurrentStructure(parser); } bodyAsString = jsonBuilder.string(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 5ee78c6942dec..927f9b46c966a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -21,7 +21,6 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; import org.apache.http.HttpHost; -import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -144,15 +143,6 @@ protected ClientYamlTestClient initClientYamlTestClient(ClientYamlSuiteRestSpec return new ClientYamlTestClient(restSpec, restClient, hosts, esVersion); } - @Override - protected void afterIfFailed(List errors) { - // Dump the stash on failure. 
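The path-part escaping in ClientYamlTestClient above leans on `java.net.URI` to do the percent-encoding: prepending "/" makes the segment a valid absolute path even when it starts with "-", and any literal slash inside the segment is escaped afterwards so it is not read as a path separator. The same trick in isolation, runnable as-is:

```java
import java.net.URI;
import java.net.URISyntaxException;

// Percent-encodes a single REST path segment the way the yaml test client does.
public class PathPartEscaper {
    static String escapePathPart(String pathPart) throws URISyntaxException {
        // The artificial leading "/" keeps URI happy for segments starting with '-'.
        URI uri = new URI(null, null, null, -1, "/" + pathPart, null, null);
        // Drop the leading slash again and escape any slash the segment contains.
        return uri.getRawPath().substring(1).replaceAll("/", "%2F");
    }

    public static void main(String[] args) throws URISyntaxException {
        System.out.println(escapePathPart("-index name")); // -index%20name
        System.out.println(escapePathPart("a/b"));         // a%2Fb
    }
}
```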
Instead of dumping it in true json we escape `\n`s so stack traces are easier to read - logger.info("Stash dump on failure [{}]", - Strings.toString(restTestExecutionContext.stash(), true, true) - .replace("\\n", "\n").replace("\\r", "\r").replace("\\t", "\t")); - super.afterIfFailed(errors); - } - public static Iterable createParameters() throws Exception { String[] paths = resolvePathsProperty(REST_TESTS_SUITE, ""); // default to all tests under the test root List tests = new ArrayList<>(); @@ -344,10 +334,16 @@ public void test() throws IOException { private void executeSection(ExecutableSection executableSection) { try { executableSection.execute(restTestExecutionContext); - } catch (Exception e) { - throw new RuntimeException(errorMessage(executableSection, e), e); - } catch (AssertionError e) { - throw new AssertionError(errorMessage(executableSection, e), e); + } catch (AssertionError | Exception e) { + // Dump the stash on failure. Instead of dumping it in true json we escape `\n`s so stack traces are easier to read + logger.info("Stash dump on test failure [{}]", + Strings.toString(restTestExecutionContext.stash(), true, true) + .replace("\\n", "\n").replace("\\r", "\r").replace("\\t", "\t")); + if (e instanceof AssertionError) { + throw new AssertionError(errorMessage(executableSection, e), e); + } else { + throw new RuntimeException(errorMessage(executableSection, e), e); + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java index 7b5952c7a5eb4..8ebeca4233abd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java @@ -22,6 +22,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -48,7 +49,8 @@ public static ObjectPath createFromResponse(Response response) throws IOExceptio } public static ObjectPath createFromXContent(XContent xContent, BytesReference input) throws IOException { - try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, input)) { + try (XContentParser parser = xContent + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, input.streamInput())) { if (parser.nextToken() == XContentParser.Token.START_ARRAY) { return new ObjectPath(parser.listOrderedMap()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 082040fb1eb47..7c6647d65f044 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentLocation; import 
org.elasticsearch.common.xcontent.XContentParser; @@ -128,7 +129,8 @@ public static DoSection parse(XContentParser parser) throws IOException { } else if (token.isValue()) { if ("body".equals(paramName)) { String body = parser.text(); - XContentParser bodyParser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, body); + XContentParser bodyParser = JsonXContent.jsonXContent + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, body); //multiple bodies are supported e.g. in case of bulk provided as a whole string while(bodyParser.nextToken() != null) { apiCallSection.addBody(bodyParser.mapOrdered()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java index 858a8ebd5ed0b..1efd210b110c8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -95,7 +95,7 @@ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSha if (indexShard != null) { Boolean remove = shardSet.remove(indexShard); if (remove == Boolean.TRUE) { - Logger logger = ServerLoggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); + Logger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId()); } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index ed0431d96785c..ec7d15a3e02ca 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -30,7 +30,9 @@ import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.network.NetworkService; @@ -2390,6 +2392,7 @@ protected void doRun() throws Exception { serviceC.start(); serviceC.acceptIncomingRequests(); CountDownLatch responseLatch = new CountDownLatch(1); + AtomicReference receivedException = new AtomicReference<>(null); TransportResponseHandler transportResponseHandler = new TransportResponseHandler() { @Override public TransportResponse newInstance() { @@ -2403,6 +2406,7 @@ public void handleResponse(TransportResponse response) { @Override public void handleException(TransportException exp) { + receivedException.set(exp); responseLatch.countDown(); } @@ -2427,7 +2431,7 @@ public 
String executor() { TransportRequestOptions.Type.STATE); try (Transport.Connection connection = serviceC.openConnection(serviceB.getLocalNode(), builder.build())) { assertBusy(() -> { // netty for instance invokes this concurrently so we better use assert busy here - TransportStats transportStats = serviceC.transport.getStats(); // request has ben send + TransportStats transportStats = serviceC.transport.getStats(); // request has been sent assertEquals(1, transportStats.getRxCount()); assertEquals(1, transportStats.getTxCount()); assertEquals(25, transportStats.getRxSize().getBytes()); @@ -2437,7 +2441,7 @@ public String executor() { transportResponseHandler); receivedLatch.await(); assertBusy(() -> { // netty for instance invokes this concurrently so we better use assert busy here - TransportStats transportStats = serviceC.transport.getStats(); // request has ben send + TransportStats transportStats = serviceC.transport.getStats(); // request has been sent assertEquals(1, transportStats.getRxCount()); assertEquals(2, transportStats.getTxCount()); assertEquals(25, transportStats.getRxSize().getBytes()); @@ -2448,10 +2452,14 @@ public String executor() { stats = serviceC.transport.getStats(); // exception response has been received assertEquals(2, stats.getRxCount()); assertEquals(2, stats.getTxCount()); - int addressLen = serviceB.boundAddress().publishAddress().address().getAddress().getAddress().length; - // if we are bound to a IPv6 address the response address is serialized with the exception so it will be different depending - // on the stack. The emphemeral port will always be in the same range - assertEquals(183 + addressLen, stats.getRxSize().getBytes()); + TransportException exception = receivedException.get(); + assertNotNull(exception); + BytesStreamOutput streamOutput = new BytesStreamOutput(); + exception.writeTo(streamOutput); + String failedMessage = "Unexpected read bytes size. The transport exception that was received=" + exception; + // 49 bytes are the non-exception message bytes that have been received. It should include the initial + // handshake message and the header, version, etc bytes in the exception message. 
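The assertion now derives the expected RX size from the exception that actually arrived, rather than guessing an address-dependent byte count. A self-contained analogue of that measure-then-assert strategy; plain JDK serialization stands in for the `BytesStreamOutput`/`Writeable` pair used in the diff, and the 49-byte overhead is quoted from the comment above:

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;

// Measure the serialized payload and derive the expected byte count from it,
// instead of hard-coding a number that varies with the network stack.
public class MeasuredSizeSketch {
    public static void main(String[] args) throws IOException {
        Exception received = new IllegalStateException("simulated transport failure");
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            out.writeObject(received); // serialize what actually arrived
        }
        int fixedOverhead = 49; // stand-in for the handshake and header bytes noted above
        System.out.println("expected rx bytes = " + (fixedOverhead + bytes.size()));
    }
}
```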
+ assertEquals(failedMessage, 49 + streamOutput.bytes().length(), stats.getRxSize().getBytes()); assertEquals(91, stats.getTxSize().getBytes()); } finally { serviceC.close(); diff --git a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java index 9015ad17e357d..67a9a40f0fc1f 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java @@ -119,27 +119,12 @@ public void testResolveReleasedVersionsForReleaseBranch() { List released = t.v1(); List unreleased = t.v2(); - final List expectedReleased; - final List expectedUnreleased; - if (Booleans.parseBoolean(System.getProperty("build.snapshot", "true"))) { - expectedReleased = Arrays.asList( - TestReleaseBranch.V_5_3_0, - TestReleaseBranch.V_5_3_1, - TestReleaseBranch.V_5_3_2, - TestReleaseBranch.V_5_4_0); - expectedUnreleased = Collections.singletonList(TestReleaseBranch.V_5_4_1); - } else { - expectedReleased = Arrays.asList( - TestReleaseBranch.V_5_3_0, - TestReleaseBranch.V_5_3_1, - TestReleaseBranch.V_5_3_2, - TestReleaseBranch.V_5_4_0, - TestReleaseBranch.V_5_4_1); - expectedUnreleased = Collections.emptyList(); - } - - assertThat(released, equalTo(expectedReleased)); - assertThat(unreleased, equalTo(expectedUnreleased)); + assertThat(released, equalTo(Arrays.asList( + TestReleaseBranch.V_5_3_0, + TestReleaseBranch.V_5_3_1, + TestReleaseBranch.V_5_3_2, + TestReleaseBranch.V_5_4_0))); + assertThat(unreleased, equalTo(Collections.singletonList(TestReleaseBranch.V_5_4_1))); } public static class TestStableBranch { @@ -155,19 +140,12 @@ public void testResolveReleasedVersionsForUnreleasedStableBranch() { List released = t.v1(); List unreleased = t.v2(); - final List expectedReleased; - final List expectedUnreleased; - if (Booleans.parseBoolean(System.getProperty("build.snapshot", "true"))) { - expectedReleased = Arrays.asList(TestStableBranch.V_5_3_0, TestStableBranch.V_5_3_1); - expectedUnreleased = Arrays.asList(TestStableBranch.V_5_3_2, TestStableBranch.V_5_4_0); - } else { - expectedReleased = - Arrays.asList(TestStableBranch.V_5_3_0, TestStableBranch.V_5_3_1, TestStableBranch.V_5_3_2, TestStableBranch.V_5_4_0); - expectedUnreleased = Collections.emptyList(); - } - - assertThat(released, equalTo(expectedReleased)); - assertThat(unreleased, equalTo(expectedUnreleased)); + assertThat(released, equalTo(Arrays.asList( + TestStableBranch.V_5_3_0, + TestStableBranch.V_5_3_1))); + assertThat(unreleased, equalTo(Arrays.asList( + TestStableBranch.V_5_3_2, + TestStableBranch.V_5_4_0))); } public static class TestStableBranchBehindStableBranch { @@ -184,25 +162,13 @@ public void testResolveReleasedVersionsForStableBranchBehindStableBranch() { List released = t.v1(); List unreleased = t.v2(); - final List expectedReleased; - final List expectedUnreleased; - if (Booleans.parseBoolean(System.getProperty("build.snapshot", "true"))) { - expectedReleased = Arrays.asList(TestStableBranchBehindStableBranch.V_5_3_0, TestStableBranchBehindStableBranch.V_5_3_1); - expectedUnreleased = Arrays.asList( - TestStableBranchBehindStableBranch.V_5_3_2, - TestStableBranchBehindStableBranch.V_5_4_0, - TestStableBranchBehindStableBranch.V_5_5_0); - } else { - expectedReleased = Arrays.asList( - TestStableBranchBehindStableBranch.V_5_3_0, - TestStableBranchBehindStableBranch.V_5_3_1, - TestStableBranchBehindStableBranch.V_5_3_2, - 
TestStableBranchBehindStableBranch.V_5_4_0, - TestStableBranchBehindStableBranch.V_5_5_0); - expectedUnreleased = Collections.emptyList(); - } - assertThat(released, equalTo(expectedReleased)); - assertThat(unreleased, equalTo(expectedUnreleased)); + assertThat(released, equalTo(Arrays.asList( + TestStableBranchBehindStableBranch.V_5_3_0, + TestStableBranchBehindStableBranch.V_5_3_1))); + assertThat(unreleased, equalTo(Arrays.asList( + TestStableBranchBehindStableBranch.V_5_3_2, + TestStableBranchBehindStableBranch.V_5_4_0, + TestStableBranchBehindStableBranch.V_5_5_0))); } public static class TestUnstableBranch { @@ -222,28 +188,15 @@ public void testResolveReleasedVersionsForUnstableBranch() { List released = t.v1(); List unreleased = t.v2(); - final List expectedReleased; - final List expectedUnreleased; - if (Booleans.parseBoolean(System.getProperty("build.snapshot", "true"))) { - expectedReleased = Arrays.asList( - TestUnstableBranch.V_5_3_0, - TestUnstableBranch.V_5_3_1, - TestUnstableBranch.V_6_0_0_alpha1, - TestUnstableBranch.V_6_0_0_alpha2); - expectedUnreleased = Arrays.asList(TestUnstableBranch.V_5_3_2, TestUnstableBranch.V_5_4_0, TestUnstableBranch.V_6_0_0_beta1); - } else { - expectedReleased = Arrays.asList( - TestUnstableBranch.V_5_3_0, - TestUnstableBranch.V_5_3_1, - TestUnstableBranch.V_5_3_2, - TestUnstableBranch.V_5_4_0, - TestUnstableBranch.V_6_0_0_alpha1, - TestUnstableBranch.V_6_0_0_alpha2, - TestUnstableBranch.V_6_0_0_beta1); - expectedUnreleased = Collections.emptyList(); - } - assertThat(released, equalTo(expectedReleased)); - assertThat(unreleased, equalTo(expectedUnreleased)); + assertThat(released, equalTo(Arrays.asList( + TestUnstableBranch.V_5_3_0, + TestUnstableBranch.V_5_3_1, + TestUnstableBranch.V_6_0_0_alpha1, + TestUnstableBranch.V_6_0_0_alpha2))); + assertThat(unreleased, equalTo(Arrays.asList( + TestUnstableBranch.V_5_3_2, + TestUnstableBranch.V_5_4_0, + TestUnstableBranch.V_6_0_0_beta1))); } public static class TestNewMajorRelease { @@ -265,34 +218,17 @@ public void testResolveReleasedVersionsAtNewMajorRelease() { List released = t.v1(); List unreleased = t.v2(); - final List expectedReleased; - final List expectedUnreleased; - if (Booleans.parseBoolean(System.getProperty("build.snapshot", "true"))) { - expectedReleased = Arrays.asList( - TestNewMajorRelease.V_5_6_0, - TestNewMajorRelease.V_5_6_1, - TestNewMajorRelease.V_6_0_0_alpha1, - TestNewMajorRelease.V_6_0_0_alpha2, - TestNewMajorRelease.V_6_0_0_beta1, - TestNewMajorRelease.V_6_0_0_beta2, - TestNewMajorRelease.V_6_0_0); - expectedUnreleased = Arrays.asList(TestNewMajorRelease.V_5_6_2, TestNewMajorRelease.V_6_0_1); - } else { - expectedReleased = Arrays.asList( - TestNewMajorRelease.V_5_6_0, - TestNewMajorRelease.V_5_6_1, - TestNewMajorRelease.V_5_6_2, - TestNewMajorRelease.V_6_0_0_alpha1, - TestNewMajorRelease.V_6_0_0_alpha2, - TestNewMajorRelease.V_6_0_0_beta1, - TestNewMajorRelease.V_6_0_0_beta2, - TestNewMajorRelease.V_6_0_0, - TestNewMajorRelease.V_6_0_1); - expectedUnreleased = Collections.emptyList(); - } - - assertThat(released, equalTo(expectedReleased)); - assertThat(unreleased, equalTo(expectedUnreleased)); + assertThat(released, equalTo(Arrays.asList( + TestNewMajorRelease.V_5_6_0, + TestNewMajorRelease.V_5_6_1, + TestNewMajorRelease.V_5_6_2, + TestNewMajorRelease.V_6_0_0_alpha1, + TestNewMajorRelease.V_6_0_0_alpha2, + TestNewMajorRelease.V_6_0_0_beta1, + TestNewMajorRelease.V_6_0_0_beta2, + TestNewMajorRelease.V_6_0_0))); + assertThat(unreleased, equalTo(Arrays.asList( + 
TestNewMajorRelease.V_6_0_1))); } public static class TestVersionBumpIn6x { @@ -315,36 +251,18 @@ public void testResolveReleasedVersionsAtVersionBumpIn6x() { List released = t.v1(); List unreleased = t.v2(); - final List expectedReleased; - final List expectedUnreleased; - - if (Booleans.parseBoolean(System.getProperty("build.snapshot", "true"))) { - expectedReleased = Arrays.asList( - TestVersionBumpIn6x.V_5_6_0, - TestVersionBumpIn6x.V_5_6_1, - TestVersionBumpIn6x.V_6_0_0_alpha1, - TestVersionBumpIn6x.V_6_0_0_alpha2, - TestVersionBumpIn6x.V_6_0_0_beta1, - TestVersionBumpIn6x.V_6_0_0_beta2, - TestVersionBumpIn6x.V_6_0_0); - expectedUnreleased = Arrays.asList(TestVersionBumpIn6x.V_5_6_2, TestVersionBumpIn6x.V_6_0_1, TestVersionBumpIn6x.V_6_1_0); - } else { - expectedReleased = Arrays.asList( - TestVersionBumpIn6x.V_5_6_0, - TestVersionBumpIn6x.V_5_6_1, - TestVersionBumpIn6x.V_5_6_2, - TestVersionBumpIn6x.V_6_0_0_alpha1, - TestVersionBumpIn6x.V_6_0_0_alpha2, - TestVersionBumpIn6x.V_6_0_0_beta1, - TestVersionBumpIn6x.V_6_0_0_beta2, - TestVersionBumpIn6x.V_6_0_0, - TestVersionBumpIn6x.V_6_0_1, - TestVersionBumpIn6x.V_6_1_0); - expectedUnreleased = Collections.emptyList(); - } - - assertThat(released, equalTo(expectedReleased)); - assertThat(unreleased, equalTo(expectedUnreleased)); + assertThat(released, equalTo(Arrays.asList( + TestVersionBumpIn6x.V_5_6_0, + TestVersionBumpIn6x.V_5_6_1, + TestVersionBumpIn6x.V_6_0_0_alpha1, + TestVersionBumpIn6x.V_6_0_0_alpha2, + TestVersionBumpIn6x.V_6_0_0_beta1, + TestVersionBumpIn6x.V_6_0_0_beta2, + TestVersionBumpIn6x.V_6_0_0))); + assertThat(unreleased, equalTo(Arrays.asList( + TestVersionBumpIn6x.V_5_6_2, + TestVersionBumpIn6x.V_6_0_1, + TestVersionBumpIn6x.V_6_1_0))); } public static class TestNewMinorBranchIn6x { @@ -370,44 +288,21 @@ public void testResolveReleasedVersionsAtNewMinorBranchIn6x() { List released = t.v1(); List unreleased = t.v2(); - final List expectedReleased; - final List expectedUnreleased; - if (Booleans.parseBoolean(System.getProperty("build.snapshot", "true"))) { - expectedReleased = Arrays.asList( - TestNewMinorBranchIn6x.V_5_6_0, - TestNewMinorBranchIn6x.V_5_6_1, - TestNewMinorBranchIn6x.V_6_0_0_alpha1, - TestNewMinorBranchIn6x.V_6_0_0_alpha2, - TestNewMinorBranchIn6x.V_6_0_0_beta1, - TestNewMinorBranchIn6x.V_6_0_0_beta2, - TestNewMinorBranchIn6x.V_6_0_0, - TestNewMinorBranchIn6x.V_6_1_0, - TestNewMinorBranchIn6x.V_6_1_1); - expectedUnreleased = Arrays.asList( - TestNewMinorBranchIn6x.V_5_6_2, - TestNewMinorBranchIn6x.V_6_0_1, - TestNewMinorBranchIn6x.V_6_1_2, - TestNewMinorBranchIn6x.V_6_2_0); - } else { - expectedReleased = Arrays.asList( - TestNewMinorBranchIn6x.V_5_6_0, - TestNewMinorBranchIn6x.V_5_6_1, - TestNewMinorBranchIn6x.V_5_6_2, - TestNewMinorBranchIn6x.V_6_0_0_alpha1, - TestNewMinorBranchIn6x.V_6_0_0_alpha2, - TestNewMinorBranchIn6x.V_6_0_0_beta1, - TestNewMinorBranchIn6x.V_6_0_0_beta2, - TestNewMinorBranchIn6x.V_6_0_0, - TestNewMinorBranchIn6x.V_6_0_1, - TestNewMinorBranchIn6x.V_6_1_0, - TestNewMinorBranchIn6x.V_6_1_1, - TestNewMinorBranchIn6x.V_6_1_2, - TestNewMinorBranchIn6x.V_6_2_0); - expectedUnreleased = Collections.emptyList(); - } - - assertThat(released, equalTo(expectedReleased)); - assertThat(unreleased, equalTo(expectedUnreleased)); + assertThat(released, equalTo(Arrays.asList( + TestNewMinorBranchIn6x.V_5_6_0, + TestNewMinorBranchIn6x.V_5_6_1, + TestNewMinorBranchIn6x.V_6_0_0_alpha1, + TestNewMinorBranchIn6x.V_6_0_0_alpha2, + TestNewMinorBranchIn6x.V_6_0_0_beta1, + 
TestNewMinorBranchIn6x.V_6_0_0_beta2, + TestNewMinorBranchIn6x.V_6_0_0, + TestNewMinorBranchIn6x.V_6_0_1, + TestNewMinorBranchIn6x.V_6_1_0, + TestNewMinorBranchIn6x.V_6_1_1))); + assertThat(unreleased, equalTo(Arrays.asList( + TestNewMinorBranchIn6x.V_5_6_2, + TestNewMinorBranchIn6x.V_6_1_2, + TestNewMinorBranchIn6x.V_6_2_0))); } /**
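For reference, the TestNewMinorBranchIn6x expectations gathered in one place; the comments spell out which open branch each unreleased constant belongs to, and prereleases always land in the released bucket:

```java
import java.util.Arrays;
import java.util.List;

// Recap of the resolver's expected partition with current = 6.2.0:
// one unreleased constant per still-open branch, everything else released.
public class NewMinorBranchExample {
    public static void main(String[] args) {
        List<String> released = Arrays.asList(
            "5.6.0", "5.6.1",
            "6.0.0-alpha1", "6.0.0-alpha2", "6.0.0-beta1", "6.0.0-beta2",
            "6.0.0", "6.0.1", "6.1.0", "6.1.1");
        List<String> unreleased = Arrays.asList(
            "5.6.2",  // tip of the previous major's maintenance branch
            "6.1.2",  // tip of the previous minor branch
            "6.2.0"); // the current version itself
        System.out.println("released   = " + released);
        System.out.println("unreleased = " + unreleased);
    }
}
```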